diff --git a/spaces/1-13-am/neural-style-transfer/app.py b/spaces/1-13-am/neural-style-transfer/app.py
deleted file mode 100644
index 5b47da80c190876b3384074b2a83e9440fd00bfa..0000000000000000000000000000000000000000
--- a/spaces/1-13-am/neural-style-transfer/app.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import gradio as gr
-import torch
-from utils import transformer, tensor_to_img
-from network import Style_Transfer_Network
-
-check_point = torch.load("check_point1_0.pth", map_location = torch.device('cpu'))
-model = Style_Transfer_Network()
-model.load_state_dict(check_point['state_dict'])
-
-def style_transfer(content_img, style_strength, style_img_1 = None, iw_1 = 0, style_img_2 = None, iw_2 = 0, style_img_3 = None, iw_3 = 0, preserve_color = None):
- transform = transformer(imsize = 512)
-
- content = transform(content_img).unsqueeze(0)
-
- iw = [iw_1, iw_2, iw_3]
- interpolation_weights = [i/ sum(iw) for i in iw]
-
- style_imgs = [style_img_1, style_img_2, style_img_3]
- styles = []
- for style_img in style_imgs:
- if style_img is not None:
- styles.append(transform(style_img).unsqueeze(0))
- if preserve_color == "None": preserve_color = None
- elif preserve_color == "Whitening & Coloring": preserve_color = "whitening_and_coloring"
- elif preserve_color == "Histogram matching": preserve_color = "histogram_matching"
- with torch.no_grad():
- stylized_img = model(content, styles, style_strength, interpolation_weights, preserve_color = preserve_color)
- return tensor_to_img(stylized_img)
-
-title = "Artistic Style Transfer"
-
-content_img = gr.components.Image(label="Content image", type = "pil")
-
-style_img_1 = gr.components.Image(label="Style images", type = "pil")
-iw_1 = gr.components.Slider(0., 1., label = "Style 1 strength")
-style_img_2 = gr.components.Image(label="Style images", type = "pil")
-iw_2 = gr.components.Slider(0., 1., label = "Style 2 strength")
-style_img_3 = gr.components.Image(label="Style images", type = "pil")
-iw_3 = gr.components.Slider(0., 1., label = "Style 3 strength")
-style_strength = gr.components.Slider(0., 1., label = "Adjust style strength")
-preserve_color = gr.components.Dropdown(["None", "Whitening & Coloring", "Histogram matching"], label = "Choose color preserving mode")
-
-interface = gr.Interface(fn = style_transfer,
- inputs = [content_img,
- style_strength,
- style_img_1,
- iw_1,
- style_img_2,
- iw_2,
- style_img_3,
- iw_3,
- preserve_color],
- outputs = gr.components.Image(),
- title = title
- )
-interface.queue()
-interface.launch(share = True, debug = True)
\ No newline at end of file
diff --git a/spaces/101-5/gpt4free/CONTRIBUTING.md b/spaces/101-5/gpt4free/CONTRIBUTING.md
deleted file mode 100644
index 67aa60da1ce8322d31d71d9c8460f845f338bcde..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/CONTRIBUTING.md
+++ /dev/null
@@ -1,8 +0,0 @@
- Have you ever lost your important data due to accidental deletion, formatting, virus attack, or any other reason? If yes, then you might be looking for a reliable and effective data recovery tool that can help you get back your lost files. One such tool is 7 Data Recovery Suite 4.4, which is a popular and powerful software that can recover data from various scenarios and devices. However, the official version of 7 Data Recovery Suite 4.4 is not free, and you need to pay for a license to use its full features. That's why some people may try to find a crack version of the software online, which claims to offer the same functionality without any cost. But is it safe and legal to use a cracked version of 7 Data Recovery Suite 4.4? And how can you download and install it on your computer? Download >>> https://byltly.com/2uKyCw In this article, we will answer these questions and provide you with a detailed guide on how to use 7 Data Recovery Suite 4.4 Crack. We will also warn you about the potential risks of using a cracked software and suggest a better alternative for data recovery. 7 Data Recovery Suite 4.4 is a comprehensive data recovery software that can recover deleted, formatted, or lost data from hard disks, memory cards, flash drives, and other storage devices. It supports various file types, such as photos, videos, audios, documents, emails, etc. The software consists of four modules that can handle different data loss situations: Some of the main features of 7 Data Recovery Suite 4.4 are: Some of the benefits of using 7 Data Recovery Suite 4.4 are: If you want to use the full features of 7 Data Recovery Suite 4.4 without paying for a license, you may be tempted to download and install a crack version of the software online. However, this is not recommended for several reasons that we will discuss later in this article. 
If you still want to try it at your own risk, here are the steps to download and install 7 Data Recovery Suite 4.4 Crack: How to get 7 Data Recovery Suite 4.4 Crack for free You can find many websites that offer a download link for 7 Data Recovery Suite 4.4 Crack online. However, you should be careful about the source and the authenticity of the file. Some websites may provide fake or malicious files that may harm your computer or steal your information. One possible website that claims to provide a working download link for 7 Data Recovery Suite 4.4 Crack is https://kolompc.com/7-data-recovery-suite/. However, we cannot guarantee its safety or reliability. After downloading the file from the website above or any other source, you need to follow these steps to install it on your computer: After installing the crack version of 7 Data Recovery Suite 4.4 on your computer, you can use it to recover your lost data by following these steps: Launch the software and select one of the four recovery modes according to your data loss situation: Select the device or partition where you lost your data and click Next to start scanning for recoverable files. The scanning process may take some time depending on the size and condition of your device or partition. After scanning is completed, you can preview the found files by clicking on them in the left pane. You can also filter them by file type or path in the right pane. Select the files that you want to recover and click Recover to save them to a location of your choice on your computer or another device. While using a crack version of 7 Data Recovery Suite 4.4 may seem tempting for some people who want to save money and enjoy its full features without paying for a license, there are also some serious risks involved in doing so: The crack file that you download online may contain virus or malware that can infect your computer and damage your system files or programs. 
It may also steal your personal information or encrypt your data and demand ransom for decryption. The use of cracked software is also illegal in most countries, as it violates the software copyright law. By using a cracked version of 7 Data Recovery Suite 4.4, you are infringing on the rights of the software developers and distributors who invested time and money to create and market the product. You may face legal consequences if you are caught using or distributing cracked software, such as fines, lawsuits, or even imprisonment. Moreover, you may also lose your academic or professional reputation if you use cracked software for your research or work projects. 7 Data Recovery Suite 4.4 is a powerful and comprehensive data recovery software that can help you recover your lost data from various scenarios and devices. However, using a crack version of the software is not a wise choice, as it comes with many risks and disadvantages. Using cracked software can expose your computer to virus or malware infection, breach your privacy and security, and cause legal issues for you and your organization. Moreover, using cracked software is unethical and unfair to the software developers and distributors who deserve to be compensated for their work. Therefore, we recommend that you avoid using 7 Data Recovery Suite 4.4 Crack and look for a better alternative for data recovery. One such alternative is Recoverit, which is a reliable and professional data recovery tool that can recover data from various scenarios and devices with high success rate and ease of use.
-
-### Please, follow these steps to contribute:
-1. Reverse a website from this list: [sites-to-reverse](https://github.com/xtekky/gpt4free/issues/40)
-2. Add it to [./testing](https://github.com/xtekky/gpt4free/tree/main/testing)
-3. Refractor it and add it to [./g4f](https://github.com/xtekky/gpt4free/tree/main/g4f)
-
-### We will be grateful to see you as a contributor!
diff --git a/spaces/17TheWord/vits-models/text/cleaners.py b/spaces/17TheWord/vits-models/text/cleaners.py
deleted file mode 100644
index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/vits-models/text/cleaners.py
+++ /dev/null
@@ -1,475 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-'''
-Cleaners are transformations that run over the input text at both training and eval time.
-
-Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
-hyperparameter. Some cleaners are English-specific. You'll typically want to use:
- 1. "english_cleaners" for English text
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
- the symbols in symbols.py to match your data).
-'''
-
-import re
-from unidecode import unidecode
-import pyopenjtalk
-from jamo import h2j, j2hcj
-from pypinyin import lazy_pinyin, BOPOMOFO
-import jieba, cn2an
-
-
-# This is a list of Korean classifiers preceded by pure Korean numerals.
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
-# Regular expression matching whitespace:
-_whitespace_re = re.compile(r'\s+')
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
- ('mrs', 'misess'),
- ('mr', 'mister'),
- ('dr', 'doctor'),
- ('st', 'saint'),
- ('co', 'company'),
- ('jr', 'junior'),
- ('maj', 'major'),
- ('gen', 'general'),
- ('drs', 'doctors'),
- ('rev', 'reverend'),
- ('lt', 'lieutenant'),
- ('hon', 'honorable'),
- ('sgt', 'sergeant'),
- ('capt', 'captain'),
- ('esq', 'esquire'),
- ('ltd', 'limited'),
- ('col', 'colonel'),
- ('ft', 'fort'),
-]]
-
-# List of (hangul, hangul divided) pairs:
-_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄳ', 'ㄱㅅ'),
- ('ㄵ', 'ㄴㅈ'),
- ('ㄶ', 'ㄴㅎ'),
- ('ㄺ', 'ㄹㄱ'),
- ('ㄻ', 'ㄹㅁ'),
- ('ㄼ', 'ㄹㅂ'),
- ('ㄽ', 'ㄹㅅ'),
- ('ㄾ', 'ㄹㅌ'),
- ('ㄿ', 'ㄹㅍ'),
- ('ㅀ', 'ㄹㅎ'),
- ('ㅄ', 'ㅂㅅ'),
- ('ㅘ', 'ㅗㅏ'),
- ('ㅙ', 'ㅗㅐ'),
- ('ㅚ', 'ㅗㅣ'),
- ('ㅝ', 'ㅜㅓ'),
- ('ㅞ', 'ㅜㅔ'),
- ('ㅟ', 'ㅜㅣ'),
- ('ㅢ', 'ㅡㅣ'),
- ('ㅑ', 'ㅣㅏ'),
- ('ㅒ', 'ㅣㅐ'),
- ('ㅕ', 'ㅣㅓ'),
- ('ㅖ', 'ㅣㅔ'),
- ('ㅛ', 'ㅣㅗ'),
- ('ㅠ', 'ㅣㅜ')
-]]
-
-# List of (Latin alphabet, hangul) pairs:
-_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', '에이'),
- ('b', '비'),
- ('c', '시'),
- ('d', '디'),
- ('e', '이'),
- ('f', '에프'),
- ('g', '지'),
- ('h', '에이치'),
- ('i', '아이'),
- ('j', '제이'),
- ('k', '케이'),
- ('l', '엘'),
- ('m', '엠'),
- ('n', '엔'),
- ('o', '오'),
- ('p', '피'),
- ('q', '큐'),
- ('r', '아르'),
- ('s', '에스'),
- ('t', '티'),
- ('u', '유'),
- ('v', '브이'),
- ('w', '더블유'),
- ('x', '엑스'),
- ('y', '와이'),
- ('z', '제트')
-]]
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'ㄟˉ'),
- ('b', 'ㄅㄧˋ'),
- ('c', 'ㄙㄧˉ'),
- ('d', 'ㄉㄧˋ'),
- ('e', 'ㄧˋ'),
- ('f', 'ㄝˊㄈㄨˋ'),
- ('g', 'ㄐㄧˋ'),
- ('h', 'ㄝˇㄑㄩˋ'),
- ('i', 'ㄞˋ'),
- ('j', 'ㄐㄟˋ'),
- ('k', 'ㄎㄟˋ'),
- ('l', 'ㄝˊㄛˋ'),
- ('m', 'ㄝˊㄇㄨˋ'),
- ('n', 'ㄣˉ'),
- ('o', 'ㄡˉ'),
- ('p', 'ㄆㄧˉ'),
- ('q', 'ㄎㄧㄡˉ'),
- ('r', 'ㄚˋ'),
- ('s', 'ㄝˊㄙˋ'),
- ('t', 'ㄊㄧˋ'),
- ('u', 'ㄧㄡˉ'),
- ('v', 'ㄨㄧˉ'),
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
- ('y', 'ㄨㄞˋ'),
- ('z', 'ㄗㄟˋ')
-]]
-
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'ʧ⁼'),
- ('ㄑ', 'ʧʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ʦ`⁼'),
- ('ㄔ', 'ʦ`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ʦ⁼'),
- ('ㄘ', 'ʦʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'e'),
- ('ㄞ', 'ai'),
- ('ㄟ', 'ei'),
- ('ㄠ', 'au'),
- ('ㄡ', 'ou'),
- ('ㄧㄢ', 'yeNN'),
- ('ㄢ', 'aNN'),
- ('ㄧㄣ', 'iNN'),
- ('ㄣ', 'əNN'),
- ('ㄤ', 'aNg'),
- ('ㄧㄥ', 'iNg'),
- ('ㄨㄥ', 'uNg'),
- ('ㄩㄥ', 'yuNg'),
- ('ㄥ', 'əNg'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-
-def expand_abbreviations(text):
- for regex, replacement in _abbreviations:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def lowercase(text):
- return text.lower()
-
-
-def collapse_whitespace(text):
- return re.sub(_whitespace_re, ' ', text)
-
-
-def convert_to_ascii(text):
- return unidecode(text)
-
-
-def japanese_to_romaji_with_accent(text):
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
- sentences = re.split(_japanese_marks, text)
- marks = re.findall(_japanese_marks, text)
- text = ''
- for i, sentence in enumerate(sentences):
- if re.match(_japanese_characters, sentence):
- if text!='':
- text+=' '
- labels = pyopenjtalk.extract_fullcontext(sentence)
- for n, label in enumerate(labels):
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
- if phoneme not in ['sil','pau']:
- text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
- else:
- continue
- n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
- a2_next=-1
- else:
- a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
- # Accent phrase boundary
- if a3 == 1 and a2_next == 1:
- text += ' '
- # Falling
- elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
- text += '↓'
- # Rising
- elif a2 == 1 and a2_next == 2:
- text += '↑'
- if i
7 Data Recovery Suite 4.4 Crack Download HERE !
-What is 7 Data Recovery Suite 4.4?
-
-
- Features of 7 Data Recovery Suite 4.4
-
-
- Benefits of 7 Data Recovery Suite 4.4
-
-
- How to download and install 7 Data Recovery Suite 4.4 Crack?
-
-7 Data Recovery Suite 4.4 Crack full version download link
-Best data recovery software with 7 Data Recovery Suite 4.4 Crack
-7 Data Recovery Suite 4.4 Crack license key generator
-Download 7 Data Recovery Suite 4.4 Crack with serial key
-7 Data Recovery Suite 4.4 Crack activation code online
-Recover deleted files with 7 Data Recovery Suite 4.4 Crack
-7 Data Recovery Suite 4.4 Crack review and features
-7 Data Recovery Suite 4.4 Crack tutorial and guide
-7 Data Recovery Suite 4.4 Crack system requirements and compatibility
-Is 7 Data Recovery Suite 4.4 Crack safe and legit
-7 Data Recovery Suite 4.4 Crack alternatives and competitors
-Pros and cons of using 7 Data Recovery Suite 4.4 Crack
-How to update 7 Data Recovery Suite 4.4 Crack to the latest version
-How to uninstall 7 Data Recovery Suite 4.4 Crack completely
-How to fix errors and issues with 7 Data Recovery Suite 4.4 Crack
-How to backup and restore data with 7 Data Recovery Suite 4.4 Crack
-How to recover data from formatted or corrupted drives with 7 Data Recovery Suite 4.4 Crack
-How to recover data from SD card, USB flash drive, or external hard drive with 7 Data Recovery Suite 4.4 Crack
-How to recover data from Android or iOS devices with 7 Data Recovery Suite 4.4 Crack
-How to recover data from Windows or Mac computers with 7 Data Recovery Suite 4.4 Crack
-How to recover data from different file systems with 7 Data Recovery Suite 4.4 Crack
-How to recover data from various scenarios with 7 Data Recovery Suite 4.4 Crack
-How to recover photos, videos, audio, documents, emails, or other files with 7 Data Recovery Suite 4.4 Crack
-How to recover lost or forgotten passwords with 7 Data Recovery Suite 4.4 Crack
-How to recover data from encrypted or protected files with 7 Data Recovery Suite 4.4 Crack
-How to recover data from RAID arrays or partitions with 7 Data Recovery Suite 4.4 Crack
-How to recover data from cloud storage or online services with 7 Data Recovery Suite 4.4 Crack
-How to recover data from virtual machines or disks with 7 Data Recovery Suite 4.4 Crack
-How to recover data from optical discs or floppy disks with 7 Data Recovery Suite 4.4 Crack
-How to use advanced tools and settings in 7 Data Recovery Suite 4.4 Crack
-How to customize and optimize the performance of 7 Data Recovery Suite 4.4 Crack
-How to contact the support team of 7 Data Recovery Suite 4.4 Crack
-How to get a refund or exchange for the purchase of the product key of the software.Download link
-Installation steps
-
-
- How to use 7 Data Recovery Suite 4.4 Crack?
-Select a recovery mode
-
-
- Scan the device or partition
-Preview and recover the data
-Risks of using 7 Data Recovery Suite 4.4 Crack
-Virus or malware infection
-Privacy breach
-Legal issues
-Conclusion
-FAQs
-
-
-
-
-
If you are looking for a movie that combines humor, adventure, emotion, and stunning animation, you might want to check out Kung Fu Panda 2. This film is the sequel to Kung Fu Panda (2008), which introduced us to Po, a clumsy but lovable panda who became the Dragon Warrior and saved China from the evil Tai Lung.
-Download ✒ ✒ ✒ https://byltly.com/2uKxxH
In this film, Po faces a new challenge: Lord Shen, a peacock who has invented a weapon that can destroy kung fu and conquer China. Along with his friends, the Furious Five, Po must stop Shen before it is too late. But along the way, Po also discovers some secrets about his past and his true identity.
-Kung Fu Panda 2 was released in 2011 by DreamWorks Animation and Paramount Pictures. It was directed by Jennifer Yuh Nelson, who became the first woman to solely direct an animated feature film from a major Hollywood studio. It was written by Jonathan Aibel and Glenn Berger, who also wrote the first film.
-The film received critical acclaim for its story, characters, animation, music, and themes. It was nominated for an Academy Award for Best Animated Feature, losing to Rango. It also became the highest-grossing film directed by a woman until Frozen (2013), as well as the highest-grossing film solely directed by a woman until Wonder Woman (2017). It is also the sixth highest-grossing film of 2011, and the highest-grossing animated feature film of the year.
-The film begins with a flashback that tells us how Lord Shen, the son of the peacock rulers of Gongmen City, became obsessed with using fireworks as a weapon. He learned of a prophecy that said he would be defeated by "a warrior of black and white". He then ordered his wolf army to kill all the pandas in China, hoping to prevent the prophecy from coming true.
-Shen's parents were horrified by his actions and banished him from their city. Shen swore revenge and vowed to return with his weapon one day.
-In the present day, Po is enjoying his life as the Dragon Warrior and the leader of the Furious Five: Tigress, Monkey, Viper, Crane, and Mantis. He is also learning more about kung fu from his mentor, Master Shifu.
-One day, Po and his friends are sent to stop a group of wolf bandits who are stealing metal for Shen's weapon. Po has a flashback of his mother when he sees a symbol on one of the wolves' armor.
-Po becomes curious about his past and asks his adoptive father, Mr. Ping, about where he came from.
-Watch Kung Fu Panda 2 in HD quality online for free
-How to download Kung Fu Panda 2 full movie in HD
-Kung Fu Panda 2 streaming online with subtitles
-Best HD online player for Kung Fu Panda 2 movie
-Kung Fu Panda 2 full movie download link
-Kung Fu Panda 2 HD online player without ads
-Where to watch Kung Fu Panda 2 full movie online
-Kung Fu Panda 2 full movie HD download torrent
-Kung Fu Panda 2 online streaming HD quality
-Kung Fu Panda 2 full movie download in Hindi
-Kung Fu Panda 2 HD online player for Android
-Kung Fu Panda 2 full movie download in Tamil
-Kung Fu Panda 2 online watch HD free
-Kung Fu Panda 2 full movie download in Telugu
-Kung Fu Panda 2 HD online player for PC
-Kung Fu Panda 2 full movie download in Malayalam
-Kung Fu Panda 2 online HD with English subtitles
-Kung Fu Panda 2 full movie download in Kannada
-Kung Fu Panda 2 HD online player for iOS
-Kung Fu Panda 2 full movie download in Bengali
-Kung Fu Panda 2 online HD with Hindi dubbing
-Kung Fu Panda 2 full movie download in Marathi
-Kung Fu Panda 2 HD online player for Mac
-Kung Fu Panda 2 full movie download in Urdu
-Kung Fu Panda 2 online HD with Tamil dubbing
-Kung Fu Panda 2 full movie download in Gujarati
-Kung Fu Panda 2 HD online player for Windows
-Kung Fu Panda 2 full movie download in Punjabi
-Kung Fu Panda 2 online HD with Telugu dubbing
-Kung Fu Panda 2 full movie download in Nepali
-Kung Fu Panda 2 HD online player for Linux
-Kung Fu Panda 2 full movie download in Sinhala
-Kung Fu Panda 2 online HD with Malayalam dubbing
-Kung Fu Panda 2 full movie download in Indonesian
-Kung Fu Panda 2 HD online player for Chromebook
-Kung Fu Panda 2 full movie download in Filipino
-Kung Fu Panda 2 online HD with Kannada dubbing
-Kung Fu Panda 2 full movie download in Vietnamese
-Kung Fu Panda 2 HD online player for Roku
-Kung Fu Panda 2 full movie download in Thai
-Kung Fu Panda 2 online HD with Bengali dubbing
-Kung Fu Panda 2 full movie download in Arabic
-Kung Fu Panda 2 HD online player for Firestick
-Kung Fu Panda 2 full movie download in Persian
-Kung Fu Panda 2 online HD with Urdu dubbing
-Kung Fu Panda 2 full movie download in Turkish
-Kung Fu Panda 2 HD online player for Smart TV
-Kung Fu Panda 2 full movie download in Korean
-Kung Fu Panda 2 online HD with Gujarati dubbing
Mr. Ping tells him that he found him in a radish crate when he was a baby and decided to raise him as his son.
-Po is not satisfied with this answer and decides to find out more about his origins.
-He learns from Master Shifu that Shen has returned to Gongmen City with his weapon, which is a cannon that can fire metal balls with explosive force.
-Po and his friends travel to Gongmen City to stop Shen.
-There they meet two other kung fu masters who have been hiding from Shen: Master Ox and Master Croc.
-They also encounter Shen's old nanny, a goat named Soothsayer, who can see the future.
-Po tries to confront Shen several times but fails due to his flashbacks.
-He eventually learns that Shen was responsible for killing his parents and destroying his village.
-Po is devastated by this revelation but also determined to stop Shen once and for all.
-He realizes that he must achieve inner peace in order to overcome his past trauma.
-With the help of Soothsayer, Po meditates on his memories and accepts them as part of who he is.
-He then leads his friends into a final battle against Shen and his army.
-Po uses his kung fu skills to deflect Shen's cannonballs back at him.
-He also tries to persuade Shen to let go of his hatred and find inner peace.
-Shen refuses to listen and attacks Po with his blades.
-Po dodges them but one of them cuts through Shen's cannon ropes, causing it to fall on him.
-Shen is crushed by his own weapon while Po watches in sadness.
-Po then returns to Mr. Ping's noodle shop with his friends.
-He tells Mr. Ping that he knows he is not his biological father but he still loves him as his dad.
-Mr. Ping hugs him and tells him that he loves him too.
-The film ends with a scene showing that Po's biological father is still alive somewhere in China with other pandas.
-Name | -Voice Actor | -Description | -|||||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Po | -Jack Black | -The Dragon Warrior and the leader of the Furious Five. He is a panda who loves kung fu, food, and fun. He is brave, loyal, optimistic, friendly, clumsy, naive, but also smart when it matters most. | - | ||||||||||||||||||
Tigress | -Angelina Jolie | -Po's closest friend and a fierce fighter. She is a tiger who is strong, serious, disciplined, stoic, | -and sometimes cold but also caring deep down. | ||||||||||||||||||
Monkey | -Jackie Chan | -
-emotion, and adults alike, as it offers a compelling story, engaging characters, beautiful animation, and memorable music. It is a film that celebrates the art of kung fu, the culture of China, and the themes of family, destiny, and inner peace. It is a film that showcases the talents of its director, writers, voice actors, animators, and composers. It is a film that deserves to be watched and enjoyed by everyone. -FAQs-Here are some frequently asked questions about Kung Fu Panda 2: -
No, Kung Fu Panda 2 is not based on a true story. It is a fictional story that takes place in a fantasy world of anthropomorphic animals who practice kung fu. However, the film does draw inspiration from real aspects of Chinese culture, history, and mythology. -Lord Shen's weapon is a cannon that can fire metal balls with explosive force. It is based on the real invention of gunpowder and firearms in China during the Song dynasty (960-1279 CE). -Po's name means "precious" or "treasure" in Chinese. It is also a homophone for the word "potato" in Mandarin, which is a reference to Po's chubby appearance and his love for food. -Po's biological parents are Li Shan and Mei Mei. They are both giant pandas who live in a hidden panda village in the mountains. They appear in Kung Fu Panda 3, where Po reunites with them and learns more about his heritage. -There are currently three films in the Kung Fu Panda franchise: Kung Fu Panda (2008), Kung Fu Panda 2 (2011), and Kung Fu Panda 3 (2016). A fourth film is currently in development at DreamWorks Animation. -- - \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Ad-aware 6.0 Professional REPACK Keygen Serial Key.md b/spaces/1gistliPinn/ChatGPT4/Examples/Ad-aware 6.0 Professional REPACK Keygen Serial Key.md deleted file mode 100644 index a74f4edae195dcef698f4b7b41d687c961f3a423..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Ad-aware 6.0 Professional REPACK Keygen Serial Key.md +++ /dev/null @@ -1,6 +0,0 @@ - Ad-aware 6.0 Professional Keygen Serial KeyDOWNLOAD ✶ https://imgfil.com/2uy1Ko - -If the product continues to prompt you for an activation code you have been unsuccessful. ... Try for Free Buy Now; Nessus Professional is for security pros on the front lines ... So now you are aware of the excellent features provided by Sage 50 ... I called HP and you can't get a real person. slmgr /ad-activation-get-IID (start ... 
1fdad05405 - - - diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing 2 APK Customize Your Car and Compete Against the Best Drifters in the World.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing 2 APK Customize Your Car and Compete Against the Best Drifters in the World.md deleted file mode 100644 index 5bcef2ad805aa9a93e044fd184fb6f8387ebbbe6..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing 2 APK Customize Your Car and Compete Against the Best Drifters in the World.md +++ /dev/null @@ -1,123 +0,0 @@ - - CarX Drift Racing 2: The Ultimate Drifting Game for Android-If you are a fan of drifting games, you must have heard of CarX Drift Racing 2. This is one of the most popular and realistic drifting games for Android devices. In this article, we will tell you everything you need to know about this game, including its features, how to download and install it, and some frequently asked questions. -Introduction-What is CarX Drift Racing 2?-CarX Drift Racing 2 is a sequel of the original CarX Drift Racing game, which has over 100 million fans around the world. It is a racing game that focuses on drifting, which is a driving technique where the driver intentionally oversteers the car to make it slide sideways. Drifting is not only fun, but also challenging and rewarding, as it requires skill, precision, and timing. -carx drift racing 2 latest version apkDownload File ○ https://urlin.us/2uT12l - Why should you play CarX Drift Racing 2?-There are many reasons why you should play CarX Drift Racing 2, such as: -
Features of CarX Drift Racing 2-Online Rooms-This is a new feature that allows you to drift in real time with your friends or other players. You can create or join an online room, pick a location, drift, and earn points. You can also watch other players drift using the drone camera. You can earn valuable rewards for achieving different ranks in the online mode. -Visual Auto Tuning-This feature allows you to customize your car's appearance and performance. You can replace mirrors, lights, bumpers, rims, and many other parts. You can also create a unique image of your car with body kits, vinyls, stickers, and paint. You can express your creativity and style with this feature. -Improved Performance Tuning-This feature allows you to adjust your car's performance according to your preferences and needs. You can tune the suspension, springs, tyre pressure, wheel angle, engine, turbo pressure, gearbox, brakes, and differential. You can fine tune your car to achieve the best drifting results. -Realistic Racing Physics-This feature makes CarX Drift Racing 2 one of the most realistic drifting games on Android. The game uses a physics engine that simulates the behavior of different cars, surfaces, and weather conditions. You can see the smoke, dust, sparks, and tyre tracks that result from your drifting. You can also feel the difference between asphalt, grass, sand, and snow. You can also experience different weather conditions such as rain, fog, and sun. You can enjoy the realistic racing physics of this game. -XDS Mode-This feature allows you to practice tandem drifting with yourself or other players. Tandem drifting is a technique where two or more cars drift together in a synchronized manner. It is one of the most spectacular and difficult forms of drifting. In XDS mode, you can choose a leader car and a follower car, and try to match the leader's trajectory and angle. You can also switch roles and become the leader or the follower. 
You can improve your drifting skills and coordination with this feature. -TOP-32 Mode-This feature allows you to compete against the best drifters in the world. TOP-32 mode is a tournament mode where you have to qualify for the final round by beating 31 other opponents. You have to drift on different tracks and earn points based on your speed, angle, and line. You have to be fast, precise, and consistent to win this mode. You can earn fame and glory by becoming the champion of TOP-32 mode. -carx drift racing 2 apk download latest version How to download and install CarX Drift Racing 2 APK OBB?-If you want to play CarX Drift Racing 2 on your Android device, you have to download and install the APK OBB files. APK is the application package file that contains the game's code and resources. OBB is the data file that contains the game's graphics and sound files. Here are the steps to download and install CarX Drift Racing 2 APK OBB: -Step 1: Download the APK and OBB files from a trusted source-You can find many websites that offer CarX Drift Racing 2 APK OBB files for free download. However, not all of them are safe and reliable. Some of them may contain viruses, malware, or fake files that can harm your device or steal your data. Therefore, you have to be careful and choose a trusted source to download the files. One of the best sources is [CarX Drift Racing 2 APK OBB], which provides the latest version of the game with high-quality graphics and sound. -Step 2: Enable unknown sources on your device-Before you can install the APK file, you have to enable unknown sources on your device. This is a security setting that prevents you from installing apps from sources other than the Google Play Store. To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on. -Step 3: Install the APK file-After you have enabled unknown sources, you can install the APK file. To do this, locate the downloaded APK file on your device using a file manager app. 
Tap on it and follow the instructions on the screen to complete the installation. -Step 4: Extract and copy the OBB folder to Android/OBB-After you have installed the APK file, you have to extract and copy the OBB folder to Android/OBB on your device's internal storage. To do this, locate the downloaded OBB file on your device using a file manager app. Tap on it and select Extract Here or Extract To depending on your app. You will see a folder named com.carxtech.carxdr2. Copy this folder and paste it in Android/OBB on your device's internal storage. -Step 5: Launch the game and enjoy-After you have copied the OBB folder, you are ready to launch the game and enjoy it. To do this, go to your app drawer and tap on CarX Drift Racing 2 icon. The game will start and load the data from the OBB folder. You can now drift away with CarX Drift Racing 2. -Conclusion-CarX Drift Racing 2 is one of the best drifting games for Android devices. It has amazing graphics, realistic physics, online mode, visual auto tuning, XDS mode, TOP-32 mode, and many other features that make it fun and exciting. If you want to play this game, you have to download and install CarX Drift Racing 2 APK OBB files from a trusted source. Follow our guide above to do it easily and safely. -FAQs-
Yes, CarX Drift Racing 2 is free to download and play. However, it contains in-app purchases that allow you to buy coins, cars, and other items. You can also watch ads to earn free coins. -The minimum requirements to play CarX Drift Racing 2 are: -
You can get more coins in CarX Drift Racing 2 by: -
You can unlock more cars and tracks in CarX Drift Racing 2 by: -
You can contact the developers of CarX Drift Racing 2 by: -
- - \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dolphin Emulator v5.0 APK for 32 Bit Android - The Best Way to Enjoy Retro Games.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dolphin Emulator v5.0 APK for 32 Bit Android - The Best Way to Enjoy Retro Games.md deleted file mode 100644 index 75e9beaf485be840d0cb80f471ba0741a37dd544..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dolphin Emulator v5.0 APK for 32 Bit Android - The Best Way to Enjoy Retro Games.md +++ /dev/null @@ -1,118 +0,0 @@ - - Dolphin Emulator: The Ultimate Guide for Android Users-Do you love playing Nintendo GameCube and Wii games? Do you wish you could play them on your Android device? If yes, then you are in luck. Dolphin Emulator is a free and open-source software that allows you to do just that. In this article, we will show you how to download, install, configure, and use Dolphin Emulator on your Android device. We will also answer some of the frequently asked questions about this amazing app. Let's get started! -dolphin emulator v5 0 32 bit apkDownload File ✒ ✒ ✒ https://urlin.us/2uSTlF - What is Dolphin Emulator and why you should use it-Dolphin Emulator is a software that emulates the hardware and software of Nintendo GameCube and Wii consoles. It enables you to play games from these consoles on your Android device, as well as other platforms such as Windows, Linux, and macOS. Dolphin Emulator offers many features and benefits, such as: -
Dolphin Emulator supports both 32-bit and 64-bit Android devices, but the 32-bit version has some limitations and compatibility issues. For example, the 32-bit version cannot run games that require more than 2 GB of RAM, such as The Legend of Zelda: Skyward Sword or Xenoblade Chronicles. The 32-bit version also has lower performance and stability than the 64-bit version. Therefore, if you have a 64-bit device, we recommend you to use the 64-bit version of Dolphin Emulator for a better gaming experience. -How to download and install Dolphin Emulator on your Android device-Downloading and installing Dolphin Emulator on your Android device is very easy and straightforward. You can follow these steps: -
Congratulations! You have successfully installed Dolphin Emulator on your Android device. Now you are ready to configure it and play your favorite games. -dolphin emulator 5.0 download for android 32 bit How to configure Dolphin Emulator settings for optimal performance-Dolphin Emulator has a lot of settings that you can adjust to optimize its performance and compatibility with different games and devices. You can access the settings menu by tapping on the three dots icon in the top right corner of the emulator screen. You will see various tabs, such as graphics, audio, controls, enhancements, hacks, and more. You can tap on each tab to see and change the settings related to it. You can also create custom profiles for different games and devices by tapping on the plus icon in the top right corner of the settings menu. -Some of the settings that you should pay attention to are: -
These are some of the most important settings that you should consider when configuring Dolphin Emulator for 32-bit devices. However, you may need to experiment with different settings to find the best balance between performance and quality for your device and game. You can also check the Dolphin Wiki for more information and tips on specific games. -How to load and play your favorite games on Dolphin Emulator-Now that you have installed and configured Dolphin Emulator on your Android device, you are ready to load and play your favorite games. Here are the steps to do so: -
That's it! You can now enjoy playing GameCube and Wii games on your Android device with Dolphin Emulator. -How to troubleshoot common issues with Dolphin Emulator-Dolphin Emulator is a complex software that may not work perfectly with every device and game. Some of the common issues that you may encounter with Dolphin Emulator are: -
Some of the possible solutions for these issues are: -
These are some of the tips that you can use to improve your gaming experience on Dolphin Emulator. However, you may find other ways to enhance your gaming experience by experimenting with different settings and features. - -This is the end of the article. I hope you enjoyed reading it and learned something new. Thank you for your attention and have a nice day! 197e85843d- - \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/AetherSX2 for Windows and Android - The Best Way to Relive Your Favorite PS2 Games in 2023 - Download Here.md b/spaces/1phancelerku/anime-remove-background/AetherSX2 for Windows and Android - The Best Way to Relive Your Favorite PS2 Games in 2023 - Download Here.md deleted file mode 100644 index ad3bc66e3b3954f91f5c477e6011330e2de0d47d..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/AetherSX2 for Windows and Android - The Best Way to Relive Your Favorite PS2 Games in 2023 - Download Here.md +++ /dev/null @@ -1,159 +0,0 @@ - - AetherSX2 2023 Download: How to Play PS2 Games on Your Android Device-Do you miss playing your favorite PlayStation 2 games? Do you wish you could relive the nostalgia of playing classic PS2 titles on your Android device? If yes, then you are in luck. There is a new PS2 emulator for Android that lets you play PS2 games on your smartphone with ease. It is called AetherSX2, and it is the best PS2 emulator for Android by far. -In this article, we will tell you everything you need to know about AetherSX2, including what it is, how it works, what features and benefits it offers, how to download and install it on your Android device, and how to play PS2 games on your Android device using AetherSX2. By the end of this article, you will be able to enjoy playing PS2 games on your smartphone with AetherSX2. 
-aethersx2 2023 downloadDownload Zip ↔ https://jinyurl.com/2uNLLs - What is AetherSX2?-AetherSX2 is a PS2 emulator for Android that allows you to play PS2 games on your smartphone. An emulator is a software that mimics the hardware and software of another device, in this case, a PS2 console. By using an emulator, you can run games and applications that are designed for another platform, such as a PS2 game on an Android device. -A brief history of AetherSX2-AetherSX2 is the brainchild of one person, a developer who goes by the handle Tahlreth. The developer actually used the PCSX2 emulator as the basis for their Android-based emulator. PCSX2 is a long-running, well-established emulator on PC, so it makes sense to take advantage of the work that has gone into this program. -aethersx2 ps2 emulator for android download 2023 (search volume: 10-100, competition: low) The developer of AetherSX2 got the green light to use the PCSX2 code from the developers themselves and is licensed under the LGPL license — unlike the DamonPS2 developers, who stole the code and didn’t follow the requisite license. In any event, the emulator was initially released in December 2021 via the Google Play Store as an open beta. You can also sideload the APK via the AetherSX2 website. We’d recommend you steer clear of any other websites claiming to offer the APK. -The AetherSX2 emulator is a major step forward for emulation on Android devices. It’s also worth noting that the app is free to download and use, so don’t be duped by anyone saying you need to pay for it. This is in contrast to the DamonPS2 emulator, which is filled to the brim with ads and charges for a Pro version limited to two devices. -Features and benefits of AetherSX2-AetherSX2 is not just another PS2 emulator for Android. It is a powerful and feature-rich emulator that offers many advantages over other emulators. Here are some of the features and benefits of AetherSX2 that make it stand out from the crowd. 
-High compatibility-AetherSX2 boasts high compatibility with a wide range of PS2 games from various genres and regions. You can play popular games like God of War, Final Fantasy, Grand Theft Auto, Metal Gear Solid, Kingdom Hearts, and many more on your Android device with AetherSX2. You can also play games from different regions, such as Japan, Europe, and North America, with the appropriate BIOS files. AetherSX2 supports both ISO and CSO formats for PS2 games. -Enhanced graphics-AetherSX2 does not just emulate the PS2 graphics faithfully, but also enhances them to make them look better on your Android device. You can adjust the resolution, aspect ratio, anti-aliasing, texture filtering, and other graphical settings to improve the visual quality of the games. You can also use shaders to add effects like scanlines, CRT, bloom, and more to the games. AetherSX2 supports both Vulkan and OpenGL renderers for graphics. -Save and load states-AetherSX2 allows you to save and load your game progress at any point with the save and load state feature. This is very convenient for playing on your Android device, as you can resume your game from where you left off without having to go through the in-game save system. You can also use this feature to skip difficult or boring parts of the game by loading a state from another source. AetherSX2 supports up to 10 save slots for each game. -Controller support-AetherSX2 lets you play PS2 games on your Android device with a variety of controllers. You can use the touchscreen controls that are customizable and responsive, or you can use an external controller that connects via Bluetooth or USB. AetherSX2 supports many popular controllers, such as Xbox One, PS4, PS3, Switch Pro, and more. You can also map the buttons and analog sticks to your liking. -Fast and smooth performance-AetherSX2 delivers fast and smooth performance for PS2 games on your Android device. You can play most games at full speed without any lag or stuttering. 
You can also tweak the performance settings to optimize the emulator for your device. You can adjust the frame rate, frame skip, speed hack, audio latency, and other options to improve the performance of the emulator. AetherSX2 runs well on most modern Android devices with decent hardware. -How to download and install AetherSX2 on your Android device-Now that you know what AetherSX2 is and what it can do, you might be wondering how to download and install it on your Android device. Well, it is very easy and simple to do so. Just follow these steps and you will be ready to play PS2 games on your smartphone in no time. -Step 1: Check system requirements-Before you download and install AetherSX2 on your Android device, you need to make sure that your device meets the minimum system requirements for running the emulator. Here are the system requirements for AetherSX2: -
If your device meets these requirements, then you can proceed to the next step. If not, then you might want to upgrade your device or look for another emulator. -Step 2: Download AetherSX2 APK from the official website or Google Play Store-The next step is to download the AetherSX2 APK file from a trusted source. There are two ways to do this: either from the official website or from the Google Play Store. -The official website of AetherSX2 is https://aethersx.com/. Here you can find the latest version of the emulator as well as other information and updates about it. You can download the APK file directly from the website by clicking on the "Download" button on the homepage. -The Google Play Store is another option for downloading the AetherSX2 APK file. The Google Play Store is a safe and convenient way to download apps for your Android device. You can find the AetherSX2 app on the Google Play Store by searching for it or by following this link: https://play.google.com/store/apps/details?id=com.aethersx.aethersx&hl=en_US&gl=US. You can download the app by tapping on the "Install" button on the app page. -Either way, you will get the same APK file that is about 30 MB in size. Make sure you have enough space on your device before downloading it. -Step 3: Install AetherSX2 APK on your Android device-Once you have downloaded the AetherSX2 APK file, you need to install it on your Android device. To do this, you need to enable the installation of apps from unknown sources on your device. This is a security feature that prevents malicious apps from being installed on your device without your permission. Here is how to enable it: -
Now you can install the AetherSX2 APK file by following these steps: -
Congratulations, you have successfully installed AetherSX2 on your Android device. You can now launch the app from your app drawer or home screen. -Step 4: Load PS2 games on your Android device-The next step is to load PS2 games on your Android device. You can do this by either transferring PS2 games from your PC or downloading PS2 games from the internet. Here is how to do both: -Transfer PS2 games from your PC-If you have PS2 games on your PC, you can transfer them to your Android device using a USB cable or a wireless method. Here is how to do it using a USB cable: -
If you want to use a wireless method, you can use an app like AirDroid or ShareIt to transfer files between your PC and Android device over Wi-Fi. Just follow the instructions of the app you choose to use. -Download PS2 games from the internet-If you don't have PS2 games on your PC, you can download them from the internet. However, you need to be careful about where you download them from, as some websites may contain viruses, malware, or fake files. You also need to make sure that you own the original PS2 games that you download, as downloading pirated games is illegal and unethical. -We recommend that you use reputable and trusted websites that offer PS2 games for download, such as Emuparadise, CoolROM, RomHustler, and The ISO Zone. These websites have a large collection of PS2 games from various regions and genres that you can download for free. Here is how to download PS2 games from these websites: -
Note: Some websites may require you to extract the downloaded file using an app like ZArchiver or RAR before you can play it. If this is the case, just follow these steps: -
Now you have PS2 games on your Android device that you can play with AetherSX2. -Step 5: Configure settings and controls according to your preference-The last step before you can play PS2 games on your Android device with AetherSX2 is to configure the settings and controls according to your preference. AetherSX2 has many options that you can customize to enhance your gaming experience. Here are some of the settings and controls that you can configure: -Settings-To access the settings menu, tap on the three-dot icon on the top right corner of the app and select Settings. Here you can find various options that affect the performance, graphics, audio, and input of the emulator. Some of the options that you can adjust are: -
You can experiment with different settings and see what works best for you and your device. You can also reset the settings to default by tapping on the Reset button at the bottom of the menu. -Controls-To access the controls menu, tap on the three-dot icon on the top right corner of the app and select Controls. Here you can find various options that affect the input and layout of the emulator. Some of the options that you can adjust are: -
You can experiment with different controls and see what works best for you and your device. You can also reset the controls to default by tapping on the Reset button at the bottom of the menu. -How to play PS2 games on your Android device using AetherSX2-Now that you have downloaded and installed AetherSX2 on your Android device, loaded PS2 games on your device, and configured the settings and controls according to your preference, you are ready to play PS2 games on your smartphone with AetherSX2. Here is how to do it: -Choose a game from the game list or browse for a game file-When you launch AetherSX2, you will see a game list that shows all the PS2 games that you have on your device. You can scroll through the game list and tap on any game that you want to play. The game will start loading automatically. -If you don't see the game that you want to play on the game list, you can browse for it manually by tapping on the folder icon on the top left corner of the app. This will open a file browser that lets you navigate through your device's storage. You can find and select any PS2 game file that you have on your device in ISO or CSO format. The game will start loading automatically. -Select a graphics renderer (Vulkan or OpenGL)-Before the game starts, you will be asked to select a graphics renderer for the emulator. You can choose between Vulkan or OpenGL as the graphics renderer. Vulkan is recommended for better performance and compatibility, while OpenGL is recommended for older devices or games that have issues with Vulkan. You can change this option later in the settings menu if you want. -Enjoy playing PS2 games on your Android device with AetherSX2-After selecting a graphics renderer, the game will start running on your Android device with AetherSX2. You can use the touchscreen controls or the external controller to play the game as you would on a PS2 console. 
You can also access the emulator menu by tapping on the three-dot icon on the top right corner of the app. Here you can save and load states, change settings and controls, pause and resume the game, and exit the game. -That's it. You can now enjoy playing PS2 games on your Android device with AetherSX2. You can play as many games as you want, as long as you have enough space on your device. You can also switch between different games by going back to the game list or the file browser. -Conclusion-AetherSX2 is a PS2 emulator for Android that lets you play PS2 games on your smartphone with ease. It is a powerful and feature-rich emulator that offers high compatibility, enhanced graphics, save and load states, controller support, fast and smooth performance, and more. It is also free to download and use, unlike some other emulators that charge money or show ads. -In this article, we have shown you how to download and install AetherSX2 on your Android device, how to load PS2 games on your device, how to configure settings and controls according to your preference, and how to play PS2 games on your device using AetherSX2. By following these steps, you will be able to enjoy playing PS2 games on your smartphone with AetherSX2. -We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming! -FAQs-Here are some frequently asked questions about AetherSX2 and PS2 emulation on Android. -Q: Is AetherSX2 legal?-A: AetherSX2 is legal as long as you use it for personal and non-commercial purposes. You also need to own the original PS2 games that you play with AetherSX2, as downloading pirated games is illegal and unethical. -Q: Is AetherSX2 safe?-A: AetherSX2 is safe as long as you download it from the official website or the Google Play Store. 
You also need to be careful about where you download PS2 games from, as some websites may contain viruses, malware, or fake files. -Q: How can I update AetherSX2?-A: You can update AetherSX2 by downloading the latest version of the APK file from the official website or the Google Play Store. You can also enable automatic updates for AetherSX2 on the Google Play Store by tapping on the three-dot icon on the app page and selecting Enable auto-update. -Q: How can I report bugs or issues with AetherSX2?-A: You can report bugs or issues with AetherSX2 by contacting the developer via email at aethersx@gmail.com. You can also join the official Discord server of AetherSX2 at https://discord.gg/6J9f8wM. Here you can chat with other users and get support from the developer and moderators. -Q: How can I support the development of AetherSX2?-A: You can support the development of AetherSX2 by donating to the developer via PayPal at https://www.paypal.me/aethersx. You can also share your feedback and suggestions with the developer via email or Discord. You can also rate and review AetherSX2 on the Google Play Store and spread the word about it to your friends and family. 197e85843d- - \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Compress PDFs Online for Free Download Smaller PDF Files in Seconds.md b/spaces/1phancelerku/anime-remove-background/Compress PDFs Online for Free Download Smaller PDF Files in Seconds.md deleted file mode 100644 index 768eca16718854da5aa78e9bcd90a94223558767..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Compress PDFs Online for Free Download Smaller PDF Files in Seconds.md +++ /dev/null @@ -1,208 +0,0 @@ - - How to Download a 1 MB PDF File in Minutes-PDF files are one of the most popular and versatile document formats in the digital world. They can contain text, images, graphics, links, forms, annotations, and more. 
They can also preserve the layout and appearance of your document across different devices and platforms. -download 1 mb pdf fileDownload File === https://jinyurl.com/2uNQZJ - But sometimes, you might need to download a small PDF file that is only 1 MB or less in size. Maybe you have a limited bandwidth or storage space on your device. Maybe you want to save time and data when downloading a document. Maybe you need to send or receive a document via email or messaging app that has a file size limit. -Whatever your reason is, downloading a 1 MB PDF file is not as hard as you might think. In this article, we will show you how to download a 1 MB PDF file from the internet, how to compress a larger PDF file to 1 MB or less, and how to open and view a 1 MB PDF file on your device. -What is a PDF File?-PDF stands for Portable Document Format. It is a file format that was created by Adobe in 1993 to enable users to share and print documents without losing the original formatting. PDF files can be opened by various software and apps, such as Adobe Acrobat Reader, Google Chrome, Microsoft Edge, and more. -Some of the benefits of PDF files over other formats are: -
Why Do You Need to Download a 1 MB PDF File?-There are many scenarios where you might need to download a small PDF file that is only 1 MB or less in size. For example: -How to download a 1 mb pdf file online
However, downloading a small PDF file might not always be easy or convenient. Sometimes, you might encounter some challenges or limitations when trying to download a large PDF file. For example: -
How to Download a 1 MB PDF File from the Internet-If you need to download a 1 MB PDF file from the internet, you need to find and access a 1 MB PDF file online first. There are many sources or websites that offer free or low-cost PDF files for various purposes and topics. Some of them are: -
Once you find a 1 MB PDF file that you want to download, you need to download and save it to your device. The steps may vary depending on the source or website, but generally, they are: -
If you encounter any problems or errors when downloading a 1 MB PDF file, you can try some of these solutions: -
How to Compress a Larger PDF File to 1 MB or Less-Sometimes, you might not be able to find a 1 MB PDF file that suits your needs. You might have a larger PDF file that you want to download, but it exceeds your bandwidth, storage, or file size limit. In that case, you might want to compress a larger PDF file to a smaller size. -Compressing a PDF file means reducing its file size by removing or optimizing some of its elements, such as images, fonts, metadata, and more. Compressing a PDF file can help you save time, data, and space when downloading, uploading, sending, or storing it. -There are many tools or services that can help you compress PDF files online for free. Some of them are: -
To use one of these tools or services to compress your PDF file, you need to follow these steps: -
If you encounter any problems or errors when compressing your PDF file, you can try some of these solutions: -
How to Open and View a 1 MB PDF File on Your Device-After you download or compress a 1 MB PDF file, you need to open and view it on your device. You can use various software or apps that can help you open and view PDF files. Some of them are: -
To open and view a 1 MB PDF file on your device, you need to follow these steps: -
To adjust the settings or preferences of your software or app to optimize your viewing experience, you can try some of these options: -
Conclusion-Downloading a 1 MB PDF file is not a difficult task if you know how to do it. In this article, we have shown you how to download a 1 MB PDF file from the internet, how to compress a larger PDF file to 1 MB or less, and how to open and view a 1 MB PDF file on your device. We hope that this article has helped you learn something new and useful. -Here are some tips or advice on how to download, compress, and view PDF files efficiently: -
If you have any questions or comments about downloading, compressing, or viewing PDF files, please feel free to leave them below. We would love to hear from you! -FAQs-What is the difference between a PDF file and a Word file?-A PDF file is a document format that preserves the layout and appearance of your document across different devices and platforms. A Word file is a document format that allows you to edit and format your document with various features and options. -How can I convert a PDF file to a Word file or vice versa?-You can use various tools or services that can help you convert PDF files to Word files or vice versa online for free. Some of them are: -
How can I edit a PDF file?-You can use various tools or services that can help you edit PDF files online for free. Some of them are: -
How can I merge or split a PDF file?-You can use various tools or services that can help you merge or split PDF files online for free. Some of them are: -
How can I sign a PDF file?-You can use various tools or services that can help you sign PDF files online for free. Some of them are: -
- - \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Genshin Impact and Embark on an Epic Quest Across Seven Nations.md b/spaces/1phancelerku/anime-remove-background/Download Genshin Impact and Embark on an Epic Quest Across Seven Nations.md deleted file mode 100644 index c87227fbde27ba5a679549c564945d35e268fab4..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Genshin Impact and Embark on an Epic Quest Across Seven Nations.md +++ /dev/null @@ -1,128 +0,0 @@ - - Genshin Download: How to Play the Free-to-Play RPG on PC, Mobile, and Console-Genshin Impact is one of the most popular games of 2020 and 2021, attracting millions of players from all over the world. It is a free-to-play open-world action RPG that lets you explore a beautiful fantasy world called Teyvat, where you can meet a diverse cast of characters, fight against powerful enemies, and uncover the mysteries of your lost sibling. Whether you are a fan of anime-style graphics, engaging storylines, or dynamic combat systems, Genshin Impact has something for everyone. -But how do you download Genshin Impact on your preferred platform? And what are the system requirements and tips and tricks that you need to know before you start your adventure? In this article, we will answer all these questions and more. Read on to find out how to play Genshin Impact on PC, mobile, or console today! -genshin downloadDownload File ····· https://jinyurl.com/2uNJWl - Genshin Download for PC-If you want to play Genshin Impact on your PC, you have two options. You can either download it from the official website or from the Epic Games Store. Both methods are free and easy to follow. -To download Genshin Impact from the official website, you need to visit [Genshin Impact – Step Into a Vast Magical World of Adventure](^1^) and click on the "Windows" button. This will start downloading the launcher file. 
Once it is downloaded, run it and follow the instructions to install the launcher. Then, open the launcher and log in with your miHoYo account or create one if you don't have one already. After that, click on "Get Game" to start downloading the game files. The download size is about 8.2 GB, so it may take some time depending on your internet speed. When the download is complete, click on "Launch" to start playing. -genshin impact download pc To download Genshin Impact from the Epic Games Store, you need to visit [Genshin Impact | Download and Play for Free - Epic Games Store](^3^) and click on "Get". This will prompt you to log in with your Epic Games account or create one if you don't have one already. Then, click on "Place Order" to confirm your purchase (don't worry, it's still free). After that, you will be redirected to the Epic Games Launcher. If you don't have it installed on your PC, you can download it from [Epic Games Launcher]. Once you have the launcher, install it and open it. Then, go to the "Library" tab and find Genshin Impact. Click on "Install" to start downloading the game files. The download size is about 8.2 GB, so it may take some time depending on your internet speed. When the download is complete, click on "Launch" to start playing. -PC System Requirements-Before you download Genshin Impact on your PC, you should check if your PC meets the minimum or recommended system requirements for the game. 
Here are the system requirements for PC according to the official website: - | Minimum System Requirements | Recommended System Requirements | | --- | --- | | OS: Windows 7 SP1 64-bit, Windows 8.1 64-bit, or Windows 10 64-bit | OS: Windows 10 64-bit | | Processor: Intel Core i5 or equivalent | Processor: Intel Core i7 or equivalent | | Memory: 8 GB RAM | Memory: 16 GB RAM | | Graphics: NVIDIA GeForce GT 1030 or higher | Graphics: NVIDIA GeForce GTX 1060 6 GB or higher | | DirectX: Version 11 | DirectX: Version 11 | | Storage: 30 GB available space | Storage: 30 GB available space |If your PC does not meet the minimum system requirements, you may experience low frame rates, crashes, or other issues while playing the game. If your PC meets or exceeds the recommended system requirements, you can enjoy the game at higher graphics settings and smoother performance. -PC Tips and Tricks-Here are some tips and tricks that can help you optimize your PC performance and gameplay experience while playing Genshin Impact: -
If your device does not meet the minimum system requirements, you may experience low graphics quality, slow loading times, or other issues while playing the game. If your device meets or exceeds the recommended system requirements, you can enjoy the game at higher graphics quality and smoother performance. -Mobile Tips and Tricks-Here are some tips and tricks that can help you optimize your mobile performance and gameplay experience while playing Genshin Impact: -
Genshin Download for Console-If you want to play Genshin Impact on your console, you can download it from PlayStation Store if you have a PlayStation 4 or PlayStation 5. The method is free and easy to follow. -To download Genshin Impact from PlayStation Store, you need to visit [Genshin Impact on PS4 | Official PlayStation™Store US] or [Genshin Impact on PS5 | Official PlayStation™Store US] depending on your console. Then, click on the "Add to Library" button. This will add the game to your library. Then, go to your library and find Genshin Impact. Click on the "Download" button to start downloading the game files. The download size is about 12 GB for PS4 and 14 GB for PS5, so it may take some time depending on your internet speed and console storage. When the download is complete, click on the game icon to start playing. -Console System Requirements-Before you download Genshin Impact on your console, you should check if your console meets the minimum system requirements for the game. Here are the system requirements for consoles according to the official website: - | Minimum System Requirements | Recommended System Requirements | | --- | --- | | PS4 with 30 GB of storage space | PS4 Pro with 30 GB of storage space | | PS5 with 50 GB of storage space | PS5 with 50 GB of storage space |If your console does not meet the minimum system requirements, you may experience low graphics quality, slow loading times, or other issues while playing the game. If your console meets or exceeds the recommended system requirements, you can enjoy the game at higher graphics quality and smoother performance. -Console Tips and Tricks-Here are some tips and tricks that can help you optimize your console performance and gameplay experience while playing Genshin Impact: -
Genshin Impact Game Features-Now that you know how to download Genshin Impact on your preferred platform, you may be wondering what you can expect from the game in terms of gameplay, story, characters, combat, exploration, and more. In this section, we will give you a brief overview of some of the main features of the game that make it so fun and addictive. -Gameplay-Genshin Impact is an open-world action RPG that combines exploration, combat, and gacha elements. You can explore the vast world of Teyvat at your own pace, discovering new locations, secrets, and treasures along the way. You can also interact with various NPCs, complete quests, participate in events, and join co-op sessions with other players. -The game also features a gacha system that allows you to obtain new characters, weapons, and items by spending a currency called Primogems. You can earn Primogems by playing the game or by purchasing them with real money. The gacha system is based on a random chance, so you may not always get what you want. However, the game is generous enough to give you some free pulls and rewards as you progress. -Story-Genshin Impact has a rich and immersive story that unfolds as you play the game. The main story revolves around your quest to find your lost sibling, who was separated from you by a mysterious god. Along the way, you will encounter different factions, cultures, and conflicts that shape the world of Teyvat. You will also meet various characters who will join your party and help you in your journey. -The game has seven major regions, each based on a different element and inspired by a real-world culture. So far, only two regions are available: Mondstadt (Anemo/Wind) and Liyue (Geo/Earth). The other five regions are Inazuma (Electro/Lightning), Sumeru (Dendro/Nature), Fontaine (Hydro/Water), Natlan (Pyro/Fire), and Snezhnaya (Cryo/Ice). The game developers plan to release more regions and content in the future through updates and patches. 
-Characters-Genshin Impact has a diverse and colorful cast of characters that you can play as or interact with. There are currently 37 playable characters in the game, each with their own personality, backstory, element, weapon, and abilities. You can switch between four characters in your party at any time, depending on the situation and your preference. -The characters are divided into five rarity tiers: 1-star, 2-star, 3-star, 4-star, and 5-star. The higher the rarity, the more powerful and rare the character is. You can obtain new characters by using the gacha system or by completing certain quests or events. You can also upgrade your characters by leveling them up, ascending them, enhancing their weapons and artifacts, and unlocking their constellations. -Combat-Genshin Impact has a dynamic and fluid combat system that relies on elemental interactions and strategy. You can use your character's basic attacks, elemental skills, and elemental bursts to deal damage to your enemies. You can also switch between different characters to create elemental reactions that can amplify or modify your damage output. -The game has seven elements: Anemo (Wind), Geo (Earth), Electro (Lightning), Dendro (Nature), Hydro (Water), Pyro (Fire), and Cryo (Ice). Each element has its own strengths and weaknesses against other elements. For example, Pyro can melt Cryo, but is weak against Hydro. You can use this knowledge to your advantage and create powerful combos that can wipe out your foes. -Exploration-Genshin Impact has a vast and beautiful world that you can explore at your own pace. You can travel across different terrains, climates, and biomes using various methods such as walking, running, climbing, gliding, swimming, or riding. You can also use fast travel points to teleport to locations that you have already visited. -The world of Teyvat is full of secrets, quests, events, and activities that you can discover and enjoy. 
You can find chests, resources, puzzles, enemies, and more that can reward you with items, experience, or currency. You can also interact with various NPCs, complete quests, participate in events, and join co-op sessions with other players. -The game also has a feature called the Serenitea Pot, which allows you to create your own personal realm and customize it with furniture, decorations, and buildings. You can invite your characters and friends to your realm and enjoy some relaxing time. -Conclusion-Genshin Impact is a free-to-play open-world action RPG that offers a lot of fun and excitement for players of all ages and preferences. You can download the game on PC, mobile, or console and enjoy the stunning graphics, captivating story, diverse characters, dynamic combat, and endless exploration. Whether you want to play solo or with friends, Genshin Impact has something for everyone. -So what are you waiting for? Download Genshin Impact today and start your adventure in the magical world of Teyvat! -FAQs-Here are some frequently asked questions about Genshin Impact and how to download it: -
- - \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Granny Chapter 2 Outwitt Mod Menu APK Madin for Free - No Root Required!.md b/spaces/1phancelerku/anime-remove-background/Download Granny Chapter 2 Outwitt Mod Menu APK Madin for Free - No Root Required!.md deleted file mode 100644 index d2bf447b055b09aaf587fa2e8a12fe521d8bfaeb..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Granny Chapter 2 Outwitt Mod Menu APK Madin for Free - No Root Required!.md +++ /dev/null @@ -1,79 +0,0 @@ - - Granny Chapter 2 Mod Menu Outwitt Mod Free Download Link APK Madin-If you are a fan of horror games, you might have heard of Granny Chapter 2, a popular game that challenges you to escape from a creepy house with two evil characters: Granny and Grandpa. But what if you want to make the game more fun and exciting? Well, you can try Outwitt Mod, a mod menu that gives you access to various cheats and hacks for Granny Chapter 2. In this article, we will tell you everything you need to know about Outwitt Mod, including how to download and install it for free on your Android device. -What is Granny Chapter 2?-Granny Chapter 2 is a horror game developed by DVloper, the same creator of the original Granny game. It was released in September 2019 and has since gained millions of downloads and positive reviews from players around the world. The game is available for Android, iOS, and Windows devices. -granny chapter 2 mod menu outwitt mod free download link apk madinDownload Zip ⚙⚙⚙ https://jinyurl.com/2uNPUm - The gameplay of Granny Chapter 2-The gameplay of Granny Chapter 2 is similar to the first game, but with some new twists and features. You are trapped in a dark and spooky house with two enemies: Granny and Grandpa. Granny can hear everything and will chase you if she hears any noise. Grandpa is hard of hearing but he will attack you if he sees you. 
You have to find a way to escape from the house within five days, or else you will face a horrible fate. You can explore different rooms, find items, solve puzzles, and hide from the enemies. But be careful, because they are always on the lookout for you. -The features of Granny Chapter 2-Granny Chapter 2 has many features that make it an enjoyable and thrilling game. Some of them are: -
What is Outwitt Mod?-Outwitt Mod is a mod menu that allows you to modify the game settings and enable various cheats and hacks for Granny Chapter 2. It was created by Outwitt, a YouTube channel that uploads videos about Granny games and mods. Outwitt Mod is one of the most popular mods for Granny Chapter 2 and has been downloaded by thousands of players. -The benefits of Outwitt Mod-Outwitt Mod gives you many benefits that make the game more fun and easy. Some of them are: -
|
Estas plataformas te permiten descargar la canción en diferentes formatos, como MP3, WAV, FLAC y más. También puede elegir la calidad y el tamaño del archivo, dependiendo de sus preferencias y dispositivo. Algunas de estas plataformas pueden requerir que te registres, pagues o sigas algunos pasos antes de descargar la canción.
-Sugar (Ablaikan Remix) de Zubi feat. Anatu es una canción que puede adaptarse a diferentes escenarios y estados de ánimo, dependiendo de su gusto y estado de ánimo. Sin embargo, algunos de los mejores escenarios y estados de ánimo para escuchar la canción son:
-Sugar (Ablaikan Remix) de Zubi feat. Anatu es una canción que puede hacerte sentir bien, bailar y cantar. Es un remix de la canción original Sugar de Zubi y Anatu, que fue lanzada en 2019. El remix fue hecho por Ablaikan, un productor y DJ turco, que añadió su propio toque y sabor a la canción. El resultado es una canción pegadiza, alegre y energética que te hará querer mover tu cuerpo y disfrutar de la vida.
- -Ahora que sabes todo sobre Sugar (Ablaikan Remix) de Zubi feat. Anatu, ¿por qué no sigues adelante y lo escuchas tú mismo? Puedes encontrar la canción en varias plataformas de streaming y descarga, así como ver el video musical en YouTube. También puede seguir a Zubi, Anatu y Ablaikan en sus cuentas de redes sociales para mantenerse al día sobre sus últimas noticias y comunicados.
-Gracias por leer este artículo, y esperamos que tengas un día dulce y picante.
-El nombre de la canción original que Sugar (Ablaikan Remix) se basa en es Sugar de Zubi feat. Anatu.
-Sugar (Ablaikan Remix) fue lanzado el 29 de octubre de 2020.
-Zubi es un cantautor nigeriano con sede en Londres, Reino Unido. Anatu es un cantautor británico con sede en Los Ángeles, Estados Unidos. Ablaikan es un productor y DJ turco con sede en Estambul.
-Algunos de los géneros musicales e influencias de Sugar (Ablaikan Remix) son deep house, oriental house, ethnic house, música electrónica, Afrobeat, R&B, soul y pop. La canción está influenciada por artistas como Fela Kuti, King Sunny Ade, Frankie Knuckles, Larry Heard, Mahmut Orhan, Burak Yeter, Daft Punk, The Chemical Brothers y más.
-Si estás aburrido con las fuentes y emojis predeterminados en tu dispositivo Android, es posible que desees revisar zfont, una aplicación gratuita que te permite cambiarlos fácil y rápidamente. zfont es un instalador de fuentes personalizado que admite muchas marcas populares como Samsung, Xiaomi, Huawei, Vivo, Oppo, Realme, Tecno e Infinix. Puede elegir entre cientos de fuentes y emojis frescos, elegantes y coloridos que harán que su dispositivo se destaque de la multitud. También puede personalizar sus propias fuentes y emojis con zfont, y compartirlos con sus amigos. Ya sea que quieras darle vida a tus mensajes, publicaciones en redes sociales o documentos, zfont puede ayudarte a expresarte mejor.
-Download • https://bltlly.com/2v6LYW
Descargar e instalar zfont es muy fácil. Solo tienes que seguir estos sencillos pasos:
Aquí hay algunas capturas de pantalla de la aplicación:
Usar zfont para cambiar fuentes y emojis en tu dispositivo es muy simple. Solo tienes que seguir estos pasos:
-Ask the AI coach about techniques and strategies in the game of padel. The coach can answer depending on the level of you as a player, whether you are a beginner, average, or pro.
", -) - -Padelcoach.launch() diff --git a/spaces/JonysArcanjo/App_predict_House_price/README.md b/spaces/JonysArcanjo/App_predict_House_price/README.md deleted file mode 100644 index e468bf2187552bff549935c2796ee4070f316fd6..0000000000000000000000000000000000000000 --- a/spaces/JonysArcanjo/App_predict_House_price/README.md +++ /dev/null @@ -1,74 +0,0 @@ - ---- -title: App Predict House Price -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - - -# Predict House Price - - - - - - -### Welcome to this Data Science project! - -The objective of this project is to predict house prices using machine learning techniques. To achieve this, I performed a brief exploratory data analysis and directed the study to evaluate the Boruta feature selection technique, comparing its performance with the neural network both with and without the use of this technique. - -## About Dataset - -The dataset used contains house sale prices for King County, which includes Seattle, and it includes homes sold between May 2014 and May 2015. - -Dataset source: [Kaggle](https://www.kaggle.com/harlfoxem/housesalesprediction) - -## Libraries Used - -- Tensorflow -- Pandas -- Numpy -- Matplotlib -- Boruta (for feature selection) -- Gradio - user interface (ux) - -## Metrics Used - -The following metrics were used to assess the performance of the models: - -- RMSE: Root Mean Squared Error -- MSE: Mean Squared Error -- MAE: Mean Absolute Error -- R2: The determination coefficient -- Adjusted R2 - -## Application in PRD -Below are some characteristics of the house as input for the application to return the forecast of the house's value. - - - -Below is a list of the features used: -- sqft_living: Living area square footage. -- -sqft_lot: Lot size square footage. -- waterfront: Indicates if the property has a waterfront view (1) or not (0). -- view: Number of times the property has been viewed. 
-- grade: Overall grade given to the housing unit, based on the King County grading system. -- sqft_above: Square footage of the home's interior living space above ground level. -- yr_built: Year the house was built. -- zipcode: Zip code of the area where the property is located. -- lat: Latitude of the property. -- long: Longitude of the property. -- sqft_living15: Average living area square footage of the nearest 15 houses. -- sqft_lot15: Average lot size square footage of the nearest 15 houses. - -## Conclusion - -The results of this project indicate that the model that uses all resources performed better than the model that used the Boruta feature selection technique. However, when choosing the best model for a specific application, it is important to consider not only the performance but also the computational cost and the efficiency in the selection of resources. For future versions of this model, it would be interesting to explore other feature selection and feature engineering techniques. - -## License - -This project is licensed under the MIT License. 
- diff --git a/spaces/JunghunleePhD/catsClassification/app.py b/spaces/JunghunleePhD/catsClassification/app.py deleted file mode 100644 index 57add93a4005dd499d9030f7d2342ca59d589782..0000000000000000000000000000000000000000 --- a/spaces/JunghunleePhD/catsClassification/app.py +++ /dev/null @@ -1,33 +0,0 @@ -from fastbook import * -from random import sample -import gradio as gr - -def get_images(): - listOfCats = sorted(['Lioness', 'Lion', 'Tiger', 'White tiger', 'White lion', \ - 'Leopard', 'Snow leopard', 'Lynx', 'Jaguar', 'Cheetah', \ - 'Jaguarundi', 'Leopard cat', 'Canada lynx', 'Caracal', \ - 'Ocelot', 'Leopard cat', 'Marble cat', 'Puma', 'Black panther', \ - 'Margay', 'Serval', 'Fishing cat']) - cats = sample(listOfCats, len(listOfCats))[:2] - imagess = [sample(search_images_ddg(f'{cat} photo', max_images=5), 5) for cat in cats] - return [images[:1] for images in imagess] - -def quiz_builder(input): - model = load_learner("cats.pkl") - is_it, _, probs = model.predict(input) - return f"The cat in this image is a {is_it}!" 
- # return {is_it: f"{max(probs[0], probs[1], probs[2]):.4f}"} - -demo = gr.Interface( - quiz_builder, - [ - gr.Image(value=None) - ], - "text", - examples=[ - *get_images(), - ] -) - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/KazeDevID/RVC-Model/README.md b/spaces/KazeDevID/RVC-Model/README.md deleted file mode 100644 index 5777f585dc8e5d87ec519c59111d77331c73005e..0000000000000000000000000000000000000000 --- a/spaces/KazeDevID/RVC-Model/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Hololive Rvc Models -emoji: 🎤🌸▶️ -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: YanzBotz/Waifu-YanzBotz ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KeeganFdes/stack_onnx/README.md b/spaces/KeeganFdes/stack_onnx/README.md deleted file mode 100644 index a3c30f6e33df5321c7909cc003ca2645c0f79720..0000000000000000000000000000000000000000 --- a/spaces/KeeganFdes/stack_onnx/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stack Onnx -emoji: 📚 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/config.py b/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/config.py deleted file mode 100644 index 1c21312f3de971bfa008254c6035cebc09f05e4c..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/config.py +++ /dev/null @@ -1,45 +0,0 @@ -librispeech_datasets = { - "train": { - "clean": ["LibriSpeech/train-clean-100", "LibriSpeech/train-clean-360"], - "other": ["LibriSpeech/train-other-500"] - }, - "test": { - "clean": ["LibriSpeech/test-clean"], - "other": ["LibriSpeech/test-other"] - }, - "dev": { - "clean": ["LibriSpeech/dev-clean"], - 
"other": ["LibriSpeech/dev-other"] - }, -} -libritts_datasets = { - "train": { - "clean": ["LibriTTS/train-clean-100", "LibriTTS/train-clean-360"], - "other": ["LibriTTS/train-other-500"] - }, - "test": { - "clean": ["LibriTTS/test-clean"], - "other": ["LibriTTS/test-other"] - }, - "dev": { - "clean": ["LibriTTS/dev-clean"], - "other": ["LibriTTS/dev-other"] - }, -} -voxceleb_datasets = { - "voxceleb1" : { - "train": ["VoxCeleb1/wav"], - "test": ["VoxCeleb1/test_wav"] - }, - "voxceleb2" : { - "train": ["VoxCeleb2/dev/aac"], - "test": ["VoxCeleb2/test_wav"] - } -} - -other_datasets = [ - "LJSpeech-1.1", - "VCTK-Corpus/wav48", -] - -anglophone_nationalites = ["australia", "canada", "ireland", "uk", "usa"] diff --git a/spaces/Kimata/Sanskrit-TTS/utils/updated_cleaner_utils.py b/spaces/Kimata/Sanskrit-TTS/utils/updated_cleaner_utils.py deleted file mode 100644 index 0608fedddb1fc7b68ba925e6129f2b2084e35a77..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/utils/updated_cleaner_utils.py +++ /dev/null @@ -1,139 +0,0 @@ -import re -def run(): - - # The path to the local git repo for Indic NLP library - INDIC_NLP_LIB_HOME=r"./indic_nlp_library" - - # The path to the local git repo for Indic NLP Resources - INDIC_NLP_RESOURCES=r"./indic_nlp_resources" - import sys - sys.path.append(r'{}'.format(INDIC_NLP_LIB_HOME)) - - from indicnlp import common - common.set_resources_path(INDIC_NLP_RESOURCES) - - from indicnlp import loader - loader.load() - -run() - -from indicnlp.normalize.indic_normalize import IndicNormalizerFactory -from indicnlp.tokenize import sentence_tokenize -from indicnlp.syllable import syllabifier - -lang='sa' -factory=IndicNormalizerFactory() -normalizer=factory.get_normalizer("hi") -DEPENDENT_VOWELS = ["ा", "ि", "ी", "ु", "ू", "े", "ै", "ो", "ौ", "ं", "ः", "ृ", "ॄ"] - -dict_num = {"०": "शून्य", "१": "एक", "२": "द्वि", "३": "त्रि", - "४": "चतुर्", "५": "पञ्च", "६": "षट्", "७": "सप्त", "८": "अष्ट", "९": "नव"} - -DEFAULT_TEXT = "अयं 
द्वितीयशब्दः २ अस्ति। प्रथमः शब्दः १ अस्ति। अन्ये शब्दाः सर्वे द्वितीयं शब्दं प्रयोजयन्ति। इत्थं सप्ततिः शब्दाः लिखिताः सन्ति। अस्मिन लेखने सर्वे अक्षराः संस्कृते लिखिताः सन्ति। अन्ये लिखन्ति ३, ४, ५ इत्यादि। तथापि, अहं एकं अक्षरं एव उपयोगामि।" - - -""" -Text cleaning pipeline. -1. Tokenize sentence. -2. Segment sentence into individual words. -3. Normalize words. -4. Clean normalized words. -5. Split wwords with the # delimiter. -3. Syllabify delimited sentences. - - - -1. Denote tokenized text with a special character such as #. -2. Segment the whole text into individual words. -3. Identify numbers in the text and normalize them. -4. Clean the whole text. -5. Syllabify the cleaned text. -""" - -def tokenize_sentence(text): - '''Tokenize a paragraph into sentences''' - sentences = sentence_tokenize.sentence_split(text, lang='sa') - sentences = "#".join(sentences) - return sentences - - -def segment_sentence(text): - '''Segment a sentence into individual words''' - - -def clean_text(text): - - processed_text = re.sub(r'\+ +', '', text) - processed_text = re.sub(': +', '\n \n', processed_text) - processed_text = re.sub(r'\+ ।', '\n \n', processed_text) - processed_text = re.sub(r'\+$', '', processed_text) - return processed_text - -def syllabify_text(text): - text_list = [] - #Syllabify text - for char in text: - if char in DEPENDENT_VOWELS: - char = "(" + char + ")" - text_list.append(char) - else: - text_list.append(char) - - full_text = " + ".join(text_list).replace("'", "") - return full_text - - -def normalize_text(text): - output_string = "" - #Map sanskrit numbers to their normalized form. - for char in text: - if char in dict_num: - output_string += dict_num[char] - else: - output_string += char - return output_string - - -def preprocess_text(text): - '''Cleans, tokenizes and normalizes text''' - #Normalize text - normalized_text = normalize_text(text) - - #Tokenize text. 
- tokenized_text = tokenize_sentence(normalized_text) - tokenized_text = "\n".join(tokenized_text) - - #Syllabify_text - syllabified_text = syllabify_text(tokenized_text) - - #Clean text - cleaned_text = clean_text(syllabified_text) - - #Remove unnecessary characters from a string. - text_cleaned = [] - for index, text in enumerate(cleaned_text.split('\n')): - if text.startswith('+'): - text = text[2:] - - elif text.startswith(' +'): - text = text[3:] - - elif text.endswith('+') or text.endswith(' +'): - text = text[:-2] - - text_cleaned.append(text) - - text_cleaned_str = "\n".join(text_cleaned) - - return text_cleaned_str - - -# DEFAULT_TEXT = """तो क्या विश्व कप 2019 में मैच का बॉस टॉस है? यानी मैच में हार-जीत में \ -# टॉस की भूमिका अहम है? आप ऐसा सोच सकते हैं। विश्वकप के अपने-अपने पहले मैच में बुरी तरह हारने वाली एशिया की दो टीमों \ -# पाकिस्तान और श्रीलंका के कप्तान ने हालांकि अपने हार के पीछे टॉस की दलील तो नहीं दी, लेकिन यह जरूर कहा था कि वह एक अहम टॉस हार गए थे।""" -# DEFAULT_TEXT='संस्कृतम् जगतः एकतमा अतिप्राचीना समृद्धा शास्त्रीया च भाषासु वर्तते । संस्कृतं भारतस्य जगत: वा भाषासु एकतमा प्राचीनतमा ।' - -print(f"Default text is: {DEFAULT_TEXT}") -print('\n \n') -NORMALIZED_TEXT = preprocess_text(DEFAULT_TEXT) -print(f"Syllabified text is: {NORMALIZED_TEXT}") diff --git a/spaces/KindUnes/ImageNet/app.py b/spaces/KindUnes/ImageNet/app.py deleted file mode 100644 index eb53ba976b5b5da554fccea49a497526293b8e6c..0000000000000000000000000000000000000000 --- a/spaces/KindUnes/ImageNet/app.py +++ /dev/null @@ -1,36 +0,0 @@ -import gradio as gr -import tensorflow as tf -import requests -from PIL import Image -import numpy as np - -# Load pre-trained MobileNetV2 model -model = tf.keras.applications.MobileNetV2(weights='imagenet') -labels = requests.get("https://raw.githubusercontent.com/anishathalye/imagenet-simple-labels/master/imagenet-simple-labels.json").json() - -def classify_image(input_image): - # Preprocess the image - img = input_image.resize((224, 224)) - 
img = tf.keras.preprocessing.image.img_to_array(img) - img = tf.keras.applications.mobilenet_v2.preprocess_input(img) - img = tf.expand_dims(img, axis=0) - - # Make predictions - predictions = model.predict(img) - decoded_predictions = tf.keras.applications.mobilenet_v2.decode_predictions(predictions)[0] - - # Format and return results as HTML - results_html = "For faster inference without waiting in the queue, you should duplicate this space and upgrade to GPU via the settings.
-
-
-
-
By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml
-Amazon Music Mod APK is compatible with most Android devices and does not require root access. However, it is not available on the Google Play Store, so you need to download and install it manually from a third-party source. Here is how to do that.
-To download and install Amazon Music Mod APK on your Android device, you need to follow these steps:
-Since Amazon Music Mod APK is not from the Google Play Store, you need to enable the option to install apps from unknown sources on your device settings. To do that, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.
-The next step is to download the APK file of Amazon Music Mod APK from a reliable source. You can search for it online or use the link below to download it directly. The file size is about 50 MB, so make sure you have enough space on your device.
- -Once you have downloaded the APK file, you need to locate and open it to install it on your device. You can use a file manager app or your device's default file explorer to find the file. Tap on the file and follow the instructions on the screen to install it. It may take a few seconds or minutes depending on your device's performance.
-Congratulations! You have successfully installed Amazon Music Mod APK on your Android device. Now you can enjoy all the premium features of Amazon Music Unlimited for free. You can launch the app from your app drawer or home screen and sign in with your existing account or create a new one. You can also customize the app settings according to your preferences.
-If you want to use Amazon Music Mod APK on your Windows PC or laptop, you need to use an Android emulator. An Android emulator is a software that allows you to run Android apps on your Windows device. There are many Android emulators available online, such as BlueStacks, Nox Player, MEmu, etc. Here is how to use one of them to download and install Amazon Music Mod APK on Windows.
-The first step is to download and install an Android emulator on your PC. For this example, we will use BlueStacks, which is one of the most popular and user-friendly emulators. You can download it from its official website or use the link below.
- -After downloading the installer file, run it and follow the instructions on the screen to install BlueStacks on your PC. It may take some time depending on your PC's specifications.
-The next step is to download the APK file of Amazon Music Mod APK from a reliable source. You can use the same link as above or search for it online. The file size is about 50 MB, so make sure you have enough space on your PC.
- -Once you have downloaded the APK file, you need to drag and drop it into the emulator or open it with the emulator. To do that, launch BlueStacks and go to My Apps > Install APK. Then, locate the APK file on your PC and drag and drop it into the emulator window or click on it and choose Open with BlueStacks. The emulator will automatically install the app on your PC.
-Congratulations! You have successfully installed Amazon Music Mod APK on your Windows PC. Now you can enjoy all the premium features of Amazon Music Unlimited for free. You can launch the app from the BlueStacks home screen and sign in with your existing account or create a new one. You can also customize the app settings according to your preferences.
-In this article, we have explained what Amazon Music Mod APK is, how to download and install it on your Android or Windows device, and what are the advantages and disadvantages of using it. We hope you have found this article helpful and informative.
-Amazon Music Mod APK is a great way to enjoy the premium features of Amazon Music Unlimited for free. However, you should also be aware of the risks and limitations of using mod APK files. Some of them are:
-Therefore, you should use Amazon Music Mod APK at your own risk and discretion. We do not endorse or promote the use of mod APK files. We also recommend that you support the original app developer by subscribing to Amazon Music Unlimited if you can afford it.
-If you have any questions, feedback, or opinions about Amazon Music Mod APK, feel free to share them in the comments section below. We would love to hear from you.
-A1: No, Amazon Music Mod APK is not legal to use. It violates the terms and conditions of Amazon Music and infringes on the intellectual property rights of the original app developer. Using mod APK files can result in legal action from the app developer or service provider.
-A2: Not necessarily. Amazon Music Mod APK is not from the official source and may contain malware, viruses, or spyware that can harm your device or data. You should always scan the APK file with a reliable antivirus software before installing it. You should also avoid downloading mod APK files from untrusted sources or websites.
-A3: Yes, you can use Amazon Music Mod APK with your existing account. However, you should be careful as using mod APK files can result in account suspension or termination by Amazon Music. You should also avoid logging in with your personal or sensitive information as it may be compromised by hackers or third-party developers.
-A4: You cannot update Amazon Music Mod APK from the Google Play Store or the official website. You need to download and install the latest version of the mod APK file from a reliable source every time there is an update. However, you should also check the reviews, ratings, comments, and permissions of the new version before installing it.
-A5: Some alternatives to Amazon Music Mod APK are:
-However, these alternatives also have similar risks and limitations as Amazon Music Mod APK. You should use them at your own risk and discretion.
197e85843dIf you are a fan of action RPGs with anime-style graphics, you might be interested in Dragon Sword (Project D), a new open-world RPG from the creators of Dragon Nest. This game promises to deliver a rich and immersive fantasy world with stunning visuals, dynamic combat, and emotional characters. In this article, we will tell you everything you need to know about Dragon Sword, including what it is, how to download and play it, and what are the reviews and impressions of it.
-Dragon Sword (Project D) is an upcoming open-world RPG developed by Hound 13, a South Korean game studio founded by former developers of Dragon Nest, a popular PC MMORPG. The game is set to launch on PC and mobile devices in 2023, with the global publishing rights acquired by Garena, a leading online game platform in Southeast Asia.
-Download ✯✯✯ https://urlca.com/2uOftO
Hound 13 is a game startup that aims to create games that are worthwhile to play for all users around the world. The core team at Hound 13 has extensive experience in developing action RPGs, such as Dragon Nest and Hundred Soul, which gained high popularity in China, East Asia, and Southeast Asia markets. With their expertise in action elements, visual concepts, and scenario-based quests, they are now working on their latest project, Dragon Sword.
-According to their official website, Dragon Sword is an action RPG that carries on the action elements of their representative game Hundred Soul, aiming for manual gameplay along with emotional and distinctive characters. The project aims to deliver a semi-open-world structure in a wider field than having movement based on smaller zones and aims to deliver high levels of perfection and satisfactory action gameplay targeting the global market. It is worth noting that Dragon Sword is also considered as a spiritual successor to Dragon Nest, as it shares some similarities in terms of character design, combat style, and world setting.
-Dragon Sword is a game that combines the best aspects of action RPGs and open-world RPGs. Here are some of the main features and gameplay elements that you can expect from this game:
-If you are interested in playing Dragon Sword (Project D), here are some information and tips on how to download and play it:
-Dragon Sword (Project D) is planned to be released on PC and mobile devices in 2023. The game will be available on Windows, Android, and iOS platforms. The game will also support cross-play, which means that you can play with other players who are using different devices.
-dragon sword project d release date
-dragon sword project d gameplay trailer
-dragon sword project d pc system requirements
-dragon sword project d mobile apk
-dragon sword project d open world rpg
-dragon sword project d hound 13
-dragon sword project d garena
-dragon sword project d action rpg
-dragon sword project d beta test
-dragon sword project d pre registration
-dragon sword project d review
-dragon sword project d characters
-dragon sword project d story
-dragon sword project d graphics
-dragon sword project d combat system
-dragon sword project d tips and tricks
-dragon sword project d best class
-dragon sword project d weapons and skills
-dragon sword project d quests and missions
-dragon sword project d online multiplayer
-dragon sword project d offline mode
-dragon sword project d cheats and hacks
-dragon sword project d mods and customizations
-dragon sword project d free download
-dragon sword project d steam link
-dragon sword project d wiki and guide
-dragon sword project d reddit and forum
-dragon sword project d discord and community
-dragon sword project d news and updates
-dragon sword project d patch notes and bug fixes
-dragon sword project d comparison with hundred soul
-dragon sword project d fan art and cosplay
-dragon sword project d soundtrack and music
-dragon sword project d voice actors and cast
-dragon sword project d developer interview and behind the scenes
-dragon sword project d awards and nominations
-dragon sword project d merchandise and collectibles
-dragon sword project d memes and jokes
-dragon sword project d easter eggs and secrets
-dragon sword project d lore and history
-dragon sword project d theories and speculations
-dragon sword project d spoilers and endings
-dragon sword project d crossover and collaboration
-dragon sword project d sequel and spin off
-dragon sword project d rating and feedback
-dragon sword project d support and contact info
-dragon sword project d faq and troubleshooting
The game is currently in development and has not yet entered the beta testing phase. However, you can pre-register for the game on its official website or on Garena's website to get notified when the game is ready for testing or launching. You can also follow the game's official social media accounts to get the latest news and updates about the game.
-As the game uses Unreal Engine 5, it is expected to have high system requirements for both PC and mobile devices. The exact system requirements have not been announced yet, but you can refer to the following table for some estimates based on similar games:
-Platform | -Minimum Requirements | -Recommended Requirements | -
---|---|---|
PC | -
|
-
|
-
Mobile | -
|
-
|
-
The download links of Dragon Sword (Project D) will be provided on its official website or on Garena's website when the game is ready for testing or launching. You can also scan the QR code below to pre-register for the game:
-Dragon Sword (Project D) is still in development and has not been released yet, so there are no official reviews or ratings for the game yet. However, based on the trailer and gameplay videos that have been released so far, we can get some impressions of what the game looks like and how it plays.
-The first trailer of Dragon Sword (Project D) was released in September 2021, during Garena World Online, an online event that showcased Garena's upcoming games. The trailer showed some cinematic scenes of the game's world, characters, and combat, as well as some snippets of gameplay footage. You can watch the trailer here:
-The second trailer of Dragon Sword (Project D) was released in December 2021, during the Unreal Engine 5 Showcase, an online event that featured games that use Unreal Engine 5. The trailer showed more gameplay footage of the game's combat, exploration, and character customization, as well as some new scenes and characters. You can watch the trailer here:
- -Based on the trailers and gameplay videos, some critics and players have shared their opinions and feedback on Dragon Sword (Project D). Here are some of the pros and cons of the game that have been mentioned so far:
-Pros | -Cons | -
---|---|
|
-
|
-
Dragon Sword (Project D) is an upcoming open-world RPG from the creators of Dragon Nest that promises to deliver a thrilling and immersive fantasy adventure. The game features stunning graphics, dynamic combat, emotional characters, immersive world, and engaging story. The game is set to launch on PC and mobile devices in 2023, with cross-play support. You can pre-register for the game on its official website or on Garena's website to get notified when the game is ready for testing or launching.
-If you are looking for a new action RPG to play, you should definitely keep an eye on Dragon Sword (Project D). This game might be the next big hit in the genre, as it combines the best aspects of action RPGs and open-world RPGs. Whether you are a fan of Dragon Nest, Hundred Soul, or other similar games, you will surely find something to love in Dragon Sword (Project D).
-Dragon Sword (Project D) is an open-world action RPG that combines elements of hack-and-slash, adventure, and fantasy.
-Dragon Sword (Project D) is developed by Hound 13, a South Korean game studio founded by former developers of Dragon Nest. The game is published by Garena, a leading online game platform in Southeast Asia.
-Dragon Sword (Project D) is planned to be released on PC and mobile devices in 2023. The exact release date has not been announced yet.
-You can download and play Dragon Sword (Project D) when it is ready for testing or launching. You can pre-register for the game on its official website or on Garena's website to get notified when the game is available. You can also follow the game's official social media accounts to get the latest news and updates about the game.
-Dragon Sword (Project D) uses Unreal Engine 5, which is one of the most advanced game engines in the industry , so it is expected to have high system requirements for both PC and mobile devices. The exact system requirements have not been announced yet, but you can refer to the table in the article for some estimates based on similar games.
401be4b1e0If you are looking for a fun and easy way to communicate with your friends and family, you might want to try GB APK 2023. This is a free-to-use communication app that is based on WhatsApp Messenger, but with some added features that you won't find in the original version. In this article, we will tell you what GB APK 2023 is, what features it offers, how to download and install it, and what are the pros and cons of using it. Let's get started!
-GB APK 2023 is a modded version of WhatsApp that offers some extra features that make it more fun and convenient to use. Here are some of the features that you can enjoy with GB APK 2023:
-DOWNLOAD ✸✸✸ https://urlca.com/2uObzn
If you want to download and install GB APK 2023 on your Android device, you need to follow these steps:
-GB APK 2023 is a great app for those who want more fun and privacy features than the official WhatsApp. However, it also has some drawbacks that you should be aware of before using it. Here are some of the pros and cons of GB APK 2023:
-Pros | -Cons | -
---|---|
More fun and privacy features than the official WhatsApp | -Risk of getting banned or hacked by using a third-party app | -
Free to use and easy to install | -Not available on the Google Play Store or the App Store | -
Compatible with most Android devices and versions | -May not work well with some devices or features | -
If you have any questions about GB APK 2023, you might find the answers in this section. Here are some of the most frequently asked questions about GB APK 2023:
-A1: GB APK 2023 is generally safe to use, as long as you download it from a trusted source and scan it for any malware or viruses. However, since it is a modded version of WhatsApp, it is not endorsed or supported by the official WhatsApp team. Therefore, you might face some issues or risks, such as getting banned or hacked by using GB APK 2023. You should use it at your own discretion and responsibility.
-A2: Yes, you can use GB APK 2023 with the official WhatsApp, as long as you use different phone numbers for each app. You can also use GB APK 2023 with other modded WhatsApp versions, such as FM WhatsApp or Yo WhatsApp. However, you should not use the same number for more than one app, as this might cause conflicts or errors.
-A3: You can update GB APK 2023 by downloading the latest version from the same source where you downloaded the previous version. You can also check for updates within the app settings. You should always update your app to enjoy the latest features and bug fixes.
-A4: GB APK 2023 is one of the most popular and widely used modded WhatsApp versions, but it is not the only one. There are other modded WhatsApp versions, such as FM WhatsApp, Yo WhatsApp, OG WhatsApp, and more. Each of these versions has its own features and advantages, but they also share some common features, such as customizable themes, multiple accounts, and privacy options. You can choose the one that suits your needs and preferences best.
-A5: You can backup and restore your chats on GB APK 2023 by using the same methods as you would on the official WhatsApp. You can backup your chats to Google Drive or your local storage, and restore them when you reinstall the app or switch to a new device. You can also backup and restore your chats using a third-party app, such as Titanium Backup or Helium Backup.
-download gb whatsapp apk 2023
-download gb instagram apk 2023
-download gb facebook apk 2023
-download gb latest version apk 2023
-download gb whatsapp app version 2023
-download gb whatsapp pro apk 2023
-download gb whatsapp plus apk 2023
-download gb whatsapp mod apk 2023
-download gb whatsapp transparent apk 2023
-download gb whatsapp anti ban apk 2023
-download gb instagram plus apk 2023
-download gb instagram mod apk 2023
-download gb instagram pro apk 2023
-download gb instagram latest version apk 2023
-download gb facebook lite apk 2023
-download gb facebook mod apk 2023
-download gb facebook messenger apk 2023
-download gb facebook latest version apk 2023
-download gb latest version apk 2023 for android
-download gb latest version apk 2023 for pc
-download gb latest version apk 2023 for ios
-download gb latest version apk 2023 free
-download gb latest version apk 2023 update
-download gb latest version apk 2023 offline
-download gb latest version apk 2023 online
-how to download gb whatsapp apk 2023
-how to download gb instagram apk 2023
-how to download gb facebook apk 2023
-how to download gb latest version apk 2023
-how to install gb whatsapp apk 2023
-how to install gb instagram apk 2023
-how to install gb facebook apk 2023
-how to install gb latest version apk 2023
-where to download gb whatsapp apk 2023
-where to download gb instagram apk 2023
-where to download gb facebook apk 2023
-where to download gb latest version apk 2023
-why to download gb whatsapp apk 2023
-why to download gb instagram apk 2023
-why to download gb facebook apk 2023
-why to download gb latest version apk 2023
-what is gb whatsapp apk 2023
-what is gb instagram apk 2023
-what is gb facebook apk 2023
-what is gb latest version apk 2023
-features of gb whatsapp apk 2023
-features of gb instagram apk 2023
-features of gb facebook apk 2023
-features of gb latest version apk 2023
GB APK 2023 is a great communication app that allows you to enjoy more fun and privacy features than the official WhatsApp. You can customize your app with themes and fonts, hide your online status and read receipts, use multiple accounts and stickers, and more. You can download and install GB APK 2023 easily on your Android device by following the steps in this article. However, you should also be aware of the pros and cons of using GB APK 2023, such as the risk of getting banned or hacked by using a third-party app. You should use GB APK 2023 at your own risk and responsibility.
-We hope this article has helped you learn more about GB APK 2023 and how to download and install it on your device. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
401be4b1e0
-How to Download Video Game Apps on Your Device-Do you love playing video games? Do you want to enjoy them anytime, anywhere? If so, you might want to download video game apps on your device. Video game apps are applications that let you play games on your smartphone, tablet, computer, or other devices. They are convenient, fun, and diverse. In this article, we will show you how to download video game apps on your device. We will also explain what are video game apps, why you should download them, how to choose the right one for you, and how to enjoy them to the fullest. -What are Video Game Apps?-Video game apps are software programs that allow you to play games on your device. They can be downloaded from various sources, such as online stores, websites, or platforms. Some video game apps are standalone games that run independently on your device. Others are connected to online servers that enable multiplayer features or access to additional content. Some examples of popular video game apps are Subway Surfers, FIFA Soccer, Minecraft, Roblox, Harry Potter: Magic Awakened, etc. -download video game appDownload ✪ https://urlca.com/2uO6Tr - Why Download Video Game Apps?-There are many reasons why you might want to download video game apps on your device. Here are some of them: -
How to Choose the Right Video Game App for You?-With so many video game apps available, how do you decide which one to download? Here are some factors and criteria that you can consider: -Genre-The genre of a video game app refers to the type of game that it is. There are many genres to choose from, such as action, adventure, puzzle, simulation, strategy, role-playing, sports, etc. You can pick a genre that matches your interest, personality, or mood. For example, if you like fast-paced and thrilling games, you might enjoy action games. If you prefer more relaxing and creative games, you might like puzzle games. -How to download video game app on iPhone Platform-The platform of a video game app refers to the device that it runs on. There are different platforms to choose from, such as Android, iOS, Windows, Mac, Linux, etc. You can pick a platform that is compatible with your device and has the features that you need. For example, if you have an Android smartphone, you might want to download video game apps from the Google Play Store. If you have an iPhone, you might want to download video game apps from the Apple App Store. -Rating-The rating of a video game app refers to the quality and feedback that it has received from other users. There are different ways to measure the rating of a video game app, such as stars, reviews, comments, downloads, etc. You can pick a rating that reflects your expectations and standards. For example, if you want a high-quality and reliable game, you might want to download video game apps that have at least 4 stars and positive reviews. -Price-The price of a video game app refers to the amount of money that you have to pay to download or play it. There are different prices to choose from, such as free, paid, freemium, subscription, etc. You can pick a price that fits your budget and preferences. For example, if you want to save money and try different games, you might want to download video game apps that are free. 
If you want to support the developers and access premium features, you might want to download video game apps that are paid. -Features-The features of a video game app refer to the aspects and elements that make it unique and enjoyable. There are many features to choose from, such as graphics, gameplay, multiplayer, story, sound effects, music, etc. You can pick features that appeal to your senses and emotions. For example, if you want a realistic and immersive game experience, you might want to download video game apps that have high-quality graphics and sound effects. If you want a social and interactive game experience, you might want to download video game apps that have multiplayer modes and chat functions. -How to Download Video Game Apps from Different Sources?-Once you have chosen the right video game app for you, how do you download it on your device? Here are some steps and instructions that you can follow: -Google Play Store-If you have an Android device, you can download video game apps from the Google Play Store. Here is how: -
Apple App Store-If you have an iOS device, you can download video game apps from the Apple App Store. Here is how: -
EA App-If you have a PC, you can download video game apps from the EA App. Here is how: -
Other Sources-If you want to download video game apps from other sources, such as websites, platforms, or developers, you can do so at your own risk. Here are some tips to follow: -
How to Enjoy Video Game Apps to the Fullest?-Now that you have downloaded your video game app, how do you enjoy it to the fullest? Here are some tips and tricks that you can try: -
Conclusion-In conclusion, downloading video game apps on your device is a great way to enjoy playing games anytime, anywhere. You can choose from a variety of genres, platforms, ratings, prices, and features that suit your taste and needs. You can also download video game apps from different sources, such as online stores, websites, or platforms. However, you should always be careful and responsible when downloading and installing video game apps on your device. You should also follow some tips and tricks to enjoy your video game apps to the fullest. We hope that this article has helped you learn how to download video game apps on your device. Happy gaming! -Frequently Asked Questions-
The answer to this question depends on your personal preferences and interests. However, some of the most popular and highly rated video game apps are Among Us, Call of Duty: Mobile, Genshin Impact, Candy Crush Saga, Fortnite, etc. -The answer to this question depends on the size and complexity of the video game app that you want to download. Some video game apps are very small and simple, while others are very large and complex. You can check the size of the video game app before downloading it from the source. You should also make sure that you have enough free space on your device before downloading and installing the video game app. -The answer to this question depends on the source and platform of the video game app that you have downloaded. Some video game apps are automatically updated by the source or platform when there is a new version available. Others require you to manually update them by downloading and installing the latest version from the source or platform. You should always keep your video game apps updated to enjoy the latest features and bug fixes. -The answer to this question The answer to this question depends on the device and platform that you are using. Some devices and platforms allow you to delete video game apps by simply tapping and holding on the app icon and selecting Delete or Uninstall. Others require you to go to the Settings or Manage Apps menu and select the video game app that you want to delete and choose Delete or Uninstall. You should always make sure that you have backed up your data and progress before deleting your video game app. -The answer to this question depends on your curiosity and exploration. You can find more video game apps to download by browsing through the online stores, websites, or platforms that offer them. You can also search for keywords, genres, or themes that interest you. You can also ask for recommendations from your friends, family, or online communities that share your passion for gaming. 
You can also read reviews, blogs, articles, or magazines that feature video game apps. -- - \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Win Every Race with Real Bike Racing MOD APK Unlimited Money.md b/spaces/congsaPfin/Manga-OCR/logs/How to Win Every Race with Real Bike Racing MOD APK Unlimited Money.md deleted file mode 100644 index 7d32c4deabc18dbf94e9d32302df393ffa51db83..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Win Every Race with Real Bike Racing MOD APK Unlimited Money.md +++ /dev/null @@ -1,100 +0,0 @@ - - Real Bike Racing Mod APK Hack Unlimited Money: How to Download and Play-If you are a fan of realistic motorcycle racing games, you might have heard of Real Bike Racing. This game lets you experience the thrill of riding the fastest and most agile bikes in the world, competing with other riders on different tracks and modes. But what if you want to unlock all the bikes, tracks, and features without spending real money? That's where Real Bike Racing Mod APK Hack Unlimited Money comes in handy. In this article, we will show you how to download and install this mod apk file on your device, and how to play the game with unlimited money and hack features. -real bike racing mod apk hack unlimited moneyDownload File ⚹⚹⚹ https://urlca.com/2uO8TE - Introduction-What is Real Bike Racing?-Real Bike Racing is a 3D motorcycle racing game developed by Italic Games. It was released in 2016 for Android devices. The game features realistic graphics, physics, and sound effects, as well as over 10 types of superbikes, each with its own specifications and performance. You can customize your bike with different colors, stickers, and parts. You can also choose from various modes, such as career mode, VR mode, championship mode, and time trial mode. You can race on different tracks, such as city streets, desert roads, mountain roads, and more. 
You can also challenge other players online or offline. -What is Mod APK?-Mod APK is a modified version of an original APK (Android Package Kit) file. It is created by third-party developers who modify the original code of the game or app to add or remove some features, such as unlimited money, coins, gems, lives, etc. Mod APK files are not available on the official Google Play Store, but they can be downloaded from other sources on the internet. However, not all mod apk files are safe and reliable, so you need to be careful when downloading them. -What are the benefits of using Real Bike Racing Mod APK Hack Unlimited Money?-By using Real Bike Racing Mod APK Hack Unlimited Money, you can enjoy the following benefits: -
How to download and install Real Bike Racing Mod APK Hack Unlimited Money?-Step 1: Enable unknown sources on your device-Before you can install any mod apk file on your device, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, follow these steps: -real bike racing mod apk unlimited money and gold
Step 2: Download the mod apk file from a trusted source-The next step is to download the mod apk file from a trusted source. You can use this link[^1 ] to download the mod apk file for Real Bike Racing. Make sure you have enough storage space on your device and a stable internet connection. You can also scan the file with an antivirus software before installing it. -Step 3: Install the mod apk file on your device-After downloading the mod apk file, you need to install it on your device. To do this, follow these steps: -
Step 4: Launch the game and enjoy unlimited money and features-Once the installation is done, you can launch the game from your app drawer or home screen. You will see that you have unlimited money and all the bikes, tracks, and features unlocked. You can also use the hack features to enhance your gameplay and win races easily. Have fun! -How to play Real Bike Racing Mod APK Hack Unlimited Money?-Choose your bike and customize it-The first thing you need to do is to choose your bike from the garage. You can select from over 10 types of superbikes, such as Ducati, Yamaha, Honda, Kawasaki, etc. You can also customize your bike with different colors, stickers, and parts. You can upgrade your bike's engine, brakes, tires, suspension, and more to improve its performance and speed. -Compete in various modes and tracks-The next thing you need to do is to choose a mode and a track to race on. You can choose from various modes, such as career mode, VR mode, championship mode, and time trial mode. Each mode has different objectives and challenges. You can also choose from different tracks, such as city streets, desert roads, mountain roads, and more. Each track has different terrains and obstacles. -Use the hack features to boost your performance and win races-The last thing you need to do is to use the hack features to boost your performance and win races. You can use the speed hack to increase your speed beyond the limit. You can use the nitro hack to fill up your nitro bar instantly. You can use the no ads feature to remove annoying ads from the game. You can also use other hack features to make the game easier and more fun. -Conclusion-Summary of the main points-In conclusion, Real Bike Racing Mod APK Hack Unlimited Money is a great way to enjoy realistic motorcycle racing games without spending real money. You can download and install this mod apk file on your device easily and safely. 
You can play the game with unlimited money and all the bikes, tracks, and features unlocked. You can also use the hack features to boost your performance and win races easily. -Call to action-If you are interested in trying out this mod apk file, you can download it from this link. You can also share this article with your friends who love motorcycle racing games. Thank you for reading and have a great day! - FAQs Q: Is Real Bike Racing Mod APK Hack Unlimited Money safe to use? A: Yes, it is safe to use as long as you download it from a trusted source and scan it with an antivirus software before installing it. Q: Do I need to root my device to use Real Bike Racing Mod APK Hack Unlimited Money? A: No, you do not need to root your device to use this mod apk file. Q: Can I play Real Bike Racing Mod APK Hack Unlimited Money online or offline? A: You can play this mod apk file both online and offline. Q: Will I get banned from the game if I use Real Bike Racing Mod APK Hack Unlimited Money? A: No, you will not get banned from the game if you use this mod apk file. However, you should use it at your own risk and discretion. Q: How can I update Real Bike Racing Mod APK Hack Unlimited Money? A: You can update this mod apk file by downloading the latest version from the same source or by checking for updates within the game. 197e85843d- - \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Project Makeover The Ultimate Puzzle Game for Fashion Lovers.md b/spaces/congsaPfin/Manga-OCR/logs/Project Makeover The Ultimate Puzzle Game for Fashion Lovers.md deleted file mode 100644 index 4bc8aac95ee09045611128c9b1f35f7203e40e44..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Project Makeover The Ultimate Puzzle Game for Fashion Lovers.md +++ /dev/null @@ -1,129 +0,0 @@ - - Project Makeover Download: How to Play the Ultimate Fashion and Home Makeover Game-Do you love fashion, beauty, and home design? 
Do you enjoy playing match-3 puzzle games? If you answered yes to both questions, then you should definitely try Project Makeover, one of the most popular and addictive games on mobile devices. In this article, we will tell you everything you need to know about Project Makeover, how to download it on your device, how to play it, and some tips and tricks to master it. We will also share some reviews and ratings of the game from players and critics. So, let's get started! -project makeover downloadDOWNLOAD »»» https://urlca.com/2uOfOg - What is Project Makeover?-Project Makeover is a match-3 puzzle game with a makeover twist. It is developed by Magic Tavern, Inc. and was released in 2020. The game has over 100 million downloads and an average rating of 4.4 stars on Google Play and App Store. It is also an Editor's Choice game on both platforms. -A match-3 puzzle game with a makeover twist-In Project Makeover, you play as a stylist who gives makeovers to help people achieve their dreams. You do this by completing match-3 puzzles to earn coins and stars, which you can use to buy fashionable clothes, hairstyles, makeup, and furniture. You can choose from a variety of styles and options to create the perfect look for your clients. -A game with a captivating storyline and characters-Project Makeover is not just about matching tiles and dressing up people. It also has a captivating storyline and characters that will keep you hooked. You will meet different clients who have different personalities, backgrounds, and goals. Some of them are egotistical fashion icons, scheming assistants, or stubborn clients who need a new wardrobe. You will also have a team of experts who will help you along the way, such as Gigi, Francis, and Derek. You will have to deal with drama, humor, romance, and mystery as you makeover your clients' lives. 
-project makeover download for pc A game with a variety of features and challenges-Project Makeover has a lot of features and challenges that make it fun and engaging. You can customize your own avatar and style with your own clothes, accessories, hair, makeup, etc. You can also visit your friends' avatars and see how they dressed up. You can play challenging fashion-themed puzzles with different obstacles and power-ups. You can also participate in recurring competitive events where you can win prizes and rewards. -How to download Project Makeover on your device?-Project Makeover is available for free on both Android and iOS devices. You can download it from Google Play or App Store by following these steps: -
Download from BlueStacks emulator | 5 stars | |
Gamezebo | "Project Makeover is a delightful game that combines match-3 puzzles with makeover fun. The game has a lot of content and variety to keep you entertained for hours. The game also has a charming story and characters that will make you laugh and smile. The game is well-designed and polished, with beautiful graphics and sound effects. Project Makeover is a must-play game for fans of the genre." | 4.5 stars |
Samuel Lee | "This game is awesome! It's so addictive and satisfying to play. I love how you can customize your own avatar and style, and see how your friends dressed up. I also love how you can participate in events and win prizes and rewards. The game is always updated and improved, with new levels, features, and clients. The game is also very fair and generous, with no annoying ads or paywalls. Project Makeover is one of the best games I have ever played." | 5 stars |
However, not everyone is happy with Project Makeover. Some players and critics have some drawbacks and issues with the game, such as its difficulty level, its energy system, its bugs and glitches, its customer service, etc. Here are some of the negative reviews from players and critics:
-Player/Critic | Review | Rating |
---|---|---|
Amy Jones | "I like this game, but it's too hard sometimes. Some levels are impossible to beat without boosters or extra moves, which are very expensive or hard to get. The game also runs out of energy very fast, which limits how much you can play. The game should be more balanced and fair, and give more freebies and rewards." | 3 stars |
Pocket Gamer | "Project Makeover is a fun and enjoyable game, but it also has some flaws and problems. The game has some bugs and glitches that can affect the gameplay and performance, such as crashing, freezing, loading errors, etc. The game also has some customer service issues, such as slow response, poor communication, or lack of resolution. The game should fix these issues and improve its quality and service." | 3.5 stars |
Daniel Smith | "This game is terrible! It's so boring and repetitive. The puzzles are too easy or too hard, with no logic or strategy. The makeovers are too limited and unrealistic, with no creativity or choice. The story and the characters are too cheesy and annoying, with no depth or emotion. The game is also full of ads and in-app purchases, which are very annoying and expensive. The game is a waste of time and money." | 1 star |
Project Makeover is a match-3 puzzle game with a makeover twist that will appeal to fans of fashion, beauty, and home design. The game has a lot of features and challenges that make it fun and engaging, such as a captivating storyline and characters, a variety of makeover options and styles, a customizable avatar and wardrobe, challenging fashion-themed puzzles with different obstacles and power-ups, recurring competitive events with prizes and rewards, etc. The game is also well-designed and polished, with beautiful graphics and sound effects, frequent updates and improvements, and an Editor's Choice status on both Google Play and App Store. The game is also highly rated and reviewed by players and critics, who praise its fun and addictive gameplay, its captivating storyline and characters, its variety of features and challenges, etc. However, the game also has some drawbacks and issues, such as its difficulty level, its energy system, its bugs and glitches, its customer service, etc. The game should fix these issues and improve its quality and service. Overall, Project Makeover is a great game that you should definitely try if you love fashion and makeover games.
-Here are some of the frequently asked questions about Project Makeover:
-You can get more coins and stars by completing match-3 puzzles, watching ads, completing daily tasks, participating in events, or buying them with real money.
-You can get more energy by waiting for it to refill over time, watching ads, completing daily tasks, participating in events, or buying it with real money.
-You can get more diamonds by watching ads, completing daily tasks, participating in events, or buying them with real money.
-You can get more boosters by watching ads, completing daily tasks, participating in events, or buying them with real money.
-You can add friends in Project Makeover by connecting your Facebook account to the game, or by using the friend code feature.
-If you are looking for a thrilling and addictive game that will test your skills and reflexes against hordes of nightmarish creatures, you might want to check out Vampire Survivors. This is a gothic horror casual game with rogue-lite elements, where your choices can allow you to quickly snowball against the hundreds of monsters that get thrown at you. In this article, we will tell you everything you need to know about this indie phenomenon, including how to download it on your PC or mobile device, how to update it to the latest version, what's new in the latest patch, and some tips and tricks for playing it.
-Vampire Survivors is a time survival game with minimalistic gameplay and roguelite elements. It was developed and published by indie developer poncle. The game was originally coded using HTML5 and was released on itch.io as a browser game for free on March 31, 2021. It later came out on Steam on December 17, 2021, and on Google Play on June 8, 2023. The game supports mouse, keyboard, controller, and touch screen inputs.
-Download ⚡ https://urlca.com/2uOcGa
The premise of the game is simple: you play as one of several characters who have to survive as long as possible against waves of vampires, werewolves, zombies, skeletons, ghosts, demons, and other supernatural enemies. You have no control over when or how you attack; your weapons fire automatically based on your position and your enemies' proximity. Your only task is to move around the map, collect gems and items that drop from enemies or chests, level up your weapons and stats, and avoid getting hit by enemies or traps. The game gets progressively harder as time goes by; new enemies appear every minute, some with special abilities or increased health. The game ends when you die or when you reach 30 minutes of survival time.
-Vampire Survivors is one of those games that are easy to pick up but hard to put down. It has a simple but addictive gameplay loop that keeps you hooked for hours. It also has a lot of variety and replay value thanks to its roguelite elements. You can choose from eight different characters, each with their own unique weapons, abilities, and perks. You can also unlock and equip various items that can boost your stats, grant you special effects, or alter the gameplay in some way. You can also customize your game mode by choosing from four difficulty levels, three map sizes, and three game speeds. You can also play with up to four friends in local co-op mode, or challenge yourself in endless mode or daily challenge mode.
-Vampire Survivors has received overwhelmingly positive reviews from both critics and players. It has a 9/10 rating on Steam, a 4.8/5 rating on Google Play, and a 4.6/5 rating on itch.io. Some of the praises that the game has received include its charming pixel art graphics, its catchy retro soundtrack, its smooth and responsive controls, its balanced and rewarding gameplay, its high replay value, and its fun and humorous tone. Some of the criticisms that the game has received include its lack of online multiplayer, its occasional bugs and glitches, and its high difficulty curve.
-If you want to play Vampire Survivors on your PC, you have two options: you can either play it on your browser for free on itch.io, or you can buy it on Steam for $4.99 USD. Here are the steps to download the game on Steam:
-vampire survivors steam download latest version
-how to download vampire survivors update patch
-vampire survivors android apk download free
-vampire survivors roguelite game download full version
-download vampire survivors for windows 10 64 bit
-vampire survivors wiki version history download
-vampire survivors soundtrack download mp3
-vampire survivors tides of the foscari dlc download
-vampire survivors legacy of the moonspell dlc download
-vampire survivors cheats and hacks download
-vampire survivors mod apk download unlimited gems
-vampire survivors pc game download torrent
-vampire survivors mac os download dmg
-vampire survivors linux download zip
-vampire survivors online multiplayer mode download
-vampire survivors steam key generator download
-vampire survivors trainer download for pc
-vampire survivors save file editor download
-vampire survivors tips and tricks pdf download
-vampire survivors gameplay video download hd
-vampire survivors review and rating download
-vampire survivors fan art and wallpapers download
-vampire survivors best weapons and power-ups download guide
-vampire survivors characters and classes unlock download
-vampire survivors achievements and trophies list download
-vampire survivors easter eggs and secrets download
-vampire survivors beta version download free
-vampire survivors demo version download steam
-vampire survivors early access version download itch.io
-vampire survivors release date and price download info
-vampire survivors system requirements and specs download test
-vampire survivors bugs and glitches report download form
-vampire survivors feedback and suggestions submit download link
-vampire survivors developer and publisher contact download email
-vampire survivors community and forum join download app
-vampire survivors newsletter and updates subscribe download code
-vampire survivors merchandise and gifts buy download coupon
-vampire survivors official website and blog visit download button
-vampire survivors support and help request download ticket
-vampire survivors refund and return policy read download pdf
If you want to play Vampire Survivors on your mobile device, you can download it for free on Google Play. Here are the steps to download the game on your Android phone or tablet:
-Vampire Survivors is constantly being updated by the developer with new features, fixes, and improvements. To make sure that you are playing the latest version of the game, you need to check for updates and install them on your PC or mobile device. Here is how to do that:
-The latest version of Vampire Survivors as of June 20, 2023 is version 1.6.0. This version was released on June 15, 2023 and introduced several new features, fixes, and improvements. Here are some of the main changes and additions in this patch:
-Feature | Description |
---|---|
New character: The Hunter | A A skilled marksman who uses a crossbow and a knife to hunt down his prey. He has a high critical chance and can deal extra damage to enemies with low health. He also has a passive ability that allows him to see enemies through walls. |
New item: The Holy Grail | A rare and powerful item that grants the user immortality. It heals the user for 1 HP every second and prevents them from dying. However, it also curses the user with eternal thirst, which reduces their movement speed by 50% and makes them unable to use other items. |
New enemy: The Reaper | A terrifying and deadly enemy that appears after 20 minutes of survival time. It is immune to all damage and can kill the user instantly with its scythe. It can also teleport around the map and summon other enemies. The only way to escape from it is to reach 30 minutes of survival time or use the Holy Grail. |
New feature: Achievements | A new feature that rewards the user for completing various challenges and tasks in the game. There are 50 achievements in total, ranging from easy to hard. Some of them unlock new items or characters, while others grant the user bragging rights. |
New feature: Leaderboards | A new feature that ranks the user based on their best survival time, score, kills, and gems collected in the game. There are separate leaderboards for each game mode, difficulty level, map size, and game speed. The user can also compare their stats with their friends or other players around the world. |
Various bug fixes and improvements | A number of bug fixes and improvements that enhance the performance, stability, and quality of the game. Some of them include fixing typos, glitches, crashes, exploits, balancing issues, and adding more options and settings. |
For more details on the latest version of Vampire Survivors, you can visit the version history page on itch.io or the update news page on Steam.
-Vampire Survivors is a challenging and fun game that requires skill, strategy, and luck to survive. Here are some tips and tricks that can help you improve your gameplay and have more fun:
-Vampire Survivors is a gothic horror casual game with rogue-lite elements that will keep you entertained for hours. It has a simple but addictive gameplay loop that challenges your skills and reflexes against waves of supernatural enemies. It also has a lot of variety and replay value thanks to its roguelite elements, such as different characters, items, modes, difficulties, maps, speeds, achievements, and leader boards. You can download the game for free on your browser or mobile device, or buy it for a low price on Steam. You can also update the game to the latest version to enjoy the new features, fixes, and improvements. If you are looking for a thrilling and addictive game that will test your skills and reflexes against hordes of nightmarish creatures, you might want to check out Vampire Survivors. You won't regret it!
-Yes, Vampire Survivors is free to play on your browser or mobile device. You can play the game without any ads or in-app purchases. However, if you want to support the developer and enjoy some extra benefits, you can buy the game on Steam for $4.99 USD. This will give you access to achievements, leaderboards, cloud saves, and future updates.
-Yes, Vampire Survivors has a local co-op mode that allows you to play with up to four friends on the same device. You can either share the same screen or use multiple controllers or touch screens. However, the game does not have an online multiplayer mode at the moment.
-No, Vampire Survivors is only available on PC and Android devices at the moment. The developer has not announced any plans to release the game on other platforms such as iOS, Mac, Linux, or consoles.
-A run in Vampire Survivors can last anywhere from a few seconds to 30 minutes, depending on your skill level and luck. The game ends when you die or when you reach 30 minutes of survival time. The longer you survive, the higher your score and rank will be.
-There are eight characters in Vampire Survivors, each with their own unique weapons, abilities, and perks. They are: The Vampire, The Witch, The Priest, The Knight, The Hunter, The Scientist, The Ninja, and The Pirate. You can unlock them by playing the game and collecting gems.
197e85843dA Cartilha Alegria de Saber é um livro de alfabetização que utiliza o método fônico para ensinar as crianças a ler e escrever. O método fônico consiste em associar os sons das letras e das sílabas com as imagens dos objetos que representam. Assim, a criança aprende a reconhecer e a produzir as palavras de forma lúdica e divertida.
-Download ✶ https://ssurll.com/2uzxnf
A Cartilha Alegria de Saber foi elaborada por uma equipe de pedagogos e professores especializados em alfabetização, com base em pesquisas científicas e experiências práticas. O livro é composto por 69 páginas, divididas em quatro partes: vogais, consoantes, sílabas simples e sílabas complexas. Cada parte contém textos, ilustrações, atividades, jogos e músicas que estimulam o desenvolvimento cognitivo, afetivo e social da criança.
- -Além do livro, a Cartilha Alegria de Saber também oferece um caderno de atividades complementares, que pode ser baixado gratuitamente em formato PDF. O caderno contém exercícios de escrita, leitura, interpretação, ortografia, gramática e matemática, que reforçam o aprendizado da cartilha. O caderno também pode ser usado como material de apoio para os pais e professores que acompanham o processo de alfabetização das crianças.
- -Se você quer baixar a Cartilha Alegria de Saber em PDF, basta acessar o site oficial do livro e clicar no botão "Baixar". Você será redirecionado para uma página onde poderá escolher entre as opções de download: Power Point ou imagem por imagem. Depois, é só salvar o arquivo no seu computador ou dispositivo móvel e imprimir se desejar.
- -Ao baixar a Cartilha Alegria de Saber em PDF, você terá acesso a um material de qualidade, que já foi utilizado por milhares de crianças em todo o Brasil. A cartilha é recomendada por especialistas em educação e aprovada pelo Ministério da Educação (MEC). Além disso, ao adquirir o livro, você estará contribuindo para um projeto social que doa exemplares da cartilha para escolas públicas e comunidades carentes.
- -A Cartilha Alegria de Saber em PDF é uma ótima opção para quem quer alfabetizar crianças de forma eficiente e divertida. Veja alguns benefícios de usar esse material:
- -Portanto, se você quer alfabetizar crianças com sucesso, não perca tempo e baixe agora mesmo a Cartilha Alegria de Saber em PDF. Você vai se surpreender com os resultados!
- -A Cartilha Alegria de Saber em PDF pode ser usada na sala de aula como um recurso didático para facilitar o ensino e a aprendizagem da alfabetização. Os professores podem seguir as orientações do livro e do caderno de atividades, que apresentam uma sequência lógica e progressiva de conteúdos e habilidades. Os professores também podem adaptar as atividades de acordo com as necessidades e o ritmo de cada turma e de cada aluno.
- -A Cartilha Alegria de Saber em PDF também permite que os professores trabalhem de forma interdisciplinar, integrando as áreas de linguagem, matemática, ciências, artes e educação física. Além disso, o livro e o caderno estimulam a participação dos pais e da comunidade no processo de alfabetização das crianças, promovendo a parceria entre a escola e a família.
- -A Cartilha Alegria de Saber em PDF traz diversos benefícios para as crianças que estão aprendendo a ler e escrever. Veja alguns deles:
- -Portanto, a Cartilha Alegria de Saber em PDF é um material que contribui para o desenvolvimento integral das crianças, preparando-as para os desafios da vida escolar e social.
-A Cartilha Alegria de Saber em PDF não é o único material de alfabetização que você pode baixar gratuitamente na internet. Existem outros sites que disponibilizam livros, cadernos, jogos, atividades e vídeos que podem auxiliar no ensino e na aprendizagem da leitura e da escrita. Veja alguns exemplos:
- -Portanto, se você quer ter acesso a mais materiais de alfabetização em PDF, basta pesquisar na internet e escolher os que mais se adequam às suas necessidades e preferências.
- -Nem todos os materiais de alfabetização em PDF que você encontra na internet são confiáveis e adequados. Alguns podem conter erros conceituais, metodológicos ou gramaticais, que podem prejudicar o processo de alfabetização das crianças. Por isso, é importante que você avalie a qualidade dos materiais antes de baixá-los e usá-los. Veja algumas dicas para fazer isso:
- -Portanto, se você quer garantir a qualidade dos materiais de alfabetização em PDF que você baixa na internet, siga essas dicas e faça uma avaliação criteriosa antes de usá-los.
-A Cartilha Alegria de Saber em PDF também pode ser usada em casa, como um recurso para apoiar a alfabetização das crianças que estudam na modalidade de ensino remoto ou híbrido, ou que precisam de reforço escolar. Os pais ou responsáveis podem seguir as orientações do livro e do caderno de atividades, que explicam passo a passo como ensinar as crianças a ler e escrever.
- -A Cartilha Alegria de Saber em PDF também permite que os pais ou responsáveis criem um ambiente favorável à alfabetização em casa, proporcionando momentos de leitura compartilhada, de escrita criativa e de brincadeiras educativas com as crianças. Além disso, o livro e o caderno incentivam o diálogo e a troca de experiências entre as crianças, os pais e os professores, promovendo a parceria entre a família e a escola.
- -A Cartilha Alegria de Saber em PDF é um material que permite avaliar o desempenho das crianças na alfabetização, por meio de diferentes instrumentos e critérios. Veja alguns exemplos:
- -Portanto, a Cartilha Alegria de Saber em PDF é um material que possibilita uma avaliação contínua, diversificada e democrática da alfabetização das crianças, contribuindo para o aprimoramento da qualidade da educação.
-A Cartilha Alegria de Saber em PDF é um material de alfabetização que pode trazer muitos benefícios para as crianças, os pais e os professores que desejam ensinar e aprender a ler e escrever de forma eficaz, divertida e significativa. A cartilha oferece uma proposta pedagógica baseada em evidências científicas e em boas práticas educacionais, que respeita a diversidade cultural e linguística do país e que promove o desenvolvimento integral das crianças.
- -A Cartilha Alegria de Saber em PDF também pode ser usada em diferentes contextos e situações, como na sala de aula, em casa, no ensino remoto ou híbrido, ou no reforço escolar. A cartilha disponibiliza o livro e o caderno de atividades em formato PDF, facilitando o acesso e a distribuição do material para as escolas, os professores e as famílias. A cartilha também possibilita uma avaliação contínua, diversificada e democrática da alfabetização das crianças, contribuindo para o aprimoramento da qualidade da educação.
- -Portanto, se você quer ter acesso a um material de alfabetização que pode fazer a diferença na vida das crianças brasileiras, baixe agora mesmo a Cartilha Alegria de Saber em PDF e aproveite todos os seus recursos e vantagens.
3cee63e6c2The world has changed dramatically in the three months since our last update of the World Economic Outlook in January. A rare disaster, a coronavirus pandemic, has resulted in a tragically large number of human lives being lost. As countries implement necessary quarantines and social distancing practices to contain the pandemic, the world has been put in a Great Lockdown. The magnitude and speed of collapse in activity that has followed is unlike anything experienced in our lifetimes.
-The reasons Venezuela is in crisis are years of hyperinflation, violence, and food and medicine shortages. The country was once considered the richest in Latin America, thanks to having the largest oil reserves in the world. But more than a decade of declining oil revenue and poor governance led to the collapse of the national economy, and the government has not been able to provide adequate social services.
-Download File ——— https://ssurll.com/2uzxGH
Download Zip ✦ https://ssurll.com/2uzwdG
元素,则不添加按钮
- }
- var firstChild = code.firstChild;
- if (!firstChild) {
- return; // 如果 元素没有子节点,则不添加按钮
- }
- var button = document.createElement('button');
- button.textContent = '\uD83D\uDCCE'; // 使用 📎 符号作为“复制”按钮的文本
- button.style.position = 'relative';
- button.style.float = 'right';
- button.style.fontSize = '1em'; // 可选:调整按钮大小
- button.style.background = 'none'; // 可选:去掉背景颜色
- button.style.border = 'none'; // 可选:去掉边框
- button.style.cursor = 'pointer'; // 可选:显示指针样式
- button.addEventListener('click', function () {
- var range = document.createRange();
- range.selectNodeContents(code);
- range.setStartBefore(firstChild); // 将范围设置为第一个子节点之前
- var selection = window.getSelection();
- selection.removeAllRanges();
- selection.addRange(range);
-
- try {
- var success = document.execCommand('copy');
- if (success) {
- button.textContent = '\u2714';
- setTimeout(function () {
- button.textContent = '\uD83D\uDCCE'; // 恢复按钮为“复制”
- }, 2000);
- } else {
- button.textContent = '\u2716';
- }
- } catch (e) {
- console.error(e);
- button.textContent = '\u2716';
- }
-
- selection.removeAllRanges();
- });
- code.insertBefore(button, firstChild); // 将按钮插入到第一个子元素之前
- }
-
- function handleNewElements(mutationsList, observer) {
- for (var mutation of mutationsList) {
- if (mutation.type === 'childList') {
- for (var node of mutation.addedNodes) {
- if (node.nodeName === 'PRE') {
- addCopyButton(node);
- }
- }
- }
- }
- }
-
- var observer = new MutationObserver(handleNewElements);
- observer.observe(document.documentElement, { childList: true, subtree: true });
-
- document.querySelectorAll('pre').forEach(addCopyButton);
-})();
diff --git a/spaces/daydayup1225/Chat-web/assets/custom.js b/spaces/daydayup1225/Chat-web/assets/custom.js
deleted file mode 100644
index 7b1761043149ff97ca498501c87a0d15db5258ee..0000000000000000000000000000000000000000
--- a/spaces/daydayup1225/Chat-web/assets/custom.js
+++ /dev/null
@@ -1 +0,0 @@
-// custom javascript here
\ No newline at end of file
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/PSDraw.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/PSDraw.py
deleted file mode 100644
index 13b3048f67e18ac58170c3a1bd25cb18d66b30fe..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/PSDraw.py
+++ /dev/null
@@ -1,229 +0,0 @@
-#
-# The Python Imaging Library
-# $Id$
-#
-# Simple PostScript graphics interface
-#
-# History:
-# 1996-04-20 fl Created
-# 1999-01-10 fl Added gsave/grestore to image method
-# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge)
-#
-# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved.
-# Copyright (c) 1996 by Fredrik Lundh.
-#
-# See the README file for information on usage and redistribution.
-#
-
-import sys
-
-from . import EpsImagePlugin
-
-##
-# Simple PostScript graphics interface.
-
-
-class PSDraw:
- """
- Sets up printing to the given file. If ``fp`` is omitted,
- ``sys.stdout.buffer`` or ``sys.stdout`` is assumed.
- """
-
- def __init__(self, fp=None):
- if not fp:
- try:
- fp = sys.stdout.buffer
- except AttributeError:
- fp = sys.stdout
- self.fp = fp
-
- def begin_document(self, id=None):
- """Set up printing of a document. (Write PostScript DSC header.)"""
- # FIXME: incomplete
- self.fp.write(
- b"%!PS-Adobe-3.0\n"
- b"save\n"
- b"/showpage { } def\n"
- b"%%EndComments\n"
- b"%%BeginDocument\n"
- )
- # self.fp.write(ERROR_PS) # debugging!
- self.fp.write(EDROFF_PS)
- self.fp.write(VDI_PS)
- self.fp.write(b"%%EndProlog\n")
- self.isofont = {}
-
- def end_document(self):
- """Ends printing. (Write PostScript DSC footer.)"""
- self.fp.write(b"%%EndDocument\nrestore showpage\n%%End\n")
- if hasattr(self.fp, "flush"):
- self.fp.flush()
-
- def setfont(self, font, size):
- """
- Selects which font to use.
-
- :param font: A PostScript font name
- :param size: Size in points.
- """
- font = bytes(font, "UTF-8")
- if font not in self.isofont:
- # reencode font
- self.fp.write(b"/PSDraw-%s ISOLatin1Encoding /%s E\n" % (font, font))
- self.isofont[font] = 1
- # rough
- self.fp.write(b"/F0 %d /PSDraw-%s F\n" % (size, font))
-
- def line(self, xy0, xy1):
- """
- Draws a line between the two points. Coordinates are given in
- PostScript point coordinates (72 points per inch, (0, 0) is the lower
- left corner of the page).
- """
- self.fp.write(b"%d %d %d %d Vl\n" % (*xy0, *xy1))
-
- def rectangle(self, box):
- """
- Draws a rectangle.
-
- :param box: A tuple of four integers, specifying left, bottom, width and
- height.
- """
- self.fp.write(b"%d %d M 0 %d %d Vr\n" % box)
-
- def text(self, xy, text):
- """
- Draws text at the given position. You must use
- :py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method.
- """
- text = bytes(text, "UTF-8")
- text = b"\\(".join(text.split(b"("))
- text = b"\\)".join(text.split(b")"))
- xy += (text,)
- self.fp.write(b"%d %d M (%s) S\n" % xy)
-
- def image(self, box, im, dpi=None):
- """Draw a PIL image, centered in the given box."""
- # default resolution depends on mode
- if not dpi:
- if im.mode == "1":
- dpi = 200 # fax
- else:
- dpi = 100 # greyscale
- # image size (on paper)
- x = im.size[0] * 72 / dpi
- y = im.size[1] * 72 / dpi
- # max allowed size
- xmax = float(box[2] - box[0])
- ymax = float(box[3] - box[1])
- if x > xmax:
- y = y * xmax / x
- x = xmax
- if y > ymax:
- x = x * ymax / y
- y = ymax
- dx = (xmax - x) / 2 + box[0]
- dy = (ymax - y) / 2 + box[1]
- self.fp.write(b"gsave\n%f %f translate\n" % (dx, dy))
- if (x, y) != im.size:
- # EpsImagePlugin._save prints the image at (0,0,xsize,ysize)
- sx = x / im.size[0]
- sy = y / im.size[1]
- self.fp.write(b"%f %f scale\n" % (sx, sy))
- EpsImagePlugin._save(im, self.fp, None, 0)
- self.fp.write(b"\ngrestore\n")
-
-
-# --------------------------------------------------------------------
-# PostScript driver
-
-#
-# EDROFF.PS -- PostScript driver for Edroff 2
-#
-# History:
-# 94-01-25 fl: created (edroff 2.04)
-#
-# Copyright (c) Fredrik Lundh 1994.
-#
-
-
-EDROFF_PS = b"""\
-/S { show } bind def
-/P { moveto show } bind def
-/M { moveto } bind def
-/X { 0 rmoveto } bind def
-/Y { 0 exch rmoveto } bind def
-/E { findfont
- dup maxlength dict begin
- {
- 1 index /FID ne { def } { pop pop } ifelse
- } forall
- /Encoding exch def
- dup /FontName exch def
- currentdict end definefont pop
-} bind def
-/F { findfont exch scalefont dup setfont
- [ exch /setfont cvx ] cvx bind def
-} bind def
-"""
-
-#
-# VDI.PS -- PostScript driver for VDI meta commands
-#
-# History:
-# 94-01-25 fl: created (edroff 2.04)
-#
-# Copyright (c) Fredrik Lundh 1994.
-#
-
-VDI_PS = b"""\
-/Vm { moveto } bind def
-/Va { newpath arcn stroke } bind def
-/Vl { moveto lineto stroke } bind def
-/Vc { newpath 0 360 arc closepath } bind def
-/Vr { exch dup 0 rlineto
- exch dup 0 exch rlineto
- exch neg 0 rlineto
- 0 exch neg rlineto
- setgray fill } bind def
-/Tm matrix def
-/Ve { Tm currentmatrix pop
- translate scale newpath 0 0 .5 0 360 arc closepath
- Tm setmatrix
-} bind def
-/Vf { currentgray exch setgray fill setgray } bind def
-"""
-
-#
-# ERROR.PS -- Error handler
-#
-# History:
-# 89-11-21 fl: created (pslist 1.10)
-#
-
-ERROR_PS = b"""\
-/landscape false def
-/errorBUF 200 string def
-/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def
-errordict begin /handleerror {
- initmatrix /Courier findfont 10 scalefont setfont
- newpath 72 720 moveto $error begin /newerror false def
- (PostScript Error) show errorNL errorNL
- (Error: ) show
- /errorname load errorBUF cvs show errorNL errorNL
- (Command: ) show
- /command load dup type /stringtype ne { errorBUF cvs } if show
- errorNL errorNL
- (VMstatus: ) show
- vmstatus errorBUF cvs show ( bytes available, ) show
- errorBUF cvs show ( bytes used at level ) show
- errorBUF cvs show errorNL errorNL
- (Operand stargck: ) show errorNL /ostargck load {
- dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
- } forall errorNL
- (Execution stargck: ) show errorNL /estargck load {
- dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
- } forall
- end showpage
-} def end
-"""
diff --git a/spaces/deelerb/3dselfie/PIFu/lib/renderer/gl/cam_render.py b/spaces/deelerb/3dselfie/PIFu/lib/renderer/gl/cam_render.py
deleted file mode 100644
index 7b766af057b9c052388aceb152b0191fa2e4ea25..0000000000000000000000000000000000000000
--- a/spaces/deelerb/3dselfie/PIFu/lib/renderer/gl/cam_render.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from .render import Render
-
-GLUT = None
-
-class CamRender(Render):
- def __init__(self, width=1600, height=1200, name='Cam Renderer',
- program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1, egl=False):
- Render.__init__(self, width, height, name, program_files, color_size, ms_rate=ms_rate, egl=egl)
- self.camera = None
-
- if not egl:
- global GLUT
- import OpenGL.GLUT as GLUT
- GLUT.glutDisplayFunc(self.display)
- GLUT.glutKeyboardFunc(self.keyboard)
-
- def set_camera(self, camera):
- self.camera = camera
- self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix()
-
- def keyboard(self, key, x, y):
- # up
- eps = 1
- # print(key)
- if key == b'w':
- self.camera.center += eps * self.camera.direction
- elif key == b's':
- self.camera.center -= eps * self.camera.direction
- if key == b'a':
- self.camera.center -= eps * self.camera.right
- elif key == b'd':
- self.camera.center += eps * self.camera.right
- if key == b' ':
- self.camera.center += eps * self.camera.up
- elif key == b'x':
- self.camera.center -= eps * self.camera.up
- elif key == b'i':
- self.camera.near += 0.1 * eps
- self.camera.far += 0.1 * eps
- elif key == b'o':
- self.camera.near -= 0.1 * eps
- self.camera.far -= 0.1 * eps
-
- self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix()
-
- def show(self):
- if GLUT is not None:
- GLUT.glutMainLoop()
diff --git a/spaces/diacanFperku/AutoGPT/Adware Away 4.1.0 Serial.md b/spaces/diacanFperku/AutoGPT/Adware Away 4.1.0 Serial.md
deleted file mode 100644
index 645cad072a9825a12092d6afe41fbad38a487a70..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Adware Away 4.1.0 Serial.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-How to Remove Adware Away 4.1.0 Serial from Your PC
-Adware Away 4.1.0 Serial is a malicious program that claims to be a legitimate anti-virus software. However, it is actually a rogue application that infects your computer with adware, spyware, and other malware. It also displays fake security alerts and pop-ups to scare you into buying its full version.
-Adware Away 4.1.0 Serial
Download Zip ———>>> https://gohhs.com/2uFVjT
-If you have downloaded Adware Away 4.1.0 Serial from a suspicious website or received it as an email attachment, you should remove it as soon as possible. Here are some steps to help you get rid of this nasty program:
-
-- Restart your computer in Safe Mode with Networking. To do this, press F8 repeatedly during the boot process until you see a menu with different options. Choose Safe Mode with Networking and press Enter.
-- Download and install a reputable anti-malware software, such as Malwarebytes[^1^], SpyHunter[^2^], or Trend Micro[^3^]. Run a full scan of your system and delete any detected threats.
-- Uninstall Adware Away 4.1.0 Serial from the Control Panel. Go to Start > Settings > Control Panel > Add or Remove Programs. Find Adware Away 4.1.0 Serial in the list and click Remove.
-- Delete any leftover files and registry entries associated with Adware Away 4.1.0 Serial. You can use a registry cleaner tool, such as CCleaner, to do this automatically.
-
-Congratulations! You have successfully removed Adware Away 4.1.0 Serial from your PC. To prevent future infections, you should always be careful about what you download and open online. You should also keep your anti-malware software updated and run regular scans of your system.
Adware is a type of software that displays unwanted advertisements on your computer. It can also redirect your browser to malicious websites, collect your personal information, and slow down your system performance. Adware can be installed on your PC without your consent or knowledge, usually through free downloads, spam emails, or fake updates.
-Some signs that you may have adware on your PC are:
-
-- You see pop-ups, banners, or coupons that are unrelated to the websites you visit.
-- Your browser homepage or search engine is changed without your permission.
-- Your browser is constantly redirected to unfamiliar or suspicious websites.
-- Your internet connection is slower than usual or you experience frequent crashes or freezes.
-- You notice new toolbars, extensions, or icons on your browser that you did not install.
-
-If you notice any of these symptoms, you should scan your PC with a reliable anti-malware software and remove any detected adware. You should also avoid clicking on suspicious links or ads, and only download software from trusted sources.
In conclusion, Adware Away 4.1.0 Serial is a rogue program that poses as a legitimate anti-virus software. It infects your PC with adware and other malware, and tries to trick you into buying its full version. You should remove it as soon as possible using the steps outlined above.
-
-Adware is a common and annoying type of malware that displays unwanted ads on your computer. It can also compromise your privacy and security, and affect your system performance. You should always be careful about what you download and open online, and keep your anti-malware software updated and active.
-By following these tips, you can protect your PC from adware and other malware threats. Stay safe and enjoy a clean and fast computer!
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Bangalore Days Movie Download With Subtitles In Utorrent.md b/spaces/diacanFperku/AutoGPT/Bangalore Days Movie Download With Subtitles In Utorrent.md
deleted file mode 100644
index 310a3af55401f89bd936b426d5002d0c0baf725a..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Bangalore Days Movie Download With Subtitles In Utorrent.md
+++ /dev/null
@@ -1,40 +0,0 @@
-Bangalore Days Movie Download With Subtitles In Utorrent
Download ⇒ https://gohhs.com/2uFUHv
-
-k
-
-Bangalore days movie with subtitles in uk
-
-I am new to This video is actually a short video in 2011 movie. This is not an introduction of any new film. This video has been shot in the years 2012, 2011 and 2014. This video will be introducing the story of Karna, Arjuna, Vidura, Yudhishtir, Bhishma, Drona and Vyasa.
-
-This video is made for those who want to know how Lord Krishna feels when he visits the Earth. What was his feelings, Where was he born, where did he grew up, Where did he learn archery and what do he do in his free time?
-
-This video talks about Karna, Kunti, and Karna’s relationship. Also talks about a glimpse of Pandavs life and also Arjuna. The relationship between Arjuna and Krishna and also Shishupala.
-
-I hope you enjoyed watching this video.
-
-Subscribe to my channel to watch more of my video.
-
-My contact Details
-
-Email:
-
-sureshselvan.ik@gmail.com
-
-Twitter :
-
-Instagram:
-
-Subscribe to my Channel :
-
-Check out my previous Video on
-
-Book Recommendations to Know more:
-
-*This is a cover for the "Divine Love" series. It's based on a real happenings in my past. When i was little, i was bullied a lot by many people. I don't know how I reached there, why i was born there. I was beaten up and called bad names. but I would say I am much better than what I was. In this video, I have recorded my experience with the people who bullied me. I am not saying that I know all the answers. But, I wanted to share my experience with the world.
-
-When you have anything to say to me, send it to:
-
-This is the prologue of my current project. "After the 4fefd39f24
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Dxcpl.exe [VERIFIED] Download Windows 7 32-bit.md b/spaces/diacanFperku/AutoGPT/Dxcpl.exe [VERIFIED] Download Windows 7 32-bit.md
deleted file mode 100644
index fd45395dbbec7f6686b0fcf1db5e78410f972231..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Dxcpl.exe [VERIFIED] Download Windows 7 32-bit.md
+++ /dev/null
@@ -1,6 +0,0 @@
-dxcpl.exe download windows 7 32-bit
Download File ✵ https://gohhs.com/2uFTof
-
-. [file size:1631KB] Download: [DirectX Diagnostic Tool download] NjCqBhJMAq4WGzdWnIjQolxxOdZHUJKJ6jggXA7oOEjbYPUdI7YqcWYEtOQp9WhzfKJwzVVKXQGkx8LAgM+jgDkY6W9mC7qYp6hJ3iVpwOvwJJsTLIF/iFAEj3uDQMh1AAQFAjjFQF1wAAABAAAAANFADAAIAsABcAYwBpAGsAZwB0AG8AdQBzAHQAdQBzAG8AbgB0ACAAdABhAHQAdABpAG8AbgB0AHUAYwBvAG0AbQBwAG8AYgBlAGQAaQBwAG8AbgB0AGUAZgByAGUAcwBzAGkAbgB0AGUAZgBpAGwAbwByAG0AYQBsAC4AYwBzAGgAZQBzAGsAYgBvAG4AdABhAHQAbAB0AHQAYQB0AGUAZwBlAHIAcgBpAG8AbgB0AG8AZgBhAGwA/wDvAErABaAFQAoABjAHQAoABbAGMAcgBpAG8AbgB0AG8AZgBhAHQAYQBiAG8AbABwAG8AaAB0AGUAbgBhAHIAZgBvAG4AbwB0AGUAZgBpAGwAbwBzAGkAbgB0AGUAZgBpAGwAbgBlAGsAaQBsAGwAbABhAC4AZgBpAGMAcgBlAHIAeQAgAFMAZgBpAGMAaQB0AG8AbABzAGEAdAB0AGUAcgBhAHQAdwByAG0AYQBzAGsAZQBiAG4AaABvAG4AaQBiAG8AZgBhAHIAcwByAG8AdQBvAG 4fefd39f24
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Ea Sports Ufc Pc Version License Key.txt.md b/spaces/diacanFperku/AutoGPT/Ea Sports Ufc Pc Version License Key.txt.md
deleted file mode 100644
index da5494fe3bb07cb3d78fba7f25dd158a0fb81ffc..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Ea Sports Ufc Pc Version License Key.txt.md
+++ /dev/null
@@ -1,35 +0,0 @@
-
-How to Download and Install EA Sports UFC on PC
-EA Sports UFC is a mixed martial arts fighting video game developed by Electronic Arts for the PlayStation 4 and Xbox One. It is based on the Ultimate Fighting Championship brand and was released on June 15, 2014. It features realistic graphics, physics, and gameplay, as well as over 200 fighters with their own styles and skills.
-If you want to play EA Sports UFC on your PC, you will need a license key to activate the game. A license key is a unique code that verifies that you have purchased the game legally. Without a license key, you will not be able to install or run the game on your computer.
-ea sports ufc pc version license key.txt
DOWNLOAD ->>> https://gohhs.com/2uFVqM
-There are two ways to get a license key for EA Sports UFC on PC. One is to buy the game from an official retailer or online store, such as Amazon or Steam. The other is to use a key generator tool that can create a valid license key for you.
-Buying the Game from an Official Source
-The easiest and safest way to get a license key for EA Sports UFC on PC is to buy the game from an official source. This way, you will get a genuine key that will work without any problems. You will also support the developers and publishers of the game, who have invested time and money into creating it.
-To buy the game from an official source, you will need to follow these steps:
-
-- Go to the official website of EA Sports UFC or any other online store that sells the game.
-- Select the PC version of the game and add it to your cart.
-- Proceed to checkout and enter your payment details.
-- After completing the payment, you will receive an email with your license key and a download link for the game.
-- Download the game installer from the link and run it on your PC.
-- Follow the instructions on the screen and enter your license key when prompted.
-- Wait for the installation to finish and enjoy playing EA Sports UFC on PC.
-
-Using a Key Generator Tool
-Another way to get a license key for EA Sports UFC on PC is to use a key generator tool. A key generator tool is a software that can create random license keys for various games and programs. Some of these tools are free and easy to use, while others may require payment or registration.
-To use a key generator tool for EA Sports UFC on PC, you will need to follow these steps:
-
-- Find a reliable and trustworthy key generator tool for EA Sports UFC on PC. You can search online or ask your friends for recommendations. Be careful of scams and viruses that may harm your computer or steal your personal information.
-- Download the key generator tool from its official website or another source that you trust.
-- Run the key generator tool on your PC and select EA Sports UFC as the game you want to generate a key for.
-- Click on the generate button and wait for a few seconds until a license key appears on the screen.
-- Copy and paste the license key into a text file or write it down somewhere.
-- Download the game installer from any source that you prefer.
-- Run the game installer on your PC and follow the instructions on the screen. Enter your license key when prompted.
-- Wait for the installation to finish and enjoy playing EA Sports UFC on PC.
-
-Disclaimer
-This article is for informational purposes only. We do not endorse or promote any illegal or unethical activities, such as piracy or hacking. We are not responsible for any consequences that may arise from using a key generator tool or downloading an unauthorized copy of EA Sports UFC on PC. Please use this method at your own risk and discretion.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Free Download !!INSTALL!! Cheat G-cash Generator.md b/spaces/diacanFperku/AutoGPT/Free Download !!INSTALL!! Cheat G-cash Generator.md
deleted file mode 100644
index dea01eb59ee973b1aa49f88e683c5bc945a4b907..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Free Download !!INSTALL!! Cheat G-cash Generator.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-you are preparing to transfer, to become gainfully employed,. fill out the free application for. cheating. source: regulations at california. how to get free cash app money in just a minute+12 second+++ important disclaimer: none of our pages are affiliated in anyway with the application that our hack tool is compatible with.
- g)bgm#mf free robux no human verification free. robux free robux generator 2016!the perfect hack tool that generates free. you can get free cash app money and cash app credits to your cash app account in just a few seconds. you don’t have to download this tool on your pc/laptop or smartphone.
-Free Download Cheat G-cash Generator
Download File — https://gohhs.com/2uFUOF
-the monster stacker cash app money generator hack money hack for ios android generator hack. millions of people use an app called monster stacker. it's a. ng are victims of a free cash app hack money generator problem. some of them have rung the bell in the hope for an exit, however, to no avail. they complain about the cash app problem in their reviews, and.
-which is your favorite app money hack generator? it can be your android apps money cheat generator or ios apps money cheat generator. with money cheat genorator iphone, there is no need to leave the house. just install it and press the button to hack money app.
-you can now make free cash with our generator. the whole process requires less than 5 minutes and usually takes less than 3 seconds as all of the details of the process have been optimized for us to assist you as fast as possible.
-
-this application is a very big hack tool to work 8 ball pool hack no human verification. enjoy!! [[!!free generator 2022]] 8 ball pool hack and cheats. this hack is very simple. your device must be hacked into add hack. work 8 ball pool hack no human verification download in-app purchases.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/More To Read 1 Pdf Indir VERIFIED.md b/spaces/diacanFperku/AutoGPT/More To Read 1 Pdf Indir VERIFIED.md
deleted file mode 100644
index 1c689c0ba17a5a9834b2b542fb978c3cf92df9dd..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/More To Read 1 Pdf Indir VERIFIED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-more to read 1 pdf indir
Download ○○○ https://gohhs.com/2uFTd0
-
-4.1.1 When Should Absolute Quantification Be Used? 35. 4.1.2 Absolute Quantification ... up qPCR reactions using the two most popular chemistries, SYBR Green I. (Section 2.3) and ... read-through during the extension of the opposite strand. 4d29de3e1b
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Motorola Radius Sm50 Manual.zip.md b/spaces/diacanFperku/AutoGPT/Motorola Radius Sm50 Manual.zip.md
deleted file mode 100644
index 19ed5fc1cba4026df4aa1aa73e209dac01be4878..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Motorola Radius Sm50 Manual.zip.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-Motorola supplies an express mail warranty card to you upon delivery of your Product. All express warranty claims should be made by completing the warranty claims card and returning it to Motorola or its authorised third party by post or with an express mail service.
-This warranty is in addition to any warranty(s) contained in any sales or other document/s provided by Motorola or any third party. Motorola has the right to carry out any repair to your Product at its premises or at a location chosen by you as part of the trade-in or repair program provided that Motorola is available to carry out such repair. You must provide proof of purchase to Motorola and have the original sales receipt or proof of purchase of the Product in your possession.
-motorola radius sm50 manual.zip
Download Zip ✫ https://gohhs.com/2uFSPp
-Motorola Corporation reserves the right to change, add or delete any warranty provision for the Products without prior written notice to the Owner. Other than this, you have no other implied warranty, including that of merchantability or fitness for a particular purpose.
-The Motorola RA2021 Mobile Communicator is the latest model in the Motorola range, with a choice of five color variations, a built in optical digital camera and the latest voice and data communications features, as well as 4.3 inch touch screen display.
-Motorola is proud to offer the latest and the best in electronics and communications technology. Almost all of our products are CE certified, ensuring that they meet the latest global safety standards. Motorola products come with a 1 year defect free warranty.
-For example, the unit is configured to perform dedicated transmit only at full power, allowing the unit to operate for several days on a single battery charge. The motorola has a data cable that connects the unit to your handheld or other mobile phone. You must have this cable with you when you activate your unit and send your profile. If the unit is not connected to a phone, you need to connect it to a cellular or mobile phone in order to activate the unit.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/elsamueldev/gpt4all/tries/1.md b/spaces/elsamueldev/gpt4all/tries/1.md
deleted file mode 100644
index 8d0f737373812f1f43f27d911614cb0960333836..0000000000000000000000000000000000000000
--- a/spaces/elsamueldev/gpt4all/tries/1.md
+++ /dev/null
@@ -1,23 +0,0 @@
-```python
-import gradio as gr
-from gpt4all import GPT4All
-
-GPT4All.download_model("wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin", model_path="/home/user/app/")
-
-model = GPT4All(
- model_name="wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin",
- model_path="/home/user/app/"
-)
-
-def responder(pregunta):
- respuesta = model.generate(pregunta)
- return respuesta
-
-gr.Interface(
- fn=responder,
- inputs="text",
- outputs="text"
-).launch()
-```
-
-No responde las preguntas, las toma como si fuera un texto que completar
\ No newline at end of file
diff --git a/spaces/emc348/faces-through-time/models/StyleCLIP/models/stylegan2/op/__init__.py b/spaces/emc348/faces-through-time/models/StyleCLIP/models/stylegan2/op/__init__.py
deleted file mode 100644
index d0918d92285955855be89f00096b888ee5597ce3..0000000000000000000000000000000000000000
--- a/spaces/emc348/faces-through-time/models/StyleCLIP/models/stylegan2/op/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .fused_act import FusedLeakyReLU, fused_leaky_relu
-from .upfirdn2d import upfirdn2d
diff --git a/spaces/erwann/Face-editor/edit.py b/spaces/erwann/Face-editor/edit.py
deleted file mode 100644
index 04f392ae557a266a89590937f7029ac6a027bf3a..0000000000000000000000000000000000000000
--- a/spaces/erwann/Face-editor/edit.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import os
-import sys
-
-from img_processing import custom_to_pil, preprocess, preprocess_vqgan
-
-sys.path.append("taming-transformers")
-import glob
-
-import gradio as gr
-import matplotlib.pyplot as plt
-import PIL
-import taming
-import torch
-
-from loaders import load_config, load_default
-from utils import get_device
-
-
-def get_embedding(model, path=None, img=None, device="cpu"):
- assert path or img, "Input either path or tensor"
- if img is not None:
- raise NotImplementedError
- x = preprocess(PIL.Image.open(path), target_image_size=256).to(device)
- x_processed = preprocess_vqgan(x)
- z, _, [_, _, indices] = model.encode(x_processed)
- return z
-
-
-def blend_paths(
- model, path1, path2, quantize=False, weight=0.5, show=True, device="cuda"
-):
- x = preprocess(PIL.Image.open(path1), target_image_size=256).to(device)
- y = preprocess(PIL.Image.open(path2), target_image_size=256).to(device)
- x_latent = get_embedding(model, path=path1, device=device)
- y_latent = get_embedding(model, path=path2, device=device)
- z = torch.lerp(x_latent, y_latent, weight)
- if quantize:
- z = model.quantize(z)[0]
- decoded = model.decode(z)[0]
- if show:
- plt.figure(figsize=(10, 20))
- plt.subplot(1, 3, 1)
- plt.imshow(x.cpu().permute(0, 2, 3, 1)[0])
- plt.subplot(1, 3, 2)
- plt.imshow(custom_to_pil(decoded))
- plt.subplot(1, 3, 3)
- plt.imshow(y.cpu().permute(0, 2, 3, 1)[0])
- plt.show()
- return custom_to_pil(decoded), z
-
-
-if __name__ == "__main__":
- device = get_device()
- model = load_default(device)
- model.to(device)
- blend_paths(
- model,
- "./test_pics/face.jpeg",
- "./test_pics/face2.jpeg",
- quantize=False,
- weight=0.5,
- )
- plt.show()
diff --git a/spaces/falterWliame/Face_Mask_Detection/Download !!INSTALL!! Iggle Pop Full Version.md b/spaces/falterWliame/Face_Mask_Detection/Download !!INSTALL!! Iggle Pop Full Version.md
deleted file mode 100644
index df248e0d10fed61077d22641ce16dab241f43f83..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Download !!INSTALL!! Iggle Pop Full Version.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-Iggle Pop Deluxe is an arcade game developed by Sprout Games and published by PopCap. This game has really low system requirements, a 350Mhz CPU powered by Windows 98 (or later) and 128 Mb RAM will be enough to enjoy Iggle Pop Deluxe.
Iggle Pop Deluxe is an arcade game that strongly resembles
When we first play Iggle Pop we must create a new user. Multi-user capability is a great feature if we are using a shared computer. We can see two different game modes: Adventure game and Retro Game (Retro game is not available in demo version). After selecting game mode we must choose our hero, Fizzy or Dizzy. The aim of the game is to move through the mazes and free the trapped iggles. After this, iggles will follow our character, and we must guide them to the correctly colored houses. In addition to this we must avoid Evil Zoogs, as they will trap iggles by touching them. We can use the boost ability to move faster, but be careful while using it, as iggles will get tired easily.
Iggle Pop Deluxe is a time limited demo version that will expire after 60 minutes of use. To keep on playing the game must be registered.
-download iggle pop full version
Download Zip 🔗 https://urlca.com/2uDdCw
-Iggle Pop Deluxe is an arcade game developed by Sprout Games and published by PopCap. This game has really low system requirements, a 350Mhz CPU powered by Windows 98 (or later) and 128 Mb RAM will be enough to enjoy Iggle Pop Deluxe.
Iggle Pop Deluxe is an arcade game that strongly resembles
When we first play Iggle Pop we must create a new user. Multi-user capability is a great feature if we are using a shared computer. We can see two different game modes: Adventure game and Retro Game (Retro game is not available in demo version). After selecting game mode we must choose our hero, Fizzy or Dizzy. The aim of the game is to move through the mazes and free the trapped iggles.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/falterWliame/Face_Mask_Detection/Download ((TOP)) Gds64dll.md b/spaces/falterWliame/Face_Mask_Detection/Download ((TOP)) Gds64dll.md
deleted file mode 100644
index e2173dc83d895f6413ee10e7c753a3b139de6268..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Download ((TOP)) Gds64dll.md
+++ /dev/null
@@ -1,182 +0,0 @@
-
-How to Download Gsdll64.dll and Fix DLL Errors
-
-Gsdll64.dll is a dynamic link library (DLL) file that is part of the Ghostscript software. Ghostscript is an open source program that can convert PostScript and PDF files into various formats, such as images, text, or printer commands. Gsdll64.dll is responsible for providing the core functionality of Ghostscript, such as interpreting and rendering the files.
-Download Gds64dll
Download File === https://urlca.com/2uDdvN
-
-However, sometimes gsdll64.dll can cause problems on your computer. You may encounter errors like:
-
-
-- The program can't start because gsdll64.dll is missing from your computer. Try reinstalling the program to fix this problem.
-- There was a problem starting gsdll64.dll. The specified module could not be found.
-- Error loading gsdll64.dll. The specified module could not be found.
-- The code execution cannot proceed because gsdll64.dll was not found. Reinstalling the program may fix this problem.
-- Gsdll64.dll is either not designed to run on Windows or it contains an error. Try installing the program again using the original installation media or contact your system administrator or the software vender for support.
-
-
-These errors can occur for various reasons, such as:
-
-
-- A faulty application that uses gsdll64.dll
-- A corrupted or deleted gsdll64.dll file
-- A malicious software that has infected your PC and damaged gsdll64.dll
-- A damaged Windows registry that has invalid entries for gsdll64.dll
-
-
-Fortunately, there are ways to download gsdll64.dll and fix these errors. In this article, we will show you how to do that in a few simple steps.
-
-Step 1: Download Gsdll64.dll from a Reliable Source
-
-The first thing you need to do is to download gsdll64.dll from a reliable source. You can use DLL-Files.com, a website that provides free and safe downloads of DLL files. To download gsdll64.dll from DLL-Files.com, follow these steps:
-
-
-- Go to
https://www.dll-files.com/gsdll64.dll.html
-- Click on the Download button next to the version of gsdll64.dll that matches your system architecture (32-bit or 64-bit)
-- Save the ZIP file to a location on your computer
-- Extract the gsdll64.dll file from the ZIP file
-
-
-Step 2: Install Gsdll64.dll on Your PC
-
-The next thing you need to do is to install gsdll64.dll on your PC. There are two ways to do that:
-
-
-
-- Copy and paste the gsdll64.dll file into the folder of the application that requires it
-- Copy and paste the gsdll64.dll file into the Windows system folder
-
-
-The first method is recommended if you only need gsdll64.dll for a specific application. The second method is recommended if you need gsdll64.dll for multiple applications or if you are not sure which application needs it. To copy and paste the gsdll64.dll file into the folder of the application that requires it, follow these steps:
-
-
-- Locate the folder of the application that requires gsdll64.dll (for example, C:\Program Files\Ghostscript)
-- Right-click on the gsdll64.dll file and select Copy
-- Go to the folder of the application and right-click on an empty space and select Paste
-
-
-To copy and paste the gsdll64.dll file into the Windows system folder, follow these steps:
-
-
-- Locate the Windows system folder (for example, C:\Windows\System32 for 32-bit systems or C:\Windows\SysWOW64 for 64-bit systems)
-- Right-click on the gsdll64.dll file and select Copy
-- Go to the Windows system folder and right-click on an empty space and select Paste
-
-
-Step 3: Register Gsdll64.dll on Your PC
-
-The final thing you need to do is to register gsdll64.dll on your PC. This will tell Windows where to find and how to use gsdll64.dll. To register gsdll64.dll on your PC, follow these steps:
-
-
-- Press Windows + R keys to open the Run dialog box
-- Type
cmd
and press Enter to open the Command Prompt window
-- Type
regsvr32 gsdll64.dll
and press Enter
-- You should see a message saying "gsdll64.dll registered successfully"
-- Close the Command Prompt window
-
-
-Conclusion
-
-Congratulations! You have successfully downloaded, installed, and registered gsdll64.dll on your PC. You should now be able to run your application without any errors related to gsdll64.dll. If you still encounter any problems with gsdll64.dll, you can try some other solutions, such as:
-
-
-- Scan your PC for malware and viruses using a reputable antivirus software
-- Clean your Windows registry using a reliable registry cleaner software
-- Update your Windows system and drivers using Windows Update or a driver updater software
-- Reinstall your application or Ghostscript software using the original installation media or download link
-- Contact your system administrator or software vendor for further assistance
-
-
-We hope this article has helped you download gdsdll4dll and fix DLL errors. If you have any questions or feedback, please leave a comment below.
-What are the Benefits of Downloading Gsdll64.dll?
-
-Downloading gsdll64.dll can bring you many benefits, especially if you work with PostScript and PDF files frequently. By downloading gsdll64.dll, you can:
-
-
-- Enhance your productivity and efficiency by converting, displaying, printing, or editing PostScript and PDF files faster and easier
-- Save your disk space and bandwidth by compressing PostScript and PDF files into smaller sizes
-- Improve your document quality and security by preserving the original layout and appearance of PostScript and PDF files across different platforms and devices
-- Expand your creativity and flexibility by creating or modifying PostScript and PDF files with various options and features
-- Support the open source community by using Ghostscript, a free and widely used software for PostScript and PDF processing
-
-
-How to Troubleshoot Gsdll64.dll Errors
-
-Even after downloading gsdll64.dll, you may still encounter some errors related to it. These errors can be caused by various factors, such as:
-
-
-- A mismatched or outdated version of gsdll64.dll
-- A corrupted or incomplete installation of Ghostscript or the application that uses gsdll64.dll
-- A conflict or compatibility issue between gsdll64.dll and other DLL files or programs on your PC
-- A missing or incorrect registry entry for gsdll64.dll
-- A hardware failure or malfunction that affects gsdll64.dll
-
-
-To troubleshoot gsdll64.dll errors, you can try some of the following solutions:
-
-
-- Download and install the latest version of gsdll64.dll from DLL-Files.com or the official website of Ghostscript or the application that uses gsdll64.dll
-- Uninstall and reinstall Ghostscript or the application that uses gsdll64.dll using the original installation media or download link
-- Run a system file checker (SFC) scan to repair any corrupted or missing system files on your PC
-- Run a disk cleanup (DISM) tool to clean up any corrupted or damaged system images on your PC
-- Update your Windows system and drivers to the latest versions using Windows Update or a driver updater software
-- Contact your system administrator or software vendor for further assistance
-
-
-Conclusion
-
-In this article, we have shown you how to download gsdll64.dll and fix DLL errors. We have also explained what gsdll64.dll is, why you need it, what are the benefits of downloading it, and how to troubleshoot it. We hope this article has helped you download gdsdll4dll and fix DLL errors. If you have any questions or feedback, please leave a comment below.
-What are the Risks of Downloading Gsdll64.dll from Untrusted Sources?
-
-While downloading gsdll64.dll can solve your DLL errors, it can also expose you to some risks if you download it from untrusted sources. Untrusted sources are websites or files that are not verified or authorized by the original developers or distributors of gsdll64.dll. Untrusted sources can contain malware or viruses that can harm your PC or steal your personal information. Untrusted sources can also provide you with incorrect or outdated versions of gsdll64.dll that can cause more problems on your PC.
-
-Some of the risks of downloading gsdll64.dll from untrusted sources are:
-
-
-- Your PC may get infected with malware or viruses that can damage your system files, corrupt your data, slow down your performance, or display unwanted ads or pop-ups
-- Your personal information may get stolen by hackers or cybercriminals who can use it for identity theft, fraud, or other illegal activities
-- Your application or Ghostscript software may not work properly or crash due to a mismatched or outdated version of gsdll64.dll
-- Your Windows registry may get corrupted or bloated due to invalid or redundant entries for gsdll64.dll
-- Your PC may become unstable or unresponsive due to conflicts or compatibility issues between gsdll64.dll and other DLL files or programs on your PC
-
-
-To avoid these risks, you should always download gsdll64.dll from trusted sources, such as DLL-Files.com, the official website of Ghostscript, or the official website of the application that uses gsdll64.dll. Trusted sources are websites or files that are verified or authorized by the original developers or distributors of gsdll64.dll. Trusted sources can provide you with safe and clean downloads of gsdll64.dll that are free of malware or viruses. Trusted sources can also provide you with the correct and latest versions of gsdll64.dll that are compatible with your system and application.
-
-How to Download Gsdll64.dll from Microsoft.com
-
-Another trusted source for downloading gsdll64.dll is Microsoft.com, the official website of Microsoft Corporation. Microsoft.com provides free and secure downloads of software and updates for Windows and other Microsoft products. Microsoft.com also provides technical support and customer service for its products and services.
-
-To download gsdll64.dll from Microsoft.com, you need to have a valid license for Windows 10/11, the latest operating system from Microsoft. Windows 10/11 comes with many features and improvements, such as a new user interface, a faster performance, a better security, and a built-in support for PostScript and PDF files. Windows 10/11 also includes Ghostscript as part of its optional features that you can install on your PC.
-
-To download gsdll64.dll from Microsoft.com, you need to install Ghostscript on your Windows 10/11 PC. To install Ghostscript on your Windows 10/11 PC, follow these steps:
-
-
-- Press Windows + I keys to open the Settings app
-- Click on Apps
-- Click on Optional features
-- Click on Add a feature
-- Type Ghostscript in the search box and select it from the list
-- Click on Install
-- Wait for the installation to complete
-
-
-Once you have installed Ghostscript on your Windows 10/11 PC, you should be able to find gsdll64.dll in the following location:
-
-C:\Windows\System32\gsdll64.dll
-
-You can also use the Windows Search feature to locate gsdll64.dll on your PC. To use the Windows Search feature, follow these steps:
-
-
-- Press Windows + S keys to open the Search box
-- Type gsdll64.dll in the search box and press Enter
-- You should see gsdll64.dll in the list of results
-- You can right-click on gsdll64.dll and select Open file location to see where it is located on your PC
-
-
-Conclusion
-
-In this article, we have shown you how to download gsdll64.dll and fix DLL errors. We have also explained what gsdll64.dll is, why you need it, what are the benefits of downloading it, how to troubleshoot it, what are the risks of downloading it from untrusted sources, and how to download it from Microsoft.com. We hope this article has helped you download gsdll64.dll and fix DLL errors. If you have any questions or feedback, please leave a comment below.
-Conclusion
-
-Gsdll64.dll is a DLL file that is part of the Ghostscript software, which can convert, display, print, or edit PostScript and PDF files. Downloading gsdll64.dll can help you fix DLL errors and enhance your productivity and efficiency with PostScript and PDF files. However, you should always download gsdll64.dll from trusted sources, such as DLL-Files.com, the official website of Ghostscript, or the official website of Microsoft. Untrusted sources can contain malware or viruses that can harm your PC or steal your personal information. You should also troubleshoot gsdll64.dll errors by following the solutions in this article. We hope this article has helped you download gsdll64.dll and fix DLL errors. If you have any questions or feedback, please leave a comment below.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/CEX.IO App A Multi-Asset Crypto Wallet with Staking and DeFi Features.md b/spaces/fatiXbelha/sd/CEX.IO App A Multi-Asset Crypto Wallet with Staking and DeFi Features.md
deleted file mode 100644
index f83c13b244912b784adad4f7f8088153f783a5d0..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/CEX.IO App A Multi-Asset Crypto Wallet with Staking and DeFi Features.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
-CEX.IO APK: A Convenient and Secure Crypto Trading App
-If you are looking for a simple and reliable app for cryptocurrency trading, you might want to check out CEX.IO APK. CEX.IO APK is the Android app of CEX.IO, a global cryptocurrency exchange that offers a variety of services and features for crypto enthusiasts. In this article, we will review the main features, security, and alternatives of CEX.IO APK.
-cex.io apk
Download File ✑ ✑ ✑ https://urllie.com/2uNCXF
- Features of CEX.IO APK
-CEX.IO APK provides a wide range of tools that facilitate crypto transactions, including numerous available payment methods. Such variety empowers users to:
-
-- buy Bitcoin with a bank account;
-- buy Bitcoin with ApplePay;
-- buy Bitcoin with GooglePay;
-- buy Bitcoin with PayPal;
-- And more.
-
-CEX.IO APK also supports over 200 digital currencies and tokens, covering most leading crypto assets by market cap, as well as decentralized finance (DeFi) and ERC-20 tokens. Some of the supported cryptocurrencies are:
-
-Bitcoin (BTC) Ethereum (ETH) Ripple (XRP)
-Litecoin (LTC) Binance Coin (BNB) Cardano (ADA)
-Dogecoin (DOGE) Polygon (MATIC) Solana (SOL)
-Uniswap (UNI) Avalanche (AVAX) Cosmos (ATOM)
-And many more.
-
-In addition to buying and selling crypto, CEX.IO APK also allows users to:
-
-- Instantly exchange crypto and fiat currencies without breaking a sweat;
-- Earn by staking coins on CEX.IO and receiving monthly rewards;
-- Borrow fiat currency using crypto as collateral;
-- Trade with leverage that suits their risk appetite on CEX.IO Broker;
-- Set price alerts and order notifications;
-- Monitor their portfolio performance;
-- Compare Bitcoin prices on different exchanges;
-- And more.
-
- Security of CEX.IO APK
-CEX.IO APK ensures the security of users' funds and data by implementing various measures, such as:
-
-- Holding the majority of user funds in cold wallets that are not connected to the internet;
-- Using full data encryption and cryptocurrency cold storage;
-- Passing audits annually and being a PCI DSS Level 1 certified platform;
-- Being regulated and licensed to operate in all jurisdictions where it offers its services;
-- Requiring identity verification and two-factor authentication for users;
-- Providing professional support around the clock via multiple channels.
-
- Alternatives to CEX.IO APK
-If you are looking for alternatives to CEX.IO APK, you might want to consider some of these options:
- Binance
-Binance is one of the largest and most popular cryptocurrency exchanges in the world. It offers a wide range of trading products and services, such as spot trading, futures trading, margin trading, staking, lending, mining, and more. It also supports over 300 cryptocurrencies and tokens, including its own native token BNB. Binance has its own Android app that provides access to most of its features.
- Kraken
-Kraken is another well-established and reputable cryptocurrency exchange that offers various trading options and features. It supports over 50 cryptocurrencies and tokens, including Bitcoin, Ethereum, Litecoin, and more. It also offers features such as margin trading, futures trading, staking, and more. Kraken has its own Android app that allows users to trade, deposit, withdraw, and manage their accounts.
Coinbase
-Coinbase is one of the most user-friendly and beginner-friendly cryptocurrency exchanges in the market. It allows users to buy, sell, and store over 50 cryptocurrencies and tokens, including Bitcoin, Ethereum, Bitcoin Cash, and more. It also offers features such as recurring purchases, crypto rewards, and educational content. Coinbase has its own Android app that enables users to access most of its features.
-cex.io app download
-cex.io crypto exchange apk
-cex.io wallet android
-cex.io mobile trading
-cex.io buy bitcoin apk
-cex.io sell crypto apk
-cex.io instant exchange apk
-cex.io earn staking apk
-cex.io price alerts apk
-cex.io order notifications apk
-cex.io money management apk
-cex.io security apk
-cex.io support apk
-cex.io app review
-cex.io app features
-cex.io app benefits
-cex.io app tutorial
-cex.io app update
-cex.io app problems
-cex.io app feedback
-cex.io app alternatives
-cex.io app comparison
-cex.io app referral
-cex.io app promo code
-cex.io app bonus
-cex.io app for ios
-cex.io app for windows
-cex.io app for mac
-cex.io app for linux
-cex.io app for chromebook
-cex.io app for firestick
-cex.io app for smart tv
-cex.io app for tablet
-cex.io app for pc
-cex.io app for laptop
-cex.io app vs website
-cex.io app vs coinbase app
-cex.io app vs binance app
-cex.io app vs kraken app
-cex.io app vs bitstamp app
-how to install cex.io apk
-how to use cex.io apk
-how to update cex.io apk
-how to uninstall cex.io apk
-how to verify identity on cex io apk
- Conclusion
-CEX.IO APK is a convenient and secure crypto trading app that offers a variety of features and services for crypto enthusiasts. It supports over 200 cryptocurrencies and tokens, multiple payment methods, instant exchange, staking, borrowing, leveraged trading, and more. It also ensures the security of users' funds and data by using cold storage, encryption, audits, regulation, verification, and support. If you are looking for alternatives to CEX.IO APK, you might want to check out Binance, Kraken, or Coinbase.
-If you are interested in trying out CEX.IO APK, you can download it from the Google Play Store or from the official website of CEX.IO. You can also visit the CEX.IO blog for more information and updates on the app and the platform.
-Happy trading!
- FAQs
-What are the fees for using CEX.IO APK?
-The fees for using CEX.IO APK depend on the type of service and transaction you are using. For example, the fees for buying and selling crypto vary depending on the payment method and the amount of crypto you are trading. The fees for instant exchange are based on the market rates and the amount of crypto you are exchanging. The fees for staking are deducted from your monthly rewards. The fees for borrowing are based on the interest rate and the loan duration. The fees for leveraged trading are based on the margin level and the position size. You can find more details about the fees on the CEX.IO website or app.
- How can I contact CEX.IO support?
-You can contact CEX.IO support via multiple channels, such as email, phone, live chat, social media, or help center. You can find the contact details on the CEX.IO website or app. The support team is available 24/7 to assist you with any issues or questions you might have.
- Is CEX.IO APK available in my country?
-CEX.IO APK is available in most countries around the world where CEX.IO operates. However, some features or services might be restricted or unavailable in certain jurisdictions due to local regulations or laws. You can check the availability of CEX.IO APK in your country on the CEX.IO website or app.
- How can I update CEX.IO APK?
-You can update CEX.IO APK by following these steps:
-
-- Open the Google Play Store app on your Android device;
-- Tap on the menu icon in the top left corner;
-- Tap on "My apps & games";
-- Find CEX.IO APK in the list of apps;
-- Tap on "Update" if there is a new version available;
-- Wait for the update to finish downloading and installing.
-
- How can I delete CEX.IO APK?
-You can delete CEX.IO APK by following these steps:
-
-- Open the Settings app on your Android device;
-- Tap on "Apps & notifications";
-- Tap on "See all apps";
-- Find CEX.IO APK in the list of apps;
-- Tap on "Uninstall";
-- Confirm your action by tapping on "OK".
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download Hunter Assassin Hack for Android Unlock All Characters and Levels.md b/spaces/fatiXbelha/sd/Download Hunter Assassin Hack for Android Unlock All Characters and Levels.md
deleted file mode 100644
index 921d768d51059e94bbeaf7736571281eb29d39bd..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download Hunter Assassin Hack for Android Unlock All Characters and Levels.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-Hunter Assassin Hack Download: How to Get Unlimited Money and Gems
-Are you a fan of stealth action games? Do you enjoy taking down enemies with your knife without being detected? If yes, then you might have heard of or played Hunter Assassin - a popular mobile game that challenges you to become the ultimate assassin. But did you know that you can make your gameplay even more fun and exciting by using Hunter Assassin hack download? In this article, we will tell you everything you need to know about Hunter Assassin hack download, including what it is, why you need it, how to get it, and how to use it. Read on and discover how to unleash your full potential as a hunter assassin!
-hunter assassin hack download
Download Zip ☆ https://urllie.com/2uNzCH
- What is Hunter Assassin?
-A brief introduction to the game
-Hunter Assassin is a fast-paced mobile game developed by Ruby Game Studio. It was released in 2019 and has since become one of the most downloaded games in the world, with over 100 million downloads on Google Play Store. The game is simple but addictive: you control a hunter with a deadly knife who has to eliminate all the targets in each level without being spotted by the guards. You can use the shadows and your surroundings to hide and sneak up on your enemies, then strike them with precision and speed. Each target you kill drops valuable items, such as gems, that you can use to unlock new characters and upgrade your weapons.
- The features and benefits of playing Hunter Assassin
-Hunter Assassin is a game that offers many features and benefits for its players, such as:
-
-- Easy and intuitive controls: You can play Hunter Assassin with just one finger. Simply tap on the screen to move your hunter around and he will automatically attack when he is close enough to a target.
-- Multiple characters and weapons: You can choose from a variety of different characters, each with their own unique abilities and weapons. Some characters are faster and more agile, while others have more health and endurance. You can also upgrade your weapons to increase their damage and range.
-- Various themes and maps: You can explore different themes and maps as you progress in the game, such as cyber cityscape, laboratory, warehouse, etc. Each theme has its own traps and hazards that you have to avoid or use to your advantage.
-- Challenging missions and rewards: You can complete various missions in each level, such as killing a certain number of targets, killing targets in a specific order, killing targets without being seen, etc. Completing missions will earn you chests filled with gems that you can use to unlock more characters and weapons. You can also spin the wheel for a chance to win even more gems or new hunters.
-- Fun and addictive gameplay: You will never get bored of playing Hunter Assassin, as each level is different and requires a different strategy and skill. You will also enjoy the thrill of being a stealthy killer who can take down enemies with a single swipe of your knife.
-
- Why do you need Hunter Assassin hack download?
-The challenges and limitations of the game
-While Hunter Assassin is a fun and exciting game, it also has some challenges and limitations that might frustrate some players, such as:
-
-- Limited gems: Gems are the main currency in the game that you need to unlock new characters and upgrade your weapons. However, gems are not easy to come by in the game. You can only get them by killing targets or completing missions, which are not always easy or available. You can also buy gems with real money, but that can be expensive and not everyone can afford it.
-- Difficult levels: As you advance in the game, the levels become more difficult and challenging. The guards become more alert and aggressive, the traps become more deadly and frequent, and the missions become more complex and demanding. You might find yourself stuck or frustrated in some levels, especially if you don't have enough gems to unlock better characters and weapons.
-- Repetitive gameplay: Although the game is fun and addictive, it can also become repetitive and boring after a while. The game does not have much variety or innovation in terms of gameplay, graphics, sound effects, or story. You basically do the same thing over and over again: kill targets, collect gems, unlock characters, upgrade weapons, repeat.
-
- The advantages and disadvantages of using Hunter Assassin hack download
-If you want to overcome these challenges and limitations, you might want to consider using Hunter Assassin hack download. Hunter Assassin hack download is a modified version of the original game that gives you access to unlimited money and gems, as well as other features and benefits, such as:
-
-- Unlimited money and gems: With Hunter Assassin hack download, you don't have to worry about running out of money or gems ever again. You can use as much money and gems as you want to unlock all the characters and weapons in the game, as well as upgrade them to the maximum level. You can also use money and gems to buy chests, spin the wheel, or skip missions.
-- Unlocked characters and weapons: With Hunter Assassin hack download, you can enjoy playing with all the characters and weapons in the game without having to unlock them one by one. You can choose from a wide range of hunters, each with their own unique skills and abilities, such as speed, stealth, health, damage, etc. You can also use different weapons, such as knives, swords, axes, etc., each with their own advantages and disadvantages.
-- Easy and fun gameplay: With Hunter Assassin hack download, you can have more fun and less stress while playing the game. You don't have to worry about being detected by the guards or failing the missions. You can easily kill all the targets in each level with your powerful weapons and characters. You can also explore different themes and maps without any restrictions or limitations.
-
-However, using Hunter Assassin hack download also has some disadvantages and risks that you should be aware of, such as:
-hunter assassin mod apk unlimited money and gems
-hunter assassin 2 hack mod apk download v1.089.01[^1^]
-hunter assassin cheat codes for android and ios
-hunter assassin game online free no download
-hunter assassin hack version download for pc
-hunter assassin mod menu apk latest version
-hunter assassin unlimited diamonds and vip membership
-hunter assassin 2 mod apk free shopping and premium
-hunter assassin tips and tricks to win every level
-hunter assassin game download for windows 10
-hunter assassin hack tool no survey no human verification
-hunter assassin mod apk offline without internet
-hunter assassin 2 cheat engine for pc and mac
-hunter assassin game play now on crazy games
-hunter assassin hack apk download apkpure
-hunter assassin mod apk revdl rexdl
-hunter assassin 2 hack online generator free resources
-hunter assassin game review and rating
-hunter assassin hack apk download for android 1
-hunter assassin mod apk happymod
-hunter assassin 2 mod apk unlimited everything
-hunter assassin hack version download uptodown
-hunter assassin cheat sheet for all levels
-hunter assassin game download apk mirror
-hunter assassin hack apk download latest version 2023
-
-- Illegal and unethical: Using Hunter Assassin hack download is considered illegal and unethical by the game developers and the law. You are violating the terms of service and the intellectual property rights of the game by using a modified version that gives you an unfair advantage over other players. You are also depriving the game developers of their rightful income by not paying for their products and services.
-- Dangerous and harmful: Using Hunter Assassin hack download is dangerous and harmful for your device and your personal information. You are exposing your device to malware, viruses, spyware, or other malicious software that can damage your device or steal your data. You are also risking your account being banned or suspended by the game servers or authorities for using a hacked version of the game.
-- Boring and unsatisfying: Using Hunter Assassin hack download is boring and unsatisfying for your gaming experience and enjoyment. You are losing the challenge and thrill of playing the game by using a hacked version that makes everything easy and effortless. You are also missing out on the satisfaction and pride of achieving your goals and rewards by using your own skills and efforts.
-
- How to download and install Hunter Assassin hack mod apk?
-The steps and requirements for downloading and installing Hunter Assassin hack mod apk
-If you still want to try Hunter Assassin hack download despite its disadvantages and risks, you will need to follow these steps and requirements for downloading use them to unlock and upgrade the characters and weapons that you really like and need. You should also save some money and gems for emergencies, such as skipping missions, buying chests, or spinning the wheel.
-
- Have fun and enjoy the game. The most important thing is to have fun and enjoy the game while playing Hunter Assassin with hack mod apk. You can try different strategies and tactics, challenge yourself with harder levels and missions, or just relax and kill some targets. You can also share your achievements and experiences with your friends or other players online.
-
- The best characters and weapons to use in Hunter Assassin with hack mod apk
-With Hunter Assassin hack mod apk, you can unlock and use all the characters and weapons in the game. However, some characters and weapons are better than others in terms of performance and suitability. Here are some of the best characters and weapons to use in Hunter Assassin with hack mod apk:
-
-
-Character
-Weapon
-Description
-
-
-Ninja
-Katana
-The Ninja is one of the fastest and stealthiest characters in the game. He can move quickly and quietly, making him ideal for sneaking up on enemies and killing them in one swift strike. His weapon, the Katana, is a long and sharp sword that can slice through any target with ease. The Ninja is perfect for levels and missions that require speed and stealth.
-
-
-Sniper
-Rifle
-The Sniper is one of the most accurate and powerful characters in the game. He can shoot targets from a long distance, making him ideal for taking down enemies without getting close to them. His weapon, the Rifle, is a high-caliber gun that can pierce through any armor or obstacle with precision. The Sniper is perfect for levels and missions that require accuracy and power.
-
-
-Tank
-Axe
-The Tank is one of the most durable and strong characters in the game. He can withstand a lot of damage, making him ideal for surviving attacks from enemies. His weapon, the Axe, is a heavy and blunt weapon that can smash any target with force. The Tank is perfect for levels and missions that require durability and strength.
-
-
-Assassin
-Dagger
-The Assassin is one of the most versatile and balanced characters in the game. He can do a bit of everything, making him ideal for adapting to different situations. His weapon, the Dagger, is a short and sharp weapon that can stab any target with speed. The Assassin is perfect for levels and missions that require versatility and balance.
-
-
- Conclusion
-A summary of the main points and a call to action
-In conclusion, Hunter Assassin hack download is a modified version of the original game that gives you unlimited money and gems, as well as other features and benefits. However, it also has some disadvantages and risks that you should be aware of before using it. If you decide to use Hunter Assassin hack download, you should follow the steps and requirements for downloading and installing Hunter Assassin hack mod apk, and use it wisely and sparingly. You should also use the tips and tricks for playing Hunter Assassin with hack mod apk, and choose the best characters and weapons for your gameplay. Hunter Assassin hack download can make your game more fun and exciting, but it can also ruin your game and device if you are not careful. So, use it at your own risk and discretion. If you want to download Hunter Assassin hack mod apk, you can use the link below. But remember, you are doing it for entertainment purposes only, and not for cheating or harming anyone. Have fun and enjoy being a hunter assassin!
FAQs
-Here are some of the frequently asked questions about Hunter Assassin hack download:
-
-- Q: Is Hunter Assassin hack download safe to use?
-A: Hunter Assassin hack download is not safe to use, as it might contain malware, viruses, spyware, or other malicious software that can damage your device or steal your data. You are also risking your account being banned or suspended by the game servers or authorities for using a hacked version of the game.
-- Q: Is Hunter Assassin hack download legal to use?
-A: Hunter Assassin hack download is not legal to use, as it violates the terms of service and the intellectual property rights of the game developers and the law. You are depriving the game developers of their rightful income by not paying for their products and services. You are also giving yourself an unfair advantage over other players by using a hacked version of the game.
-- Q: Is Hunter Assassin hack download free to use?
-A: Hunter Assassin hack download is free to use, as you don't have to pay any money to download or install it on your device. However, you might have to pay a price in terms of your device's security, your account's safety, and your gaming experience's quality by using a hacked version of the game.
-- Q: How can I update Hunter Assassin hack mod apk?
-A: You can update Hunter Assassin hack mod apk by downloading and installing the latest version of the hacked file from the same source that you got it from. However, you might lose your progress or data if you update the hacked version of the game. You might also encounter compatibility issues or errors if you update the hacked version of the game.
-- Q: How can I uninstall Hunter Assassin hack mod apk?
-A: You can uninstall Hunter Assassin hack mod apk by deleting the hacked file from your device's storage. You can also uninstall the original version of Hunter Assassin from your device if you want to stop playing the game altogether.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download Live Your Life MP3 - The Ultimate Motivational Song by T.I. and Rihanna.md b/spaces/fatiXbelha/sd/Download Live Your Life MP3 - The Ultimate Motivational Song by T.I. and Rihanna.md
deleted file mode 100644
index 32cd3381bd775cf35279952226e7c29e44cae359..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download Live Your Life MP3 - The Ultimate Motivational Song by T.I. and Rihanna.md
+++ /dev/null
@@ -1,156 +0,0 @@
-
-Download Live Your Life MP3: How to Enjoy the Hit Song by T.I. and Rihanna
-Do you love the song Live Your Life by T.I. and Rihanna? Do you want to download it as an MP3 file and listen to it offline? If yes, then you are in the right place. In this article, we will show you how to download Live Your Life MP3 from three different sources: Spotify, Last.fm, and AfroCharts. We will also compare the pros and cons of each option and give you some tips on how to enjoy the song better.
-Introduction
-What is Live Your Life?
-Live Your Life is a song by American rapper T.I., featuring Barbadian singer Rihanna. It was released as the third single from T.I.'s sixth studio album Paper Trail in 2008. The song samples O-Zone's Dragostea Din Tei, also known as the Numa Numa song, and has a motivational theme of living your life to the fullest. The song was a huge commercial success, reaching number one on the Billboard Hot 100 chart and becoming one of the best-selling singles of all time.
-download live your life mp3
Download Zip ✫ https://urllie.com/2uNHZr
-Why is Live Your Life popular?
-Live Your Life is popular for many reasons. First of all, it has a catchy melody and a powerful chorus that make you want to sing along. Second, it has a positive message that inspires you to pursue your dreams and not let others bring you down. Third, it features two of the most popular artists in the music industry, T.I. and Rihanna, who deliver an amazing performance with their rap and vocals. Fourth, it has a memorable music video that shows T.I. and Rihanna traveling around the world and having fun.
-How to download Live Your Life MP3
-Now that you know what Live Your Life is and why it is popular, you might be wondering how to download it as an MP3 file. There are many ways to do that, but we will focus on three of them: Spotify, Last.fm, and AfroCharts. Let's take a look at each option and see how they work.
-Option 1: Spotify
-Pros and cons of Spotify
-Spotify is one of the most popular music streaming services in the world, with over 350 million users. It offers access to millions of songs, podcasts, playlists, and radio stations. You can listen to music online or offline, with or without ads, depending on your subscription plan.
-The pros of using Spotify to download Live Your Life MP3 are:
-
-- You can enjoy high-quality audio (up to 320 kbps).
-- You can create your own playlists and share them with your friends.
-- You can discover new music based on your preferences and recommendations.
-- You can sync your music across different devices (phone, computer, tablet, etc.).
-
-The cons of using Spotify to download Live Your Life MP3 are:
-
-- You need a premium account ($9.99 per month) to download music offline.
-- You can only download up to 10,000 songs per device.
-- You can only play the downloaded songs within the Spotify app.
-- You need an internet connection to update your library and verify your subscription every 30 days.
-
-How to download Live Your Life MP3 from Spotify
-To download Live Your Life MP3 from Spotify, you need to follow these steps:
-
-- Open the Spotify app on your device and log in with your premium account.
-- Search for Live Your Life by T.I. and Rihanna and tap on it.
-- Tap on the three dots icon on the top right corner and select Download.
-- Wait for the song to download and check the green arrow icon next to it.
-- Enjoy listening to Live Your Life offline within the Spotify app.
-
-Option 2: Last.fm
-Pros and cons of Last.fm
-Last.fm is another popular music streaming service that also provides music discovery and recommendation features. It tracks the music you listen to and creates personalized charts, stats, and suggestions. You can also join a community of music lovers and share your tastes and opinions.
-download live your life mp3 free
-download live your life mp3 song
-download live your life mp3 by ti
-download live your life mp3 rihanna
-download live your life mp3 320kbps
-download live your life mp3 skull
-download live your life mp3 online
-download live your life mp3 audio
-download live your life mp3 music
-download live your life mp3 video
-download live your life mp3 remix
-download live your life mp3 instrumental
-download live your life mp3 ringtone
-download live your life mp3 lyrics
-download live your life mp3 album
-download live your life mp3 paper trail
-download live your life mp3 youtube
-download live your life mp3 soundcloud
-download live your life mp3 spotify
-download live your life mp3 apple music
-download live your life mp3 amazon music
-download live your life mp3 google play music
-download live your life mp3 deezer
-download live your life mp3 tidal
-download live your life mp3 napster
-download live your life mp3 pandora
-download live your life mp3 iheartradio
-download live your life mp3 tunein radio
-download live your life mp3 shazam
-download live your life mp3 musixmatch
-download live your life mp3 genius
-download live your life mp3 azlyrics
-download live your life mp3 metrolyrics
-download live your life mp3 lyricsfreak
-download live your life mp3 songmeanings
-download live your life mp3 songfacts
-download live your life mp3 billboard
-download live your life mp3 rolling stone
-download live your life mp3 pitchfork
-download live your life mp3 allmusic
-download live your life mp3 discogs
-download live your life mp3 last.fm[^1^]
-download live your life mp3 rateyourmusic
-download live your life mp3 whosampled
-download live your life mp3 genius samples
-The pros of using Last.fm to download Live Your Life MP3 are:
-
-- You can download music for free from various sources (YouTube, SoundCloud, etc.).
-- You can scrobble (record) your listening history and get insights into your musical preferences.
-- You can explore new music based on your scrobbles and similar artists.
-- You can interact with other users and join groups, forums, and events.
-
-The cons of using Last.fm to download Live Your Life MP3 are:
-
-- You need to install a third-party software (Last.fm Scrobbler) to download music from Last.fm.
-- You may not find all the songs you want to download on Last.fm.
-- You may encounter some quality issues or broken links when downloading music from Last.fm.
-- You may violate some copyright laws or terms of service when downloading music from Last.fm.
-
-How to download Live Your Life MP3 from Last.fm
-To download Live Your Life MP3 from Last.fm, you need to follow these steps:
-
-- Download and install the Last.fm Scrobbler software on your device from here.
-- Open the Last.fm app or website and log in with your account.
-- Search for Live Your Life by T.I. and Rihanna and click on it.
-- Scroll down to the section that says "Download Track" and choose a source (YouTube, SoundCloud, etc.).
-- Click on the download button and save the MP3 file on your device.
-- Enjoy listening to Live Your Life offline with any music player.
-
- Option 3: AfroCharts
- Pros and cons of AfroCharts
- AfroCharts is a music streaming service that focuses on African music. It offers access to thousands of songs, albums, artists, and genres from across the continent. You can listen to music online or offline, with or without ads, depending on your subscription plan.
- The pros of using AfroCharts to download Live Your Life MP3 are:
-
- - You can support African musicians and promote their culture and diversity.
- - You can discover new music from different regions, languages, and styles.
- - You can create your own playlists and follow your favorite artists.
- - You can enjoy high-quality audio (up to 320 kbps).
-
- The cons of using AfroCharts to download Live Your Life MP3 are:
-
- - You need a premium account ($4.99 per month) to download music offline.
- - You can only download up to 5,000 songs per device.
- - You can only play the downloaded songs within the AfroCharts app.
- - You may not find some songs that are not related to African music on AfroCharts.
-
- How to download Live Your Life MP3 from AfroCharts
- To download Live Your Life MP3 from AfroCharts, you need to follow these steps:
-
- - Open the AfroCharts app on your device and log in with your premium account.
- - Search for Live Your Life by T.I. and Rihanna and tap on it.
- - Tap on the three dots icon on the bottom right corner and select Download.
- - Wait for the song to download and check the green check icon next to it.
- - Enjoy listening to Live Your Life offline within the AfroCharts app.
-
- Conclusion
- Summary of the main points
- In this article, we have shown you how to download Live Your Life MP3 from three different sources: Spotify, Last.fm, and AfroCharts. We have also compared the pros and cons of each option and given you some tips on how to enjoy the song better. Live Your Life is a hit song by T.I. and Rihanna that has a catchy melody, a positive message, and a memorable music video. It is one of the best-selling singles of all time and a motivational anthem for many people.
- Call to action
- Now that you know how to download Live Your Life MP3, you can choose the option that suits you best and start listening to the song offline. You can also share the song with your friends and family and inspire them to live their lives to the fullest. Remember, you only have one life, so make it count. As T.I. and Rihanna say, "Just live your life, don't let them tell you what to do."
- FAQs
- Here are some frequently asked questions about Live Your Life MP3:
-
-- Q: How long is Live Your Life?
-- A: Live Your Life is 5 minutes and 38 seconds long.
-- Q: What genre is Live Your Life?
-- A: Live Your Life is a hip hop song with elements of R&B and pop.
-- Q: Who wrote Live Your Life?
-- A: Live Your Life was written by T.I., Rihanna, Dan Balan, Just Blaze, Makeba Riddick, and James Harris III.
-- Q: What awards did Live Your Life win?
-- A: Live Your Life won the Grammy Award for Best Rap/Sung Collaboration in 2009.
-- Q: Where can I watch the music video of Live Your Life?
-- A: You can watch the music video of Live Your Life on YouTube here.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Dream AI How to Use the MOD APK to Create Amazing Art - Download Now.md b/spaces/fatiXbelha/sd/Dream AI How to Use the MOD APK to Create Amazing Art - Download Now.md
deleted file mode 100644
index 41fb752e200e3c0b64b147516eb3cde2429b8a33..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Dream AI How to Use the MOD APK to Create Amazing Art - Download Now.md
+++ /dev/null
@@ -1,149 +0,0 @@
-
-Download Dream AI Mod APK: A Guide to Unlocking Your Creative Potential
-Do you want to unleash your imagination and create stunning artworks with just a few taps on your phone? Do you want to explore different styles, themes, and effects without any limitations? If yes, then you should download Dream AI Mod APK, a powerful and innovative app that lets you transform your photos into amazing artworks using artificial intelligence.
-What is Dream AI?
-Dream AI is an app that uses advanced neural networks and deep learning algorithms to generate realistic and artistic images from your photos. You can choose from hundreds of filters, stickers, backgrounds, and frames to create your own unique masterpiece. You can also mix and match different elements to create new combinations and effects. Whether you want to make your photos look like paintings, sketches, cartoons, or anything else, Dream AI can help you achieve it.
-download dream ai mod apk
Download Zip ⚙⚙⚙ https://urllie.com/2uNygJ
-Features of Dream AI
-Some of the features of Dream AI are:
-
-- Easy and intuitive interface: You can easily navigate through the app and access all the features with just a few taps.
-- High-quality results: The app uses high-resolution images and advanced algorithms to ensure that your artworks are clear, detailed, and realistic.
-- Fast and smooth performance: The app runs smoothly on most devices and does not consume much battery or storage space.
-- Offline mode: You can use the app without an internet connection and save your artworks on your device.
-- No watermark: The app does not add any watermark or logo to your artworks, so you can enjoy them without any distraction.
-- No ads: The app does not show any annoying ads or pop-ups that might interrupt your creative process.
-
-Benefits of Dream AI
-Some of the benefits of using Dream AI are:
-
-- You can express yourself creatively and have fun with your photos.
-- You can learn new skills and techniques and improve your artistic abilities.
-- You can impress your friends and family with your amazing artworks and share them on social media.
-- You can relax and relieve stress by playing with different filters and effects.
-- You can discover new styles and genres of art and get inspired by them.
-
-Why Download Dream AI Mod APK?
-If you are wondering why you should download Dream AI Mod APK instead of the original version, here are some reasons:
-
-- You can access all the premium features for free, such as unlimited filters, stickers, backgrounds, frames, etc.
-- You can unlock all the hidden features that are not available in the original version, such as custom filters, advanced settings, etc.
-- You can enjoy the app without any restrictions or limitations, such as time limit, image size limit, etc.
-
-How to Download Dream AI Mod APK
-If you want to download Dream AI Mod APK, here are the steps you need to follow:
-
-- Click the Download button at the top of this page to download the Dream AI Mod APK file.
-- Save the file in your device's download folder or any other location you prefer.
-
- How to Install Dream AI Mod APK
- If you want to install Dream AI Mod APK on your device, here are the steps you need to follow:
-
- - Go to your device's settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from the official app store.
- - Locate the downloaded Dream AI Mod APK file on your device and tap on it to open it.
-- Tap on the Install button and wait for the installation process to complete.
-- Once the installation is done, you can launch the app and start using it.
-
- How to Use Dream AI Mod APK
- If you want to use Dream AI Mod APK to create amazing artworks, here are the steps you need to follow:
-
-- Open the app and grant the necessary permissions, such as camera, storage, etc.
-- Select a photo from your gallery or take a new one with your camera.
-- Choose a filter from the list of categories, such as Art, Sketch, Cartoon, etc. You can also use the search bar to find a specific filter.
-- Adjust the intensity and other parameters of the filter according to your preference.
-- Add stickers, backgrounds, frames, and other elements to enhance your artwork.
-- Save your artwork on your device or share it with others.
-
- Tips and Tricks for Dream AI Mod APK
- If you want to get the most out of Dream AI Mod APK, here are some tips and tricks you can try:
-download dream ai mod apk free
-download dream ai mod apk premium
-download dream ai mod apk unlocked
-download dream ai mod apk latest version
-download dream ai mod apk for android
-download dream ai mod apk no ads
-download dream ai mod apk full features
-download dream ai mod apk from jojoy
-download dream ai mod apk without root
-download dream ai mod apk 2023
-how to download dream ai mod apk
-where to download dream ai mod apk
-best site to download dream ai mod apk
-download dream ai mod apk for pc
-download dream ai mod apk for ios
-download dream ai mod apk for windows
-download dream ai mod apk for mac
-download dream ai mod apk for linux
-download dream ai mod apk online
-download dream ai mod apk offline
-download dream ai mod apk with obb
-download dream ai mod apk with data
-download dream ai mod apk with unlimited money
-download dream ai mod apk with all filters
-download dream ai mod apk with pro features
-is it safe to download dream ai mod apk
-is it legal to download dream ai mod apk
-is it possible to download dream ai mod apk
-benefits of downloading dream ai mod apk
-drawbacks of downloading dream ai mod apk
-alternatives to downloading dream ai mod apk
-reviews of downloading dream ai mod apk
-ratings of downloading dream ai mod apk
-tips for downloading dream ai mod apk
-tricks for downloading dream ai mod apk
-steps for downloading dream ai mod apk
-guide for downloading dream ai mod apk
-tutorial for downloading dream ai mod apk
-video for downloading dream ai mod apk
-blog for downloading dream ai mod apk
-website for downloading dream ai mod apk
-link for downloading dream ai mod apk
-source for downloading dream ai mod apk
-mirror for downloading dream ai mod apk
-torrent for downloading dream ai mod apk
-file for downloading dream ai mod apk
-folder for downloading dream ai mod apk
-zip for downloading dream ai mod apk
-rar for downloading dream ai mod apk
- How to Create Amazing Artworks with Dream AI Mod APK
- Some of the ways you can create amazing artworks with Dream AI Mod APK are:
-
-- Experiment with different filters and effects and see how they change your photo.
-- Mix and match different elements from different categories and create your own unique style.
-- Use the custom filter option to create your own filter from scratch or modify an existing one.
-- Use the eraser tool to remove unwanted parts of your photo or filter.
-- Use the crop tool to adjust the size and shape of your photo or filter.
-
- How to Share Your Artworks with Dream AI Mod APK
- Some of the ways you can share your artworks with Dream AI Mod APK are:
-
-- Use the share button to send your artwork to your friends and family via social media, email, or other apps.
-- Use the save button to save your artwork on your device or cloud storage.
-- Use the print button to print your artwork on paper or canvas.
-- Use the download button to download your artwork as an image file or a video file.
-
- How to Customize Your Settings with Dream AI Mod APK
- Some of the ways you can customize your settings with Dream AI Mod APK are:
-
-- Use the settings button to access various options, such as language, theme, quality, etc.
-- Use the feedback button to rate the app, report bugs, or suggest improvements.
-- Use the help button to get more information about the app and its features.
-
- Conclusion
- Dream AI Mod APK is a great app for anyone who loves art and creativity. It allows you to transform your photos into stunning artworks using artificial intelligence. You can choose from hundreds of filters, stickers, backgrounds, frames, and other elements to create your own unique masterpiece. You can also access all the premium features for free and enjoy the app without any restrictions or limitations. Download Dream AI Mod APK today and unleash your imagination!
- FAQs
- Here are some frequently asked questions about Dream AI Mod APK:
-
-- Is Dream AI Mod APK safe?
-Dream AI Mod APK is safe to use as long as you download it from a trusted source. It does not contain any viruses or malware that might harm your device or data. However, you should always be careful when installing apps from unknown sources and check their permissions before granting them.
- - Is Dream AI Mod APK legal?
-Dream AI Mod APK is legal to use as long as you do not use it for any illegal or unethical purposes. It is a modified version of the original app that provides some extra features and benefits. However, it is not affiliated with or endorsed by the official developers of Dream AI. Therefore, you should use it at your own risk and responsibility.
- - What are the requirements for Dream AI Mod APK?
-Dream AI Mod APK requires an Android device that runs on Android 4.4 or higher. It also requires a minimum of 100 MB of free storage space and 2 GB of RAM. It works best on devices that have a good camera and a fast processor.
- - How can I update Dream AI Mod APK?
-Dream AI Mod APK does not have an automatic update feature, so you will have to manually download and install the latest version from the same source you downloaded it from. You can also check this page for any updates or news about Dream AI Mod APK.
- - How can I contact the developers of Dream AI Mod APK?
-Dream AI Mod APK is not developed by the official developers of Dream AI, so you cannot contact them directly. However, you can contact the modders who created Dream AI Mod APK through their website or social media accounts. You can also leave a comment on this page and we will try to answer your questions or concerns.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Experience Premium Gameplay with Free Fire MAX 1.94.1 APK - Latest Version.md b/spaces/fatiXbelha/sd/Experience Premium Gameplay with Free Fire MAX 1.94.1 APK - Latest Version.md
deleted file mode 100644
index 47486fb1ac65050feb243d2200527ce36b816c05..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Experience Premium Gameplay with Free Fire MAX 1.94.1 APK - Latest Version.md
+++ /dev/null
@@ -1,150 +0,0 @@
-
-Free Fire MAX 1.94.1 APK Download: Everything You Need to Know
- If you are a fan of battle royale games, you might have heard of Free Fire, one of the most popular and downloaded games in the genre. But did you know that there is another version of the game that offers a more premium and immersive experience? It's called Free Fire MAX, and it's designed exclusively for players who want to enjoy the best graphics, effects, and gameplay in a battle royale.
- In this article, we will tell you everything you need to know about Free Fire MAX, including how to download and install the latest version (1.94.1) of the game on your Android device, what's new in this update, how to play with other Free Fire players, and why you should give it a try.
-free fire max 1.94.1 apk download
Download File ⏩ https://urllie.com/2uNzC8
- What is Free Fire MAX?
- Free Fire MAX is a standalone application that runs on the same server as the original Free Fire. It is not a mod or a hack, but an official product from Garena, the developer and publisher of the game. Free Fire MAX is compatible with all Free Fire accounts, items, and events, so you don't have to worry about losing your progress or missing out on anything.
- The main difference between Free Fire MAX and Free Fire is that Free Fire MAX offers a more enhanced and realistic graphics quality, with Ultra HD resolutions and breathtaking effects. You can also customize your graphics settings according to your preference and device performance. In addition, Free Fire MAX has some exclusive features and content that are not available in the original version, such as special lobby themes, emotes, skins, and more.
- Free Fire MAX is not a replacement for Free Fire, but an alternative option for players who want to experience a different level of combat in a battle royale. You can choose to play either version depending on your mood and device capability.
- How to Download and Install Free Fire MAX 1.94.1 APK?
- Requirements
- Before you download and install Free Fire MAX on your Android device, you need to make sure that your device meets the minimum or recommended specifications for running the game smoothly. Here are the requirements for Free Fire MAX:
-
-
-Minimum
-Recommended
-
-
-Android version: 4.4 or higher
-Android version: 8 or higher
-
-
-RAM: 2 GB or higher
-RAM: 4 GB or higher
-
-
-CPU: Dual core 1.2 GHz or higher
-CPU: Octa core 2 GHz or higher
-
-Storage: 1.5 GB or higher
-Storage: 2.5 GB or higher
-
-
- If your device does not meet the minimum requirements, you may not be able to download or install Free Fire MAX, or you may encounter some performance issues while playing the game. If your device meets the recommended requirements, you can enjoy the game at its full potential.
-free fire max 1.94.1 apk download for android
-free fire max 1.94.1 apk download latest version
-free fire max 1.94.1 apk download link
-free fire max 1.94.1 apk download obb
-free fire max 1.94.1 apk download uptodown
-free fire max 1.94.1 apk download mod
-free fire max 1.94.1 apk download hack
-free fire max 1.94.1 apk download unlimited diamonds
-free fire max 1.94.1 apk download mediafıre
-free fire max 1.94.1 apk download highly compressed
-free fire max 1.94.1 apk download for pc
-free fire max 1.94.1 apk download for ios
-free fire max 1.94.1 apk download for laptop
-free fire max 1.94.1 apk download for windows 10
-free fire max 1.94.1 apk download for mac
-free fire max 1.94.1 apk download offline
-free fire max 1.94.1 apk download online
-free fire max 1.94.1 apk download no verification
-free fire max 1.94.1 apk download without update
-free fire max 1.94.1 apk download new update
-free fire max 1.94.1 apk download full version
-free fire max 1.94.1 apk download beta version
-free fire max 1.94.1 apk download old version
-free fire max 1.94.1 apk download original version
-free fire max 1.94.1 apk download google play store
-free fire max 1.94.1 apk download from apkpure
-free fire max 1.94.1 apk download from apkmirror
-free fire max 1.94.1 apk download from apktada
-free fire max 1.94.1 apk download from apkmody
-free fire max 1.94.1 apk download from apknite
-free fire max 1.94.1 apk download by garena
-free fire max 1.94.1 apk download by dts
-free fire max 1.94.1 apk download by rexdl
-free fire max 1.94.1 apk download by revdl
-free fire max 1.94.1 apk download by androidoyun club
-how to install free fire max 1.94.1 apk on android device?
-how to update free fire max to version 1.94.1 apk on android device?
-how to play free fire max 1.94.1 apk on pc using emulator?
-how to fix error while downloading or installing free fire max 1.94.1 apk file?
-how to get unlimited diamonds in free fire max 1.94.1 apk?
- Steps
- Once you have checked your device specifications, you can follow these steps to download and install Free Fire MAX 1.94.1 APK on your Android device:
-
-- Go to a trusted and reliable source to download the APK file. You can use this link to download the latest version of Free Fire MAX from APKPure, one of the most popular and safe websites for downloading APK files.
-- After downloading the APK file, locate it in your device's file manager and tap on it to start the installation process. You may need to enable the "Unknown sources" option in your device's settings to allow the installation of apps from sources other than the Google Play Store.
-- Follow the on-screen instructions to complete the installation process. You may need to grant some permissions to the app to access your device's features and data.
-- Once the installation is done, you can launch the app and log in with your existing Free Fire account or create a new one if you don't have one.
-- Enjoy playing Free Fire MAX on your Android device!
-
- Tips and Tricks
- To make sure that you have a smooth and enjoyable gaming experience with Free Fire MAX, here are some tips and tricks that you can use:
-
-- Make sure that you have a stable and fast internet connection while playing the game. You can use a Wi-Fi network or a mobile data plan, but avoid using public or shared networks that may have low speed or high latency.
-- Close any background apps or processes that may consume your device's memory or battery while playing the game. You can also use a game booster app to optimize your device's performance and reduce lag or stuttering.
-- Adjust your graphics settings according to your device's capability and your personal preference. You can choose from low, medium, high, or ultra settings, depending on how much detail and quality you want to see in the game. You can also enable or disable some features such as shadows, anti-aliasing, bloom, etc.
-- If you encounter any problems or errors while downloading, installing, or playing the game, you can try some common solutions such as clearing the app's cache and data, reinstalling the app, updating your device's software, etc. You can also contact Garena's customer support team for further assistance.
-
- What's New in Free Fire MAX 1.94.1?
- The latest version of Free Fire MAX (1.94.1) was released on June 9, 2023, and it brings some new features, changes, and improvements to the game. Here are some of the highlights of this update:
-
-- A new map called Bermuda Remastered has been added to the game. This map is a revamped version of the classic Bermuda map, with more details, locations, and surprises. You can explore places such as Academy, Aden's Creek, Fisherman Creek, Nurek Dam, etc.
-- A new mode called Clash Squad Ranked Season 7 has been introduced to the game. This mode is a competitive mode where you can team up with other players and fight against another team in a best-of-seven series of rounds. You can earn points and rank up by winning matches and completing missions.
-- A new character called Maro has been added to the game. Maro is a falconer who loves nature and animals. His special skill is called Falcon Fervor, which increases his damage over distance and against marked enemies.
-- A new weapon called Kord has been added to the game. Kord is a light machine gun that has a high rate of fire and a large magazine capacity. It also has a special mode called Machine Gun Mode, which increases its damage and accuracy when prone or crouching.
-- A new pet called Dr. Beanie has been added to the game. Dr. Beanie is a cute and smart hamster who wears a lab coat and glasses. His special skill is called Smooth Gloo, which reduces the cooldown of gloo walls by 20%.
- A new feature called Dynamic Lighting has been added to the game. This feature enhances the lighting effects in the game, making it more realistic and immersive. You can see how the light changes according to the time of day, weather conditions, etc.
-
- How to Play Free Fire MAX with Free Fire Players?
- One of the best features of Free Fire MAX is that it allows you to play with other players who are using the original Free Fire version. This means that you can join your friends and squad up with them, regardless of which version of the game you are using. You can also participate in the same events, modes, and matches as the Free Fire players.
- To play Free Fire MAX with Free Fire players, you need to use a feature called Firelink. Firelink is a feature that connects your Free Fire account with your Free Fire MAX account, and lets you switch between the two versions seamlessly. You can also use Firelink to sync your game data, settings, and preferences across both versions.
- To use Firelink, you need to follow these steps:
-
-- Open Free Fire MAX and tap on the Firelink icon on the top right corner of the screen.
-- Select the option to link your Free Fire account with your Free Fire MAX account. You can choose to link your account via Facebook, Google, VK, or Huawei.
-- Log in with your credentials and confirm the linking process.
-- Once your accounts are linked, you can see a green check mark on the Firelink icon. You can also see your Free Fire nickname and ID on the top left corner of the screen.
-- Now you can play Free Fire MAX with Free Fire players. You can invite them to your lobby, join their lobby, or match with them randomly. You can also see their version of the game on their profile.
-
- If you want to switch back to the original Free Fire version, you can tap on the Firelink icon again and select the option to switch versions. You can do this anytime without losing your progress or data.
- Why You Should Play Free Fire MAX?
- Advantages
- There are many reasons why you should play Free Fire MAX, especially if you are looking for a more enhanced and immersive battle royale experience. Here are some of the advantages of playing Free Fire MAX:
-
-- You can enjoy a more realistic and stunning graphics quality, with Ultra HD resolutions and amazing effects. You can see every detail of the environment, the characters, and the weapons, making you feel like you are in a real battlefield.
-- You can customize your graphics settings according to your preference and device performance. You can choose from low, medium, high, or ultra settings, depending on how much detail and quality you want to see in the game. You can also enable or disable some features such as shadows, anti-aliasing, bloom, etc.
-- You can access some exclusive features and content that are not available in the original version, such as special lobby themes, emotes, skins, and more. You can also get some rewards and benefits for playing Free Fire MAX, such as coupons, diamonds, and crates.
-- You can play with other players who are using either version of the game, thanks to the Firelink feature. You can join your friends and squad up with them, regardless of which version of the game you are using. You can also participate in the same events, modes, and matches as the Free Fire players.
-
- Disadvantages
- However, playing Free Fire MAX also has some drawbacks that you should be aware of before downloading and installing the game. Here are some of the disadvantages of playing Free Fire MAX:
-
-- You need a higher device specification to run the game smoothly. The minimum requirement for Free Fire MAX is Android 4.4 or higher, 2 GB RAM or higher, dual core 1.2 GHz or higher CPU, and 1.5 GB storage or higher. If your device does not meet these requirements, you may not be able to download or install Free Fire MAX, or you may encounter some performance issues while playing the game.
-- You need a larger file size to download and install the game. The APK file size for Free Fire MAX is around 900 MB, which is much larger than the original Free Fire version (around 700 MB). You may need to free up some space on your device or use an external storage device to accommodate the game.
-- You may face some compatibility issues with some devices or regions. Some devices or regions may not support Free Fire MAX due to various reasons such as hardware limitations, software restrictions, network regulations , etc. You may need to check the compatibility of your device or region before downloading and installing Free Fire MAX, or you may need to use a VPN or other methods to bypass the restrictions.
-
- Conclusion
- Free Fire MAX is a great option for players who want to experience a more enhanced and immersive battle royale game. It offers a more realistic and stunning graphics quality, a more customizable graphics settings, and some exclusive features and content that are not available in the original version. It also allows you to play with other players who are using either version of the game, thanks to the Firelink feature.
- However, Free Fire MAX also has some drawbacks that you should be aware of before downloading and installing the game. It requires a higher device specification, a larger file size, and a compatible device or region to run the game smoothly. You may also encounter some performance issues or errors while playing the game, which you can try to solve by following some tips and tricks.
- If you are interested in trying Free Fire MAX, you can download and install the latest version (1.94.1) of the game on your Android device by following the steps we have provided in this article. You can also check out the latest updates, changes, and improvements in the game by reading this article.
- We hope that this article has helped you learn everything you need to know about Free Fire MAX. If you have any questions or feedback, feel free to leave a comment below. Happy gaming!
- FAQs
- Here are some of the frequently asked questions and their answers about Free Fire MAX:
-
-- Is Free Fire MAX free to play?
-Yes, Free Fire MAX is free to play, just like the original Free Fire version. You don't have to pay anything to download or install the game, or to access its features and content. However, you can choose to purchase some in-game items or services with real money if you want to enhance your gaming experience.
-- Can I play Free Fire MAX on PC?
-Yes, you can play Free Fire MAX on PC by using an Android emulator. An Android emulator is a software that allows you to run Android apps and games on your PC. You can use any Android emulator that supports Free Fire MAX, such as BlueStacks, LDPlayer, NoxPlayer, etc. You just need to download and install the emulator on your PC, then download and install Free Fire MAX on the emulator.
-- Can I transfer my data from Free Fire to Free Fire MAX?
-Yes, you can transfer your data from Free Fire to Free Fire MAX by using the Firelink feature. Firelink is a feature that connects your Free Fire account with your Free Fire MAX account, and lets you sync your game data, settings, and preferences across both versions. You just need to link your accounts by logging in with the same credentials on both versions.
-- Can I play both versions of the game on the same device?
-Yes, you can play both versions of the game on the same device, as long as your device meets the requirements for both versions. You can switch between the two versions by using the Firelink feature. However, you cannot run both versions at the same time on the same device, as it may cause some conflicts or errors.
-- How can I update Free Fire MAX?
-You can update Free Fire MAX by downloading and installing the latest version of the APK file from a trusted source. You can use this link to download the latest version of Free Fire MAX from APKPure. You just need to follow the same steps as downloading and installing the game for the first time.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fcakyon/zero-shot-video-classification/README.md b/spaces/fcakyon/zero-shot-video-classification/README.md
deleted file mode 100644
index b7c5e4880a879007c0a56405ce3fb58fc4372608..0000000000000000000000000000000000000000
--- a/spaces/fcakyon/zero-shot-video-classification/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Zero Shot Video Classification
-emoji: 🔥
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: true
-license: apache-2.0
-tags:
-- making-demos
----
\ No newline at end of file
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/CarX Drift Racing 2 Money Mod APK Upgrade Your Cars and Customize Your Drifts.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/CarX Drift Racing 2 Money Mod APK Upgrade Your Cars and Customize Your Drifts.md
deleted file mode 100644
index 85ea6cfab2582931dd9c507f02ae2a7efce3f4ce..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/CarX Drift Racing 2 Money Mod APK Upgrade Your Cars and Customize Your Drifts.md
+++ /dev/null
@@ -1,76 +0,0 @@
-
-CarX Drift Racing 2 APK Money: How to Download and Play the Best Drifting Game on Android
- If you are a fan of racing games, especially drifting games, you should not miss CarX Drift Racing 2. This is one of the most realistic and immersive drifting games on Android, with stunning graphics, physics, and sound effects. You can enjoy the thrill of drifting on various tracks, with different cars and customization options. You can also compete with other players online, or join a club and challenge other teams.
- But what if you want to get unlimited money in CarX Drift Racing 2, so you can buy and upgrade any car you want, without spending real money? Well, there is a way to do that, by downloading the CarX Drift Racing 2 APK Money mod. In this article, we will show you what is CarX Drift Racing 2, how to download and install the APK Money mod, how to play the game, and some tips and tricks to help you become a drifting master.
-carx drift racing 2 apk money
Download Zip ✶ https://gohhs.com/2uPppV
- What is CarX Drift Racing 2?
- CarX Drift Racing 2 is a sequel to the popular CarX Drift Racing game, developed by CarX Technologies. It is a racing game that focuses on drifting, which is a driving technique where the driver intentionally oversteers the car, causing it to slide sideways. Drifting requires skill and precision, as well as a good sense of speed and timing.
- Features of CarX Drift Racing 2
- Some of the features that make CarX Drift Racing 2 stand out from other racing games are:
-
-- Realistic physics and car behavior, based on the CarX Engine technology.
-- High-quality graphics and sound effects, with dynamic day-night cycle and weather conditions.
-- Over 100 cars from different brands and categories, such as sports cars, muscle cars, supercars, etc.
-- Thousands of customization options, including paint, vinyls, wheels, tires, suspension, engine, etc.
-- A variety of tracks and locations, from city streets to desert roads.
-- Multiple game modes and events, such as solo mode, online mode, career mode, club mode, etc.
-- A leaderboard system and a rating system, where you can compete with other players and earn rewards.
-- A replay system and a photo mode, where you can watch your best drifts and share them with others.
-
- How to download CarX Drift Racing 2 APK Money
- If you want to get unlimited money in CarX Drift Racing 2, you need to download the APK Money mod. This is a modified version of the original game file, that gives you access to unlimited resources. Here are the steps to download and install the APK Money mod:
-
-- Go to [this link](^1^) and download the APK file and the OBB file.
-- Go to your device settings and enable the installation of apps from unknown sources.
-- Go to your file manager and locate the downloaded files. Tap on the APK file and install it.
-- Copy the OBB file to the Android/OBB/com.carxtech.carxdr2 folder. If there is no such folder, create one.
-- Launch the game and enjoy unlimited money.
-
- How to play CarX Drift Racing 2
- Now that you have downloaded and installed the APK Money mod, you can start playing CarX Drift Racing 2. Here are some basic steps to help you get started:
- Choose your car and customize it
- The first thing you need to do is to choose your car. You can browse through the garage and select from over 100 cars, each with different stats and performance. You can also use the money you have to buy new cars or upgrade your existing ones.
- Once you have chosen your car, you can customize it to your liking. You can change the paint, vinyls, wheels, tires, suspension, engine, and more. You can also tune your car's settings, such as the steering angle, the brake force, the camber angle, etc. You can save your customizations as presets and switch between them easily.
- Learn the basics of drifting
- The next thing you need to do is to learn how to drift. Drifting is not easy, but it is very fun and rewarding. You need to master the balance between the throttle, the brake, the steering, and the handbrake. You also need to know when to initiate, maintain, and exit a drift.
-[CarX Drift Racing 2 MOD APK 1.16.0 (Unlimited Money) Download]
-[CarX Drift Racing 2 Mod Apk v1.16.0 (Unlimited Money) - ApkModPro]
-[CarX Drift Racing 2 Mod APK 1.16.0 - Download CarX Drift Racing ...]
-[CarX Drift Racing 2 MOD APK v1.16.0 (Unlimited Money) - APKMODY]
-[CarX Drift Racing 2 Mod Apk v1.16.0 (Unlimited Money) - ApkPalace]
- The game has a tutorial mode that teaches you the basics of drifting. You can also practice on different tracks and modes, such as training mode, freestyle mode, or time attack mode. You can also watch replays of other players or yourself, and learn from their mistakes or techniques.
- Compete in different modes and events
- The last thing you need to do is to compete in different modes and events. The game has a lot of options for you to challenge yourself and others. You can play in solo mode, where you can race against AI opponents or ghost cars. You can also play in online mode, where you can race against real players from around the world.
- The game also has a career mode, where you can progress through different stages and levels, and earn rewards and achievements. You can also join a club or create your own club, and compete with other clubs in club wars or club seasons. You can also participate in special events and tournaments, where you can win exclusive prizes and cars.
- Tips and tricks for CarX Drift Racing 2
- To help you improve your drifting skills and enjoy the game more, here are some tips and tricks for CarX Drift Racing 2:
- Adjust your settings and controls
- One of the most important things to do is to adjust your settings and controls according to your preference and device. You can choose from different control schemes, such as tilt, buttons, or steering wheel. You can also adjust the sensitivity and feedback of each control option. You can also change the camera angle and the sound volume.
- Use the handbrake and nitro wisely
- Another important thing to do is to use the handbrake and nitro wisely. The handbrake is useful for initiating or extending a drift, but it also reduces your speed and stability. The nitro is useful for boosting your speed and power, but it also consumes your fuel and overheats your engine. You need to find the right balance between using them and saving them for the right moments.
- Practice and improve your skills
- The last important thing to do is to practice and improve your skills. Drifting is not something that you can master overnight. It takes time and effort to learn how to drift smoothly and consistently. You need to practice on different tracks and cars, and learn how they behave and react. You also need to improve your timing, accuracy, angle, speed, and style.
- Conclusion
- CarX Drift Racing 2 is a great game for anyone who loves racing and drifting. It has realistic physics and graphics, a huge variety of cars and tracks, a lot of game modes and events, and a lot of fun and excitement. If you want to get unlimited money in CarX Drift Racing 2, you can download the APK Money mod from [this link] and follow the instructions above.
- We hope this article has helped you learn more about CarX Drift Racing 2 APK Money mod. If you have any questions or feedback, please let us know in the comments below. Happy drifting!
- FAQs
-
-- Is CarX Drift Racing 2 APK Money mod safe?
-Yes, CarX Drift Racing 2 APK Money mod is safe to use, as long as you download it from a trusted source and follow the installation steps correctly. However, you should be aware that using the mod may affect your game performance or compatibility, and may violate the game's terms of service. Use it at your own risk.
- - How can I get more money in CarX Drift Racing 2 without using the mod?
-If you don't want to use the APK Money mod, you can still get more money in CarX Drift Racing 2 by playing the game regularly and completing various tasks and challenges. You can also watch ads or make in-app purchases to get more money.
- - What are the best cars for drifting in CarX Drift Racing 2?
-The best cars for drifting in CarX Drift Racing 2 depend on your personal preference and style. However, some of the most popular and recommended cars for drifting are:
-
-- CarX E30 - A classic BMW model with good handling and balance.
-- CarX RX8 - A sporty Mazda model with high power and speed.
-- CarX S15 - A sleek Nissan model with great acceleration and stability.
-- CarX Mustang - A powerful Ford model with a lot of torque and drift potential.
-- CarX Supra - A legendary Toyota model with a high-performance engine and design.
-
- - How can I join or create a club in CarX Drift Racing 2?
-To join or create a club in CarX Drift Racing 2, you need to go to the club menu and tap on the join or create button. You can search for existing clubs by name, rating, or region, or you can create your own club by choosing a name, a logo, and a description. You can also invite other players to join your club or accept invitations from other clubs.
- - How can I share my replays or photos in CarX Drift Racing 2?
-To share your replays or photos in CarX Drift Racing 2, you need to go to the replay or photo mode and tap on the share button. You can choose from different options, such as saving to your device, uploading to YouTube, or sharing on social media. You can also edit your replays or photos before sharing them.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fffiloni/AnimateDiff-Image-Init/download_bashscripts/0-MotionModule.sh b/spaces/fffiloni/AnimateDiff-Image-Init/download_bashscripts/0-MotionModule.sh
deleted file mode 100644
index 8e2007ee6210f45e6f904ccecaad66eeff5e59ec..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/AnimateDiff-Image-Init/download_bashscripts/0-MotionModule.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-gdown 1RqkQuGPaCO5sGZ6V6KZ-jUWmsRu48Kdq -O models/Motion_Module/
-gdown 1ql0g_Ys4UCz2RnokYlBjyOYPbttbIpbu -O models/Motion_Module/
\ No newline at end of file
diff --git a/spaces/fffiloni/ControlVideo/inference.sh b/spaces/fffiloni/ControlVideo/inference.sh
deleted file mode 100644
index 2ff056ed060222067cdbc762dc087f71214b27fa..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/ControlVideo/inference.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-python inference.py \
- --prompt "A striking mallard floats effortlessly on the sparkling pond." \
- --condition "depth" \
- --video_path "data/mallard-water.mp4" \
- --output_path "outputs/" \
- --video_length 15 \
- --smoother_steps 19 20 \
- --width 512 \
- --height 512 \
- # --is_long_video
\ No newline at end of file
diff --git a/spaces/fffiloni/stable-diffusion-color-sketch/app.py b/spaces/fffiloni/stable-diffusion-color-sketch/app.py
deleted file mode 100644
index fe301a5863167c1398966294634f7ff48de0d888..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/stable-diffusion-color-sketch/app.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import gradio as gr
-#import torch
-#from torch import autocast // only for GPU
-
-from PIL import Image
-import numpy as np
-from io import BytesIO
-import os
-MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')
-
-#from diffusers import StableDiffusionPipeline
-from diffusers import StableDiffusionImg2ImgPipeline
-
-print("hello sylvain")
-
-YOUR_TOKEN=MY_SECRET_TOKEN
-
-device="cpu"
-
-#prompt_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN)
-#prompt_pipe.to(device)
-
-img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN)
-img_pipe.to(device)
-
-source_img = gr.Image(source="canvas", type="filepath", tool='color-sketch', label="new gradio color sketch")
-
-gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
-
-def resize(value,img):
- #baseheight = value
- img = Image.open(img)
- #hpercent = (baseheight/float(img.size[1]))
- #wsize = int((float(img.size[0])*float(hpercent)))
- #img = img.resize((wsize,baseheight), Image.Resampling.LANCZOS)
- img = img.resize((value,value), Image.Resampling.LANCZOS)
- return img
-
-
-def infer(source_img, prompt):
-
- source_image = resize(512, source_img)
- source_image.save('source.png')
- images_list = img_pipe([prompt] * 2, init_image=source_image, strength=0.75)
- images = []
- safe_image = Image.open(r"unsafe.png")
- for i, image in enumerate(images_list["sample"]):
- if(images_list["nsfw_content_detected"][i]):
- images.append(safe_image)
- else:
- images.append(image)
- return images
-
-print("Great sylvain ! Everything is working fine !")
-
-title="Paint Stable Diffusion CPU"
-description="Img-2-Img Stable Diffusion example using CPU and the beta color-sketch gradio tool.
Warning: Slow process... ~5/10 min inference time. NSFW filter enabled."
-custom_css = "style.css"
-
-gr.Interface(fn=infer, inputs=[source_img, "text"], outputs=gallery,title=title,description=description,css=custom_css).queue(max_size=100).launch(enable_queue=True)
-
diff --git a/spaces/fracapuano/AISandbox/mailing/mailing.py b/spaces/fracapuano/AISandbox/mailing/mailing.py
deleted file mode 100644
index a40bb2a4f135e386270f48aaa8b5f8eb65b718c1..0000000000000000000000000000000000000000
--- a/spaces/fracapuano/AISandbox/mailing/mailing.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import smtplib
-from email.mime.text import MIMEText
-from email.mime.multipart import MIMEMultipart
-from typing import Text, Union, Iterable
-
-
-def mailing_main(subject:Text, body:Text, to_address:Union[Text, Iterable[Text]]):
- """Sends the email with the given subject and body to the given address (accepts also list of addresses)."""
- # Mailing server configuration
- smtp_server = 'smtp.gmail.com.'
- smtp_port = 587
- sender_email = 'bainhackathon@gmail.com'
- sender_password = 'onyghfffdbmurjdf'
-
- # This creates the actual email message
- msg = MIMEMultipart()
- msg['From'] = sender_email
- msg['To'] = to_address
- msg['Subject'] = subject
- msg.attach(MIMEText(body, 'plain'))
-
- # Connects to SMTP server and then sends the actual email
- try:
- server = smtplib.SMTP(smtp_server, smtp_port)
- server.starttls()
- server.login(sender_email, sender_password)
- server.sendmail(sender_email, to_address, msg.as_string())
- server.quit()
- print("Email sent successfully!")
- except Exception as e:
- print("Error sending email:", e)
-
diff --git a/spaces/gagan3012/T5-Summarization/Makefile b/spaces/gagan3012/T5-Summarization/Makefile
deleted file mode 100644
index e8352210d475daa7a1dfff65e41bc84f05cd3a28..0000000000000000000000000000000000000000
--- a/spaces/gagan3012/T5-Summarization/Makefile
+++ /dev/null
@@ -1,95 +0,0 @@
-.PHONY: clean dirs virtualenv lint requirements push pull run
-
-#################################################################################
-# GLOBALS #
-#################################################################################
-
-PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
-PYTHON_INTERPRETER = python
-
-#################################################################################
-# COMMANDS #
-#################################################################################
-
-## Create virtualenv.
-## Activate with the command:
-## source env/bin/activate
-virtualenv:
- virtualenv -p $(PYTHON_INTERPRETER) env
-
-## Install Python Dependencies.
-## Make sure you activate the virtualenv first!
-requirements:
- $(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel
- $(PYTHON_INTERPRETER) -m pip install -r requirements.txt
-
-## Create directories that are ignored by git but required for the project
-dirs:
- mkdir -p data/raw data/processed models
-
-## Delete all compiled Python files
-clean:
- find . -type f -name "*.py[co]" -delete
- find . -type d -name "__pycache__" -delete
-
-## Lint using flake8
-lint:
- flake8 src
-
-## Upload Data to default DVC remote
-push:
- dvc push -r origin
-
-
-## Download Data from default DVC remote
-pull:
- dvc pull
-
-## run the DVC pipeline - recompute any modified outputs such as processed data or trained models
-run:
- dvc repro dvc.yaml
-
-#################################################################################
-# PROJECT RULES #
-#################################################################################
-
-
-
-#################################################################################
-# Self Documenting Commands #
-#################################################################################
-
-.DEFAULT_GOAL := help
-
-# Inspired by
-# sed script explained:
-# /^##/:
-# * save line in hold space
-# * purge line
-# * Loop:
-# * append newline + line to hold space
-# * go to next line
-# * if line starts with doc comment, strip comment character off and loop
-# * remove target prerequisites
-# * append hold space (+ newline) to line
-# * replace newline plus comments by `---`
-# * print line
-# Separate expressions are necessary because labels cannot be delimited by
-# semicolon; see
-.PHONY: help
-help:
- @echo "$$(tput bold)Available rules:$$(tput sgr0)"
- @echo
- @sed -n -e "/^## / Missing" $Missing \
- | LC_ALL='C' sort --ignore-case \
- | awk -F '---' \
- -v ncol=$$(tput cols) \
- -v indent=19 \
- -v col_on="$$(tput setaf 6)" \
- -v col_off="$$(tput sgr0)" \
- 'Missing \
- printf "%s ", words[i]; \
- } \
- printf "\n"; \
- }' \
- | more $(shell test $(shell uname) = Darwin && echo '--no-init --raw-control-chars')
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/contour_expand.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/contour_expand.py
deleted file mode 100644
index ea1111e1768b5f27e118bf7dbc0d9c70a7afd6d7..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/contour_expand.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import numpy as np
-import torch
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', ['contour_expand'])
-
-
-def contour_expand(kernel_mask, internal_kernel_label, min_kernel_area,
- kernel_num):
- """Expand kernel contours so that foreground pixels are assigned into
- instances.
-
- Arguments:
- kernel_mask (np.array or Tensor): The instance kernel mask with
- size hxw.
- internal_kernel_label (np.array or Tensor): The instance internal
- kernel label with size hxw.
- min_kernel_area (int): The minimum kernel area.
- kernel_num (int): The instance kernel number.
-
- Returns:
- label (list): The instance index map with size hxw.
- """
- assert isinstance(kernel_mask, (torch.Tensor, np.ndarray))
- assert isinstance(internal_kernel_label, (torch.Tensor, np.ndarray))
- assert isinstance(min_kernel_area, int)
- assert isinstance(kernel_num, int)
-
- if isinstance(kernel_mask, np.ndarray):
- kernel_mask = torch.from_numpy(kernel_mask)
- if isinstance(internal_kernel_label, np.ndarray):
- internal_kernel_label = torch.from_numpy(internal_kernel_label)
-
- if torch.__version__ == 'parrots':
- if kernel_mask.shape[0] == 0 or internal_kernel_label.shape[0] == 0:
- label = []
- else:
- label = ext_module.contour_expand(
- kernel_mask,
- internal_kernel_label,
- min_kernel_area=min_kernel_area,
- kernel_num=kernel_num)
- label = label.tolist()
- else:
- label = ext_module.contour_expand(kernel_mask, internal_kernel_label,
- min_kernel_area, kernel_num)
- return label
diff --git a/spaces/giswqs/Streamlit/apps/xy.py b/spaces/giswqs/Streamlit/apps/xy.py
deleted file mode 100644
index 1ca2cef25553a8b1d52c19db0aac6c3ca37a6858..0000000000000000000000000000000000000000
--- a/spaces/giswqs/Streamlit/apps/xy.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import leafmap.foliumap as leafmap
-import pandas as pd
-import streamlit as st
-
-
-def app():
-
- st.title("Add Points from XY")
-
- sample_url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/world_cities.csv"
- url = st.text_input("Enter URL:", sample_url)
- m = leafmap.Map(locate_control=True, plugin_LatLngPopup=False)
-
- if url:
-
- try:
- df = pd.read_csv(url)
-
- columns = df.columns.values.tolist()
- row1_col1, row1_col2, row1_col3, row1_col4, row1_col5 = st.columns(
- [1, 1, 3, 1, 1]
- )
-
- lon_index = 0
- lat_index = 0
-
- for col in columns:
- if col.lower() in ["lon", "longitude", "long", "lng"]:
- lon_index = columns.index(col)
- elif col.lower() in ["lat", "latitude"]:
- lat_index = columns.index(col)
-
- with row1_col1:
- x = st.selectbox("Select longitude column", columns, lon_index)
-
- with row1_col2:
- y = st.selectbox("Select latitude column", columns, lat_index)
-
- with row1_col3:
- popups = st.multiselect("Select popup columns", columns, columns)
-
- with row1_col4:
- heatmap = st.checkbox("Add heatmap")
-
- if heatmap:
- with row1_col5:
- if "pop_max" in columns:
- index = columns.index("pop_max")
- else:
- index = 0
- heatmap_col = st.selectbox("Select heatmap column", columns, index)
- try:
- m.add_heatmap(df, y, x, heatmap_col)
- except:
- st.error("Please select a numeric column")
-
- try:
- m.add_points_from_xy(df, x, y, popups)
- except:
- st.error("Please select a numeric column")
-
- except Exception as e:
- st.error(e)
-
- m.to_streamlit()
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/BAHAN AJAR - Universitas Udayana[2].md b/spaces/gotiQspiryo/whisper-ui/examples/BAHAN AJAR - Universitas Udayana[2].md
deleted file mode 100644
index 67e2b8fdeafaa7e2dbba855732bbf718893c07ca..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/BAHAN AJAR - Universitas Udayana[2].md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-Bismillah sya mau bertnya ya
*Mohon bantuannya*
1. Pada kondisi yang bagaimana anda menggunakan rancangan acak lengkap dalam suatu penelitian *
2. Pada kondisi yang bagaimana anda menggunakan rancangan acak kelompok dalam suatu penelian
3. Pada kondisi yang bagaimana anda menggunakan rancangan faktorial dalam suatu penelitian?
4. Berikan gambaran secara lengkap dan jelas (dijelaskan juga perlakuan, pengacakan dan kondisi/lokasi penelitiannya) suatu penelitian yang menggunakan rancangan acak lengkap? *
5. Berikan gambaran secara lengkap dan jelas (dijelaskan juga perlakuan, pengacakan dan kondisi/lokasi penelitiannya) suatu penelitian yang menggunakan rancangan acak kelompok? *
-Misalnya, sebagai contoh sederhana dari penelitian eksperimental, peneliti ingin mengetahui efektivitas penggunaan sistem alarm rokok di ruang publik untuk mengurangi konsumsi rokok di tempat umum. Beberapa ruang publik dibangun menjadi rokok, yang lain dengan fitur yang sama tidak dilengkapi dengan alarm rokok.
-contoh soal rancangan acak lengkap pdf to jpg
Download ✸✸✸ https://urlgoal.com/2uyNCg
-1Inventarisasi Hutan
- Petak Ukur
- Inventore Hutan dengan sampling revisi
- Invent Kuliah II
- Sampling
- Statistik Untuk Kehutanan
2Analisis Keanekaragaman Flora dan Fauna- Laporan pengamatan burung metode IPA
- Perhitungan Metode IPA
- Proyeksi Metode IPA
3Penyuluhan Kehutanan- Bab I
- Bab II
- Bab III
- Bab IV
- Bab V
- Mengukur Partisipasi
- Metode Penyuluhan Kehutanan
- Proses persiapan penyuluhan
4Pemanenan Hasil hutan- Muat Bongkar
- Penyaradan
- Penebangan
- Pembagian batang
- Perencanaan pembukaan wilayah hutan
- Sistem pemanenan kayu di hutan rawa tropika Indonesia
- Road contructions
- RIL
5Perlindungan Hutan- Ilmu hama hutan
- Pengendalian hama
- Penyakit hutan
- Gangguan hutan dari faktor abiotik
6Pengantar Konservasi Sumber Daya Hutan- Pengeertian dan sejarah konservasi
- Konservasi pada tingkat spesies dan populasi
- Bentuk kawasan dilindungi
- HCFC
- Overview
7Hutan Kota- Hutan Kota 1
- Hutan Kota 2
- Hutan Kota 3
- Hutan Kota 4
- Hutan kota 5
- Outline
- PP 63 th 2002 tentang Hutan kota
- tujuan pembangunan kota
- Urban Forest Planning
8Rancangan Percobaan- Contoh soal latihan rancob
- Percobaan dua faktor
- RAK
- Rancangan acak kelompok
- Rancangan acak lengkap
- Rancob All dosen revisi
9Perencanaan Hutan- Kuliah 1 Pendahuluan
- Kuliah 2 Unsur-unsur dan jenis perencanaan
- Kuliah 3 Perencanaan pengelolaan hutan berbasis ekosistem
- Kuliah 4 perencanaan Partisipatif
- Penyelesaian masalah Multikriteria Kehutanan dengan software
10Biometrika Hutan- Ilmu ukur kayu
- Penentuan volume sortimen
- Pengukuran dimensi tegakan
- Tabel tegakan
11Pengelolaan Daerah Aliran Sungai- Kuliah Das 1
- Kuliah Das 2
- Kuliah Das 3
- Kuliah Das 4
- Pengaruh perlakuan hutan terhadap kondisi hidrologi
12Klimatologi Pertanian- Ruang lingkup iklim
- Siklus udara
- Energi sumber penggerak iklim
- Suhu udara
- Evapotranspirasi dan Kelembaban
- Neraca radiasi
- Iklim tropis Indonesia, Sumatra, lampung
- Proses pembentukan awan dan hujan
- Iklim Global
- Kelembaban udara
13Agroforestry- Bahan Ajar Agroforestry 1
- Bahan Ajar Agroforestry 2
- Bahan Ajar Agroforestry 3
14Bioteknologi Kehutanan- Definisi dan sejarah
- Bioremediasi
- Mikoriza
- Endomikoriza
- Pemanfaatan mikroorganisme
- SNI
15Dasar-dasar Pengelolaan Kayu- Kayu lapis
- Lect11 Particleboard
- Lect12 Fiberboard
- Lect13 Pulp and Paper
16Pengelolaan Hutan Rakyat- Acara 1
- Acara 2
- Acara 3
- Acara 4
- Keanekaragaman jenis burung air
- Efektivitas penyerapan debu oleh daun
18Teknik Sampling- Kuliah 1
- Cluster Sampling
- Double Sampling
- Sampling Bertingkat
- Sistematik Sampling
- Statified Ramdom Sampling
19Sistem informasi Geografi- Kuliah 1
- Pembangunan berkelanjutan
20Fisika DasarBab 1 Penyusunan dan Penguraian VektorÂ
-Penelitian ini bertujuan untuk mengetahui pengaruh pembedaan kualitas konsentrat terhadap pertumbuhan ukuran-ukuran tubuh, bobot badan dan konsumsi pakan pedet FH betina lepas sapih. Penelitian ini menggunakan 12 ekor pedet FH betina lepas sapih, bobot badan rata-rata 84,25 ± 15,16 kg (CV= 18,62%), rata-rata tinggi pundak awal 90,56 ± 4,86 cm (CV= 5,73%), rata-rata panjang badan awal 75,15 ± 5,43 cm (CV= 2,23%), rata-rata lingkar dada awal 104,98 ± 5,87 cm (CV= 5,59%), rata-rata panjang tulang carpus awal 3,30 ± 0,61 cm (CV= 18,48%), rata-rata tulang metacarpus awal 15,79 ± 1,91 cm (CV= 7,45%). Pakan
penelitian yang digunakan adalah rumput gajah dan formulasi konsentrat I dan formulasi konsentrat II dengan perbedaan protein kasar (PK) dan total digestibel nutrien (TDN). Parameter yang diukur antara lain pertambahan bobot badan dan ukuran-ukuran tubuh meliputi lingkar dada (LD), panjang badan (PB), tinggi pundak (TP), panjang tulang carpus dan metacarpus, serta konsumsi pakan. Penganbilan sampel dilakukan secara purposive sampling berdasarkan umur pedet yaitu rata-rata 3,5 bulan. Rancangan percobaan menggunakan rancangan acak lengkap (RAL) dengan dua perlakuan dan masing-masing diulang enam kali. Semua data dianalisa secara statistik menggunakan uji t dengan peluang kesalahan 5%. Simpulan penelitian ini adalah bahwa formulasi konsentrat I dan formulasi konsentrat II dapat menjadi pakan bagi pedet FH betina lepas sapih dan layak untuk dijadikan calon induk pengganti.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Download Jungle Ki Sherni full movie in hindi in 3gp The story of a tigress of the forests and her tribal allies.md b/spaces/gotiQspiryo/whisper-ui/examples/Download Jungle Ki Sherni full movie in hindi in 3gp The story of a tigress of the forests and her tribal allies.md
deleted file mode 100644
index f39baf66ba2a93d608507f4fb38c847f5c53906d..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Download Jungle Ki Sherni full movie in hindi in 3gp The story of a tigress of the forests and her tribal allies.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-Search www jungle ki sherni hot sexy 3gp video download comatrena videosd magi Photos
Search www jungle ki sherni hot sexy 3gp video download comatrena videosd magi XXX Videos
Search www jungle ki sherni hot sexy 3gp video download comatrena videosd magi HD Videos
Search www jungle ki sherni hot sexy 3gp video download comatrena videosd magi Indian Videos
Search www jungle ki sherni hot sexy 3gp video download comatrena videosd magi MP4 Videos
Search www jungle ki sherni hot sexy 3gp video download comatrena videosd magi Indian Images
Search www jungle ki sherni hot sexy 3gp video download comatrena videosd magi Leaked Videos
Search www jungle ki sherni hot sexy 3gp video download comatrena videosd magi Leaked Pics
Search www jungle ki sherni hot sexy 3gp video download comatrena videosd magi XXX Posts
-wapbold.com - is a free online porn tube portal, where can watch and dowload many free porn movies and porn videos, which is daily updated. So watch and download your favourite mobile porn here, at our wapbold porn site and don`t forget to bookmark us! See you at wapbold.com ;)
-download Jungle Ki Sherni full movie in hindi in 3gp
Download File ✒ https://urlgoal.com/2uyN4P
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/gulabpatel/GFP_GAN/PaperModel.md b/spaces/gulabpatel/GFP_GAN/PaperModel.md
deleted file mode 100644
index aec81d31de56df74c19ae840d44ad2b2a1f06d28..0000000000000000000000000000000000000000
--- a/spaces/gulabpatel/GFP_GAN/PaperModel.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# Installation
-
-We now provide a *clean* version of GFPGAN, which does not require customized CUDA extensions. See [here](README.md#installation) for this easier installation.
-If you want want to use the original model in our paper, please follow the instructions below.
-
-1. Clone repo
-
- ```bash
- git clone https://github.com/xinntao/GFPGAN.git
- cd GFPGAN
- ```
-
-1. Install dependent packages
-
- As StyleGAN2 uses customized PyTorch C++ extensions, you need to **compile them during installation** or **load them just-in-time(JIT)**.
- You can refer to [BasicSR-INSTALL.md](https://github.com/xinntao/BasicSR/blob/master/INSTALL.md) for more details.
-
- **Option 1: Load extensions just-in-time(JIT)** (For those just want to do simple inferences, may have less issues)
-
- ```bash
- # Install basicsr - https://github.com/xinntao/BasicSR
- # We use BasicSR for both training and inference
- pip install basicsr
-
- # Install facexlib - https://github.com/xinntao/facexlib
- # We use face detection and face restoration helper in the facexlib package
- pip install facexlib
-
- pip install -r requirements.txt
- python setup.py develop
-
- # remember to set BASICSR_JIT=True before your running commands
- ```
-
- **Option 2: Compile extensions during installation** (For those need to train/inference for many times)
-
- ```bash
- # Install basicsr - https://github.com/xinntao/BasicSR
- # We use BasicSR for both training and inference
- # Set BASICSR_EXT=True to compile the cuda extensions in the BasicSR - It may take several minutes to compile, please be patient
- # Add -vvv for detailed log prints
- BASICSR_EXT=True pip install basicsr -vvv
-
- # Install facexlib - https://github.com/xinntao/facexlib
- # We use face detection and face restoration helper in the facexlib package
- pip install facexlib
-
- pip install -r requirements.txt
- python setup.py develop
- ```
-
-## :zap: Quick Inference
-
-Download pre-trained models: [GFPGANv1.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth)
-
-```bash
-wget https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth -P experiments/pretrained_models
-```
-
-- Option 1: Load extensions just-in-time(JIT)
-
- ```bash
- BASICSR_JIT=True python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/whole_imgs --save_root results --arch original --channel 1
-
- # for aligned images
- BASICSR_JIT=True python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/cropped_faces --save_root results --arch original --channel 1 --aligned
- ```
-
-- Option 2: Have successfully compiled extensions during installation
-
- ```bash
- python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/whole_imgs --save_root results --arch original --channel 1
-
- # for aligned images
- python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/cropped_faces --save_root results --arch original --channel 1 --aligned
- ```
diff --git a/spaces/gundruke/ua-thesis-absa/models/layers/__init__.py b/spaces/gundruke/ua-thesis-absa/models/layers/__init__.py
deleted file mode 100644
index 2ee872937f8b5718aaf0faacb6f22e024ec55a87..0000000000000000000000000000000000000000
--- a/spaces/gundruke/ua-thesis-absa/models/layers/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .CRF import CRF
diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/structures/image_list.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/structures/image_list.py
deleted file mode 100644
index 2d89224b64402badf7f0b113188b5f653df912ac..0000000000000000000000000000000000000000
--- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/structures/image_list.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-from __future__ import division
-from typing import Any, List, Sequence, Tuple, Union
-import torch
-from torch.nn import functional as F
-
-
-class ImageList(object):
- """
- Structure that holds a list of images (of possibly
- varying sizes) as a single tensor.
- This works by padding the images to the same size,
- and storing in a field the original sizes of each image
-
- Attributes:
- image_sizes (list[tuple[int, int]]): each tuple is (h, w)
- """
-
- def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
- """
- Arguments:
- tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
- image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
- be smaller than (H, W) due to padding.
- """
- self.tensor = tensor
- self.image_sizes = image_sizes
-
- def __len__(self) -> int:
- return len(self.image_sizes)
-
- def __getitem__(self, idx: Union[int, slice]) -> torch.Tensor:
- """
- Access the individual image in its original size.
-
- Returns:
- Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
- """
- size = self.image_sizes[idx]
- return self.tensor[idx, ..., : size[0], : size[1]] # type: ignore
-
- def to(self, *args: Any, **kwargs: Any) -> "ImageList":
- cast_tensor = self.tensor.to(*args, **kwargs)
- return ImageList(cast_tensor, self.image_sizes)
-
- @property
- def device(self) -> torch.device:
- return self.tensor.device
-
- @staticmethod
- def from_tensors(
- tensors: Sequence[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0
- ) -> "ImageList":
- """
- Args:
- tensors: a tuple or list of `torch.Tensors`, each of shape (Hi, Wi) or
- (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
- to the same shape with `pad_value`.
- size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
- the common height and width is divisible by `size_divisibility`.
- This depends on the model and many models need a divisibility of 32.
- pad_value (float): value to pad
-
- Returns:
- an `ImageList`.
- """
- assert len(tensors) > 0
- assert isinstance(tensors, (tuple, list))
- for t in tensors:
- assert isinstance(t, torch.Tensor), type(t)
- assert t.shape[1:-2] == tensors[0].shape[1:-2], t.shape
- # per dimension maximum (H, W) or (C_1, ..., C_K, H, W) where K >= 1 among all tensors
- max_size = (
- # In tracing mode, x.shape[i] is Tensor, and should not be converted
- # to int: this will cause the traced graph to have hard-coded shapes.
- # Instead we should make max_size a Tensor that depends on these tensors.
- # Using torch.stack twice seems to be the best way to convert
- # list[list[ScalarTensor]] to a Tensor
- torch.stack(
- [
- torch.stack([torch.as_tensor(dim) for dim in size])
- for size in [tuple(img.shape) for img in tensors]
- ]
- )
- .max(0)
- .values
- )
-
- if size_divisibility > 0:
- stride = size_divisibility
- # the last two dims are H,W, both subject to divisibility requirement
- max_size = torch.cat([max_size[:-2], (max_size[-2:] + (stride - 1)) // stride * stride])
-
- image_sizes = [tuple(im.shape[-2:]) for im in tensors]
-
- if len(tensors) == 1:
- # This seems slightly (2%) faster.
- # TODO: check whether it's faster for multiple images as well
- image_size = image_sizes[0]
- padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
- if all(x == 0 for x in padding_size): # https://github.com/pytorch/pytorch/issues/31734
- batched_imgs = tensors[0].unsqueeze(0)
- else:
- padded = F.pad(tensors[0], padding_size, value=pad_value)
- batched_imgs = padded.unsqueeze_(0)
- else:
- # max_size can be a tensor in tracing mode, therefore use tuple()
- batch_shape = (len(tensors),) + tuple(max_size)
- batched_imgs = tensors[0].new_full(batch_shape, pad_value)
- for img, pad_img in zip(tensors, batched_imgs):
- pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img)
-
- return ImageList(batched_imgs.contiguous(), image_sizes)
diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/README.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/README.md
deleted file mode 100644
index b9d5b15512c0bd160accbb1823236b8954a37b86..0000000000000000000000000000000000000000
--- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-This directory contains:
-
-1. A script that converts a detectron2 model to caffe2 format.
-
-2. An example that loads a Mask R-CNN model in caffe2 format and runs inference.
-
-See [tutorial](https://detectron2.readthedocs.io/tutorials/deployment.html)
-for their usage.
diff --git a/spaces/hf-audio/open_asr_leaderboard/init.py b/spaces/hf-audio/open_asr_leaderboard/init.py
deleted file mode 100644
index 2e24a0345a30dd3eb1a79616f81a65efa36a1956..0000000000000000000000000000000000000000
--- a/spaces/hf-audio/open_asr_leaderboard/init.py
+++ /dev/null
@@ -1,93 +0,0 @@
-import os
-from constants import EVAL_REQUESTS_PATH
-from pathlib import Path
-from huggingface_hub import HfApi, Repository
-
-TOKEN_HUB = os.environ.get("TOKEN_HUB", None)
-QUEUE_REPO = os.environ.get("QUEUE_REPO", None)
-QUEUE_PATH = os.environ.get("QUEUE_PATH", None)
-
-hf_api = HfApi(
- endpoint="https://huggingface.co",
- token=TOKEN_HUB,
-)
-
-def load_all_info_from_dataset_hub():
- eval_queue_repo = None
- results_csv_path = None
- requested_models = None
-
- passed = True
- if TOKEN_HUB is None:
- passed = False
- else:
- print("Pulling evaluation requests and results.")
-
- eval_queue_repo = Repository(
- local_dir=QUEUE_PATH,
- clone_from=QUEUE_REPO,
- use_auth_token=TOKEN_HUB,
- repo_type="dataset",
- )
- eval_queue_repo.git_pull()
-
- # Local directory where dataset repo is cloned + folder with eval requests
- directory = QUEUE_PATH / EVAL_REQUESTS_PATH
- requested_models = get_all_requested_models(directory)
- requested_models = [p.stem for p in requested_models]
- # Local directory where dataset repo is cloned
- csv_results = get_csv_with_results(QUEUE_PATH)
- if csv_results is None:
- passed = False
- if not passed:
- print("No HuggingFace token provided. Skipping evaluation requests and results.")
-
- return eval_queue_repo, requested_models, csv_results
-
-
-def upload_file(requested_model_name, path_or_fileobj):
- dest_repo_file = Path(EVAL_REQUESTS_PATH) / path_or_fileobj.name
- dest_repo_file = str(dest_repo_file)
- hf_api.upload_file(
- path_or_fileobj=path_or_fileobj,
- path_in_repo=str(dest_repo_file),
- repo_id=QUEUE_REPO,
- token=TOKEN_HUB,
- repo_type="dataset",
- commit_message=f"Add {requested_model_name} to eval queue")
-
-def get_all_requested_models(directory):
- directory = Path(directory)
- all_requested_models = list(directory.glob("*.txt"))
- return all_requested_models
-
-def get_csv_with_results(directory):
- directory = Path(directory)
- all_csv_files = list(directory.glob("*.csv"))
- latest = [f for f in all_csv_files if f.stem.endswith("latest")]
- if len(latest) != 1:
- return None
- return latest[0]
-
-
-
-def is_model_on_hub(model_name, revision="main") -> bool:
- try:
- model_name = model_name.replace(" ","")
- author = model_name.split("/")[0]
- model_id = model_name.split("/")[1]
- if len(author) == 0 or len(model_id) == 0:
- return False, "is not a valid model name. Please use the format `author/model_name`."
- except Exception as e:
- return False, "is not a valid model name. Please use the format `author/model_name`."
-
- try:
- models = list(hf_api.list_models(author=author, search=model_id))
- matched = [model_name for m in models if m.modelId == model_name]
- if len(matched) != 1:
- return False, "was not found on the hub!"
- else:
- return True, None
- except Exception as e:
- print(f"Could not get the model from the hub.: {e}")
- return False, "was not found on hub!"
\ No newline at end of file
diff --git a/spaces/hhalim/hadi_first_day_in_HF/README.md b/spaces/hhalim/hadi_first_day_in_HF/README.md
deleted file mode 100644
index f71c508603356006ad9676a0602d35969180425e..0000000000000000000000000000000000000000
--- a/spaces/hhalim/hadi_first_day_in_HF/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Hadi First Day In HF
-emoji: 🐠
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/hra/Curriculum-BabyAGI/app.py b/spaces/hra/Curriculum-BabyAGI/app.py
deleted file mode 100644
index 74214146d6f8eb8d4e945c180f1e5d5280afef6b..0000000000000000000000000000000000000000
--- a/spaces/hra/Curriculum-BabyAGI/app.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import json
-import openai
-import os
-
-import pandas as pd
-
-import gradio as gr
-from collections import deque
-from typing import Dict, List, Optional, Any
-
-from langchain import LLMChain, OpenAI, PromptTemplate
-import datetime
-from datetime import datetime, date, time, timedelta
-from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, Document, ServiceContext
-from langchain.llms import OpenAIChat
-
-import feedparser
-import pandas as pd
-import numpy as np
-
-from duckduckgo_search import ddg_videos
-from duckduckgo_search import ddg
-
-def get_learning_curriculum(openapikey,topic):
- dateforfilesave=datetime.today().strftime("%d-%m-%Y %I:%M%p")
- print(topic)
- print(dateforfilesave)
- if openapikey=='':
- return pd.DataFrame(["Please provide OpenAPI Key"],columns=['ERROR'])
-
- os.environ['OPENAI_API_KEY'] = str(openapikey)
-
-
- ###Task Creation Agent
-
- prompt='You are a training center AI. Give me a detailed curriculum to learn about "{topicforquery}" using search. The curriculum will be a series of learning tasks to be achieved. Give output as a python list of jsons with "task name", "search keyword" to search to complete the task. Donot repeat the taks. For each task name also add a list of "questions" to ask the search results data to select specific articles and complete the curriculum. Remember the search list will be a dataframe of titles & body of the searched article and you may not be able to go through the full article hence these questions should be of types "Which article best suits a learning curriculum?", "Which article is learning oriented?. To reiterate output should be in json with keys task name ex: get beginner training articles for painting, search keyword ex: beginner painting & questions ex: What are top articles for painting?'.format(topicforquery=topic)
- openai.api_key = os.getenv("OPENAI_API_KEY")
- resp=openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": prompt}
- ]
- )
- tasklist=json.loads(resp['choices'][0]['message']['content'])
-
- ###Function to search the internet using DuckDuckGo exposed as a tool
- def research_search(search_keyword,question_to_ask,topic):
- llm_predictor = LLMPredictor(llm=OpenAIChat(temperature=0, model_name="gpt-3.5-turbo"))
- service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
- keyword=search_keyword
- keyword="+".join(keyword.lower().split())
- keyword=keyword.replace(' and ',' AND ')
- posts = ddg(keyword+' '+topic, safesearch='Off', page=1)
- latestnews_df=pd.DataFrame(posts)
- print(latestnews_df.columns)
- #latestnews_df=latestnews_df.drop_duplicates(subset=['title','link','published'])
- latestnews_df['text']='Title: '+latestnews_df['title']+' Description: '+latestnews_df['body']
- print(latestnews_df['text'].tolist())
- documents=[Document(t) for t in latestnews_df['text'].tolist()]
- index = GPTSimpleVectorIndex.from_documents(documents)
- prompt_query=question_to_ask
- respstr=str(index.query(prompt_query,
- service_context=service_context,
- response_mode="tree_summarize",
- similarity_top_k=10))
- print("Search response: ",respstr)
- return respstr
-
- ###Task Execution Agent loop
- list1=[]
- list2=[]
- list3=[]
- for i in range(len(tasklist)):
- taskstuff=tasklist[i]
- search_keyword=taskstuff['search keyword']
- for question in taskstuff['questions']:
- response_string=research_search(search_keyword,question,topic)
- list1.append(taskstuff['task name'])
- list2.append(question)
- list3.append(response_string)
-
- ###Create dataframe to display
- outputdf=pd.DataFrame()
- outputdf['Task']=list1
- outputdf['Question']=list2
- outputdf['Learning']=list3
-
- return outputdf
-
-with gr.Blocks() as demo:
- gr.Markdown("BabyAGI creates Learning Curriculum
")
- gr.Markdown(
- """ This is the first step of an experiment using BabyAGI as a "framework" to construct focused use cases (ex: learning curriculums). The flow uses two AI agents 1) Task creation agent: to create a task list & questions 2) Task execution agent: to execute the tasks & find answers to the questions. Unlike the original BabyAGI concept, this is not open-ended. \n\nNote: This is a series of experiments to understand AI agents and hence do check the quality of output. OpenAI agents (gpt-3.5-turbo), llama-index & DuckDuckGo search are used. The analysis takes roughly 120 secs & may not always be consistent. An error occurs when the OpenAI Api key is not provided/ ChatGPT API is overloaded/ ChatGPT is unable to correctly decipher & format the output\n\n Future directions: 1) Make the task creation more open-ended or longer. 2) Discover multiple learning paths and make ChatGPT introspect on them before finalizing the optimal one 3)Learn from the answers and change the curriculum"""
- )
-
- with gr.Row() as row:
- with gr.Column():
- textboxtopic = gr.Textbox(placeholder="Enter Topic for Curriculum...", lines=1,label='Topic')
- with gr.Column():
- textboxopenapi = gr.Textbox(placeholder="Enter OpenAPI Key...", lines=1,label='OpenAPI Key')
- with gr.Row() as row:
- examples = gr.Examples(examples=['Acrylic painting','Generative AI','latest NLP topic models','FIFA mobile game','Telemedicine'],
- inputs=[textboxtopic])
- with gr.Row() as row:
- btn = gr.Button("Generate \nCurriculum")
-
- with gr.Row() as row:
- table1=gr.Dataframe(
- #headers=["Item", "Cost"],
- #datatype=["str", "str","str"],
- label="Learning Curriculum",
- )
-
- btn.click(get_learning_curriculum, inputs=[textboxopenapi,textboxtopic],outputs=[table1])
-
-demo.launch(debug=True)
\ No newline at end of file
diff --git a/spaces/hysts/zeroscope-v2/README.md b/spaces/hysts/zeroscope-v2/README.md
deleted file mode 100644
index 803a683fa3488ffd9086df37d9f8237e98777b9d..0000000000000000000000000000000000000000
--- a/spaces/hysts/zeroscope-v2/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Zeroscope V2
-emoji: 🌖
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 3.45.2
-app_file: app.py
-pinned: false
-license: mit
-suggested_hardware: t4-small
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/idlsono/Idksono4/README.md b/spaces/idlsono/Idksono4/README.md
deleted file mode 100644
index 484f63344c33755efc6687ddcb752305dc60f3f7..0000000000000000000000000000000000000000
--- a/spaces/idlsono/Idksono4/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Lolcats39
-emoji: 🌍
-colorFrom: yellow
-colorTo: purple
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/imseldrith/BotX/Uploader/config.py b/spaces/imseldrith/BotX/Uploader/config.py
deleted file mode 100644
index e97d8fce7dc3879ab97081ed271023b9420ad602..0000000000000000000000000000000000000000
--- a/spaces/imseldrith/BotX/Uploader/config.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# MIT License
-
-# Copyright (c) 2022 Hash Minner
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE
-
-import os
-
-import logging
-
-logging.basicConfig(
- format='%(name)s - %(levelname)s - %(message)s',
- handlers=[logging.FileHandler('log.txt'),
- logging.StreamHandler()],
- level=logging.INFO
-)
-
-
-class Config(object):
- WEBHOOK = os.environ.get("BOT_TOKEN", False)
- # Get a token from @BotFather
- BOT_TOKEN = os.environ.get("BOT_TOKEN", "")
- # The Telegram API things
- API_ID = int(os.environ.get("API_ID", 12345))
- API_HASH = os.environ.get("API_HASH")
- # Get these values from my.telegram.org
- # Array to store users who are authorized to use the bot
-
- # File /video download location
- DOWNLOAD_LOCATION = "./DOWNLOADS"
-
- MEGA_EMAIL = os.environ.get("MEGA_EMAIL", "None")
- # If deploying on vps edit the above value as example := Mega_email = "Your-Mega_email-inside-inverted-commas."
-
- # This is not necessary! Enter your mega password only if you have a mega.nz account with pro/business features.
- MEGA_PASSWORD = os.environ.get("MEGA_PASSWORD", "None")
- # If deploying on vps edit the above value as example := Mega_password = "Your-Mega_password-inside-inverted-commas."
- # Telegram maximum file upload size
- TG_MAX_FILE_SIZE = 4194304000
-
- # Chunk size that should be used with requests
- CHUNK_SIZE = int(os.environ.get("CHUNK_SIZE", 128))
- # Proxy for accessing youtube-dl in GeoRestricted Areas
- # Get your own proxy from https://github.com/rg3/youtube-dl/issues/1091#issuecomment-230163061
- HTTP_PROXY = os.environ.get("HTTP_PROXY", "")
-
- # Set timeout for subprcess
- PROCESS_MAX_TIMEOUT = 3700
-
- LOG_CHANNEL = int(os.environ.get("LOG_CHANNEL", -100))
- OWNER_ID = int(os.environ.get("OWNER_ID", "12356"))
- BOT_USERNAME = os.environ.get("BOT_USERNAME", "")
- ADL_BOT_RQ = {}
- AUTH_USERS = list({int(x)
- for x in os.environ.get("AUTH_USERS", "0").split()})
- AUTH_USERS.append(OWNER_ID)
diff --git a/spaces/inamXcontru/PoeticTTS/Californication S06e01 Hdtv X264 Evolve English Subtitles Free Dvd Download.md b/spaces/inamXcontru/PoeticTTS/Californication S06e01 Hdtv X264 Evolve English Subtitles Free Dvd Download.md
deleted file mode 100644
index dbae6f903e7eb3c62cb40762a0eebd4f90ddc89e..0000000000000000000000000000000000000000
--- a/spaces/inamXcontru/PoeticTTS/Californication S06e01 Hdtv X264 Evolve English Subtitles Free Dvd Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Californication S06e01 Hdtv X264 Evolve English Subtitles : Free Dvd
Download ->->->-> https://gohhs.com/2uz4xY
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/7 Data Recovery Crack Serial Key.md b/spaces/inplisQlawa/anything-midjourney-v4-1/7 Data Recovery Crack Serial Key.md
deleted file mode 100644
index 5491205c9b24c2ec4ea7fa3bda21afac80478bc9..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/7 Data Recovery Crack Serial Key.md
+++ /dev/null
@@ -1,6 +0,0 @@
-7 Data Recovery Crack Serial Key
Download File ✒ ✒ ✒ https://urlin.us/2uEwO3
-
- 4fefd39f24
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Crack _VERIFIED_ed Working Digital Anarchy Backdrop Designer V1 2 2 For Photoshop Rar.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Crack _VERIFIED_ed Working Digital Anarchy Backdrop Designer V1 2 2 For Photoshop Rar.md
deleted file mode 100644
index e1fbcb942a7e55392aeabae31da038aaa4d0f625..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Crack _VERIFIED_ed Working Digital Anarchy Backdrop Designer V1 2 2 For Photoshop Rar.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Cracked Working Digital Anarchy Backdrop Designer v1 2 2 for Photoshop rar
Download ->->->-> https://urlin.us/2uEvOs
-
-Oct 19, 2020 · Digital Anarchy has released Flicker Free 2. ... in this section is stacked with pro video design features, with great visuals to choose from, ... Roll - Creative COW's user support and discussion forum for users of Adobe After Effects. rar. ... 3 CE for After Effects and Premiere Pro + Crack Anarchy Flicker Free 1. 4d29de3e1b
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Mere Brother Ki Dulhan 3 1080p Full Movie Download PATCHED.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Mere Brother Ki Dulhan 3 1080p Full Movie Download PATCHED.md
deleted file mode 100644
index 2cab3dc3e81a17344f822c68505f24ab4eded080..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Mere Brother Ki Dulhan 3 1080p Full Movie Download PATCHED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Mere Brother Ki Dulhan 3 1080p full movie download
Download Zip ✅ https://urlin.us/2uEvN8
-
-Mere Brother Ki Dulhan. Saved by Anja India. 2. 2011 MoviesHd MoviesMovie TvMp3 Song DownloadFull Movies DownloadMere Brother Ki DulhanLatest ... Housefull 3 Movie is an Indien movie. ... Hetal PanchalFull movies download ... Read reviews and buy xXx: Return Of Xander Cage (Blu-ray+ DVD + Digital) at Target. 4d29de3e1b
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Moyea Swf To Video Converter Pro 4.0.0.1 Cracked.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Moyea Swf To Video Converter Pro 4.0.0.1 Cracked.md
deleted file mode 100644
index a33a4bd7cab7d4594fb92617025f700cee8ca08b..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Moyea Swf To Video Converter Pro 4.0.0.1 Cracked.md
+++ /dev/null
@@ -1,6 +0,0 @@
-moyea swf to video converter pro 4.0.0.1 cracked
DOWNLOAD ⇒⇒⇒ https://urlin.us/2uEweX
-
-Listen to Moyea Swf To Video Converter Pro 4001 Crack and thirty-nine more episodes by Assignment 3.pdf, free! No signup or install.... Flv 2 video converter ... 1fdad05405
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/AutoCAD Civil 3D 2020.1 Crack With Product Key Full [PORTABLE] Free Download.md b/spaces/inreVtussa/clothingai/Examples/AutoCAD Civil 3D 2020.1 Crack With Product Key Full [PORTABLE] Free Download.md
deleted file mode 100644
index 1124a0d58fd14f1f47096bcde3b219a280f1eb5e..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/AutoCAD Civil 3D 2020.1 Crack With Product Key Full [PORTABLE] Free Download.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
- autocad 2020 crack has many other features like snapline, dynamic views, deskew, fillet, measure, wall cutting and more. it includes a brand new toolset for exporting drawings. the new toolset for autocad civil 3d includes the ability to save a new drawing with a template. the ability to save a new drawing with a template is possible for the first time. this new template-based drawing creation saves time by allowing you to save a new drawing with just a few clicks of the mouse. the template-based drawing creation helps users save time by creating drawings in a template-based drawing. this feature allows users to save a new drawing with just a few clicks of the mouse and specify a drawing template.
-AutoCAD Civil 3D 2020.1 Crack With Product Key Full Free Download
Download Zip ✶ https://tiurll.com/2uCiX0
- autocad civil 3d 2020.1 crack has many other features like 2d and 3d graphics, gis, measure, solid modeling and many other features and improvements included in this version release. you can work collaboratively with your colleagues and customers. you can share data with them and talk to them about your projects. you can make changes in the same drawing while working on a version. you can open and close multiple drawings simultaneously.
- autocad crack also allows the user to create a presentation with a new custom presentation, called the streamlined presentation. streamlined presentation is designed to make it easy for users to create a presentation. you can share content with other users and with clients. you can also open multiple drawings at once. it lets you organize your drawings and models. autocad civil 3d 2020.1 crack provides a new method to print from autocad civil 3d by using a network printer. this method uses the network printer or the winprinter in windows to print your drawings directly to a network printer. when network printing is available, you can choose to use the network printer or the winprinter to print your drawings. then, the print settings are stored as a filter to the network printer or winprinter. when the filter is applied in the autocad civil 3d printer, you can print from autocad civil 3d directly to the network printer or winprinter.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/Bhugol Aur Aap Pdf Download REPACK.md b/spaces/inreVtussa/clothingai/Examples/Bhugol Aur Aap Pdf Download REPACK.md
deleted file mode 100644
index 52173559576554396a0fc209375eea7fb8095f9f..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Bhugol Aur Aap Pdf Download REPACK.md
+++ /dev/null
@@ -1,10 +0,0 @@
-bhugol aur aap pdf download
Download File →→→ https://tiurll.com/2uCiIv
-
-July 4, 2020 - "Geography" and "You", commonly known as G'nY (in English) and its counterpart Bhugol Aur Aap ( in Hindi), published twice a month. magazines that ... read more
-July 3, 2020 - Aaj Kal (in English) and Bhugol Aur Aap (in Hindi) are released every month, which means you can enjoy two magazines at the same time.
-If you want to see everything that goes into our magazines and see how our ... read more
-June 24, 2020 - This month we will show you four new magazines and will continue to show you more every month.
-We will only show you four magazines and the rest will be stored in ... read more 8a78ff9644
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv] UPD.md b/spaces/inreVtussa/clothingai/Examples/Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv] UPD.md
deleted file mode 100644
index cb7e9e7cb50c7be0b2b0a0863d1106cd76a208f0..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv] UPD.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-How to Watch Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv]
-Brooklyn Nine-Nine is a hilarious comedy series that follows the lives of an eclectic group of detectives in a New York precinct. The show has won several awards, including two Golden Globes and four Emmys. If you are a fan of Brooklyn Nine-Nine, you might be wondering how to watch the first two seasons of the show in high quality and with subtitles.
-In this article, we will show you how to watch Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv], which is a torrent file that contains all the episodes of the first two seasons in 1080p resolution and with subtitles in various languages. We will also explain what rarbg and rartv are, and why they are popular among torrent users.
-Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv]
DOWNLOAD ✏ https://tiurll.com/2uCliL
-What are rarbg and rartv?
-rarbg and rartv are two of the most popular torrent sites on the internet. They offer a wide range of movies, TV shows, games, music, and software for free download. They are known for their high-quality releases, fast download speeds, and reliable seeding. They also provide subtitles for many of their releases, which is very convenient for non-English speakers or hearing-impaired viewers.
-rarbg and rartv are not official sources of Brooklyn Nine-Nine, and they do not have any affiliation with the creators or distributors of the show. They are simply providing a service for people who want to watch the show without paying for it or waiting for it to be available on streaming platforms. However, downloading or streaming content from torrent sites may be illegal in some countries, so you should be careful and use a VPN (virtual private network) to protect your privacy and security.
-How to watch Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv]?
-To watch Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv], you will need a torrent client, such as uTorrent or BitTorrent, which will allow you to download the torrent file and connect to other peers who have the same file. You can find the torrent file by searching for it on rarbg or rartv's websites, or by using a torrent search engine like Torrentz2 or 1337x.
-Once you have downloaded the torrent file, you can open it with your torrent client and start downloading the episodes. The download speed will depend on your internet connection and the number of seeders (people who have the complete file and are sharing it). The more seeders there are, the faster the download will be.
-After you have downloaded all the episodes, you can watch them on your computer or transfer them to your TV or mobile device. You can use any media player that supports MKV files, such as VLC or MPC-HC. The subtitles should be embedded in the video files, so you can choose your preferred language from the menu. If not, you can download separate subtitle files from opensubtitles.com[^1^] or subtitlevid.com[^2^] and load them manually.
-
-Conclusion
-Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv] is a great way to enjoy one of the best comedy shows of recent years in high quality and with subtitles. However, you should be aware of the legal and ethical implications of downloading or streaming content from torrent sites, and use a VPN to protect yourself from potential risks. Alternatively, you can watch Brooklyn Nine-Nine legally on streaming platforms like Netflix or Hulu, or buy the DVDs or Blu-rays from official sources.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/izumi-lab/llama-13b-japanese-lora-v0-1ep/README.md b/spaces/izumi-lab/llama-13b-japanese-lora-v0-1ep/README.md
deleted file mode 100644
index 3666c04df9e84d8f96e743091a0714d19b13b346..0000000000000000000000000000000000000000
--- a/spaces/izumi-lab/llama-13b-japanese-lora-v0-1ep/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: LLaMA 13B Japanese LoRA v0 1 epoch
-emoji: 🐨
-colorFrom: gray
-colorTo: gray
-sdk: docker
-pinned: false
-license: other
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/jackyccl/segment-anything/groundingdino/util/__init__.py b/spaces/jackyccl/segment-anything/groundingdino/util/__init__.py
deleted file mode 100644
index 168f9979a4623806934b0ff1102ac166704e7dec..0000000000000000000000000000000000000000
--- a/spaces/jackyccl/segment-anything/groundingdino/util/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
diff --git a/spaces/jeffrymahbuubi/foodvision-mini/app.py b/spaces/jeffrymahbuubi/foodvision-mini/app.py
deleted file mode 100644
index 4cd715f723f6f42cc3c86fd54d25df129d61bc3f..0000000000000000000000000000000000000000
--- a/spaces/jeffrymahbuubi/foodvision-mini/app.py
+++ /dev/null
@@ -1,75 +0,0 @@
-
-### 1. Imports and class names setup ###
-import gradio as gr
-import os
-import torch
-
-from model import create_effnetb2_model
-from timeit import default_timer as timer
-from typing import Tuple, Dict
-
-# Setup class names
-class_names = ['pizza', 'steak', 'sushi']
-
-### 2. Model and transforms prepration ###
-effnetb2, effnetb2_tranforms = create_effnetb2_model(
- num_classes=3
-)
-
-# Load saved weights
-effnetb2.load_state_dict(
- torch.load(
- f"09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
- map_location=torch.device('cpu')
- )
-)
-
-### 3. Predict function ###
-def predict(img) -> Tuple[Dict, float]:
- """
- Transforms and performs a prediction on img and returns prediction and time taken.
- """
-
- # Start the timer
- start_time = timer()
-
- # Transform the target image and add a batch dimension
- img = effnetb2_tranforms(img).unsqueeze(0)
-
- # Put model into evaluation mode and turn on inference mode
- effnetb2.eval()
- with torch.inference_mode():
- # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
- pred_probs = torch.softmax(effnetb2(img), dim=1)
-
- # Create a prediction label and prediction probability for each prediction class
- pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
-
- # Calculate the prediction time
- pred_time = round(timer() - start_time, 5)
-
- # Return the prediction dictionary and prediction time
- return pred_labels_and_probs, pred_time
-
-### 4. Gradio app ###
-# Create title, description and article strings
-title = "FoodVision Mini 🍕🥩🍣"
-description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
-article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."
-
-# Create examples list from "examples/" directory
-example_list = [["examples/" + example] for example in os.listdir("examples")]
-
-# Create the Gradio demo
-demo = gr.Interface(fn=predict, # mapping function from input to output
- inputs=gr.Image(type="pil"), # what are the inputs?
- outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs?
- gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
- # Create examples list from "examples/" directory
- examples=example_list,
- title=title,
- description=description,
- article=article)
-
-# Launch the demo!
-demo.launch()
diff --git a/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/losses/lpips.py b/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/losses/lpips.py
deleted file mode 100644
index b5f19b747f2457902695213f7efcde4fdc306c1f..0000000000000000000000000000000000000000
--- a/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/losses/lpips.py
+++ /dev/null
@@ -1,891 +0,0 @@
-############################################################
-# The contents below have been combined using files in the #
-# following repository: #
-# https://github.com/richzhang/PerceptualSimilarity #
-############################################################
-
-############################################################
-# __init__.py #
-############################################################
-
-import numpy as np
-from skimage.metrics import structural_similarity
-import torch
-
-from saicinpainting.utils import get_shape
-
-
-class PerceptualLoss(torch.nn.Module):
- def __init__(self, model='net-lin', net='alex', colorspace='rgb', model_path=None, spatial=False, use_gpu=True):
- # VGG using our perceptually-learned weights (LPIPS metric)
- # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss
- super(PerceptualLoss, self).__init__()
- self.use_gpu = use_gpu
- self.spatial = spatial
- self.model = DistModel()
- self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace,
- model_path=model_path, spatial=self.spatial)
-
- def forward(self, pred, target, normalize=True):
- """
- Pred and target are Variables.
- If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1]
- If normalize is False, assumes the images are already between [-1,+1]
- Inputs pred and target are Nx3xHxW
- Output pytorch Variable N long
- """
-
- if normalize:
- target = 2 * target - 1
- pred = 2 * pred - 1
-
- return self.model(target, pred)
-
-
-def normalize_tensor(in_feat, eps=1e-10):
- norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1, keepdim=True))
- return in_feat / (norm_factor + eps)
-
-
-def l2(p0, p1, range=255.):
- return .5 * np.mean((p0 / range - p1 / range) ** 2)
-
-
-def psnr(p0, p1, peak=255.):
- return 10 * np.log10(peak ** 2 / np.mean((1. * p0 - 1. * p1) ** 2))
-
-
-def dssim(p0, p1, range=255.):
- return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2.
-
-
-def rgb2lab(in_img, mean_cent=False):
- from skimage import color
- img_lab = color.rgb2lab(in_img)
- if (mean_cent):
- img_lab[:, :, 0] = img_lab[:, :, 0] - 50
- return img_lab
-
-
-def tensor2np(tensor_obj):
- # change dimension of a tensor object into a numpy array
- return tensor_obj[0].cpu().float().numpy().transpose((1, 2, 0))
-
-
-def np2tensor(np_obj):
- # change dimenion of np array into tensor array
- return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
-
-
-def tensor2tensorlab(image_tensor, to_norm=True, mc_only=False):
- # image tensor to lab tensor
- from skimage import color
-
- img = tensor2im(image_tensor)
- img_lab = color.rgb2lab(img)
- if (mc_only):
- img_lab[:, :, 0] = img_lab[:, :, 0] - 50
- if (to_norm and not mc_only):
- img_lab[:, :, 0] = img_lab[:, :, 0] - 50
- img_lab = img_lab / 100.
-
- return np2tensor(img_lab)
-
-
-def tensorlab2tensor(lab_tensor, return_inbnd=False):
- from skimage import color
- import warnings
- warnings.filterwarnings("ignore")
-
- lab = tensor2np(lab_tensor) * 100.
- lab[:, :, 0] = lab[:, :, 0] + 50
-
- rgb_back = 255. * np.clip(color.lab2rgb(lab.astype('float')), 0, 1)
- if (return_inbnd):
- # convert back to lab, see if we match
- lab_back = color.rgb2lab(rgb_back.astype('uint8'))
- mask = 1. * np.isclose(lab_back, lab, atol=2.)
- mask = np2tensor(np.prod(mask, axis=2)[:, :, np.newaxis])
- return (im2tensor(rgb_back), mask)
- else:
- return im2tensor(rgb_back)
-
-
-def rgb2lab(input):
- from skimage import color
- return color.rgb2lab(input / 255.)
-
-
-def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.):
- image_numpy = image_tensor[0].cpu().float().numpy()
- image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
- return image_numpy.astype(imtype)
-
-
-def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.):
- return torch.Tensor((image / factor - cent)
- [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
-
-
-def tensor2vec(vector_tensor):
- return vector_tensor.data.cpu().numpy()[:, :, 0, 0]
-
-
-def voc_ap(rec, prec, use_07_metric=False):
- """ ap = voc_ap(rec, prec, [use_07_metric])
- Compute VOC AP given precision and recall.
- If use_07_metric is true, uses the
- VOC 07 11 point method (default:False).
- """
- if use_07_metric:
- # 11 point metric
- ap = 0.
- for t in np.arange(0., 1.1, 0.1):
- if np.sum(rec >= t) == 0:
- p = 0
- else:
- p = np.max(prec[rec >= t])
- ap = ap + p / 11.
- else:
- # correct AP calculation
- # first append sentinel values at the end
- mrec = np.concatenate(([0.], rec, [1.]))
- mpre = np.concatenate(([0.], prec, [0.]))
-
- # compute the precision envelope
- for i in range(mpre.size - 1, 0, -1):
- mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
-
- # to calculate area under PR curve, look for points
- # where X axis (recall) changes value
- i = np.where(mrec[1:] != mrec[:-1])[0]
-
- # and sum (\Delta recall) * prec
- ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
- return ap
-
-
-def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.):
- # def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.):
- image_numpy = image_tensor[0].cpu().float().numpy()
- image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
- return image_numpy.astype(imtype)
-
-
-def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.):
- # def im2tensor(image, imtype=np.uint8, cent=1., factor=1.):
- return torch.Tensor((image / factor - cent)
- [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
-
-
-############################################################
-# base_model.py #
-############################################################
-
-
-class BaseModel(torch.nn.Module):
- def __init__(self):
- super().__init__()
-
- def name(self):
- return 'BaseModel'
-
- def initialize(self, use_gpu=True):
- self.use_gpu = use_gpu
-
- def forward(self):
- pass
-
- def get_image_paths(self):
- pass
-
- def optimize_parameters(self):
- pass
-
- def get_current_visuals(self):
- return self.input
-
- def get_current_errors(self):
- return {}
-
- def save(self, label):
- pass
-
- # helper saving function that can be used by subclasses
- def save_network(self, network, path, network_label, epoch_label):
- save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
- save_path = os.path.join(path, save_filename)
- torch.save(network.state_dict(), save_path)
-
- # helper loading function that can be used by subclasses
- def load_network(self, network, network_label, epoch_label):
- save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
- save_path = os.path.join(self.save_dir, save_filename)
- print('Loading network from %s' % save_path)
- network.load_state_dict(torch.load(save_path, map_location='cpu'))
-
- def update_learning_rate():
- pass
-
- def get_image_paths(self):
- return self.image_paths
-
- def save_done(self, flag=False):
- np.save(os.path.join(self.save_dir, 'done_flag'), flag)
- np.savetxt(os.path.join(self.save_dir, 'done_flag'), [flag, ], fmt='%i')
-
-
-############################################################
-# dist_model.py #
-############################################################
-
-import os
-from collections import OrderedDict
-from scipy.ndimage import zoom
-from tqdm import tqdm
-
-
-class DistModel(BaseModel):
- def name(self):
- return self.model_name
-
- def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False,
- model_path=None,
- use_gpu=True, printNet=False, spatial=False,
- is_train=False, lr=.0001, beta1=0.5, version='0.1'):
- '''
- INPUTS
- model - ['net-lin'] for linearly calibrated network
- ['net'] for off-the-shelf network
- ['L2'] for L2 distance in Lab colorspace
- ['SSIM'] for ssim in RGB colorspace
- net - ['squeeze','alex','vgg']
- model_path - if None, will look in weights/[NET_NAME].pth
- colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
- use_gpu - bool - whether or not to use a GPU
- printNet - bool - whether or not to print network architecture out
- spatial - bool - whether to output an array containing varying distances across spatial dimensions
- spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
- spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
- spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
- is_train - bool - [True] for training mode
- lr - float - initial learning rate
- beta1 - float - initial momentum term for adam
- version - 0.1 for latest, 0.0 was original (with a bug)
- '''
- BaseModel.initialize(self, use_gpu=use_gpu)
-
- self.model = model
- self.net = net
- self.is_train = is_train
- self.spatial = spatial
- self.model_name = '%s [%s]' % (model, net)
-
- if (self.model == 'net-lin'): # pretrained net + linear layer
- self.net = PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,
- use_dropout=True, spatial=spatial, version=version, lpips=True)
- kw = dict(map_location='cpu')
- if (model_path is None):
- import inspect
- model_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), '..', '..', '..', 'models', 'lpips_models', f'{net}.pth'))
-
- if (not is_train):
- self.net.load_state_dict(torch.load(model_path, **kw), strict=False)
-
- elif (self.model == 'net'): # pretrained network
- self.net = PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
- elif (self.model in ['L2', 'l2']):
- self.net = L2(use_gpu=use_gpu, colorspace=colorspace) # not really a network, only for testing
- self.model_name = 'L2'
- elif (self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']):
- self.net = DSSIM(use_gpu=use_gpu, colorspace=colorspace)
- self.model_name = 'SSIM'
- else:
- raise ValueError("Model [%s] not recognized." % self.model)
-
- self.trainable_parameters = list(self.net.parameters())
-
- if self.is_train: # training mode
- # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
- self.rankLoss = BCERankingLoss()
- self.trainable_parameters += list(self.rankLoss.net.parameters())
- self.lr = lr
- self.old_lr = lr
- self.optimizer_net = torch.optim.Adam(self.trainable_parameters, lr=lr, betas=(beta1, 0.999))
- else: # test mode
- self.net.eval()
-
- # if (use_gpu):
- # self.net.to(gpu_ids[0])
- # self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
- # if (self.is_train):
- # self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0
-
- if (printNet):
- print('---------- Networks initialized -------------')
- print_network(self.net)
- print('-----------------------------------------------')
-
- def forward(self, in0, in1, retPerLayer=False):
- ''' Function computes the distance between image patches in0 and in1
- INPUTS
- in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
- OUTPUT
- computed distances between in0 and in1
- '''
-
- return self.net(in0, in1, retPerLayer=retPerLayer)
-
- # ***** TRAINING FUNCTIONS *****
- def optimize_parameters(self):
- self.forward_train()
- self.optimizer_net.zero_grad()
- self.backward_train()
- self.optimizer_net.step()
- self.clamp_weights()
-
- def clamp_weights(self):
- for module in self.net.modules():
- if (hasattr(module, 'weight') and module.kernel_size == (1, 1)):
- module.weight.data = torch.clamp(module.weight.data, min=0)
-
- def set_input(self, data):
- self.input_ref = data['ref']
- self.input_p0 = data['p0']
- self.input_p1 = data['p1']
- self.input_judge = data['judge']
-
- # if (self.use_gpu):
- # self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
- # self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
- # self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
- # self.input_judge = self.input_judge.to(device=self.gpu_ids[0])
-
- # self.var_ref = Variable(self.input_ref, requires_grad=True)
- # self.var_p0 = Variable(self.input_p0, requires_grad=True)
- # self.var_p1 = Variable(self.input_p1, requires_grad=True)
-
- def forward_train(self): # run forward pass
- # print(self.net.module.scaling_layer.shift)
- # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item())
-
- assert False, "We shoud've not get here when using LPIPS as a metric"
-
- self.d0 = self(self.var_ref, self.var_p0)
- self.d1 = self(self.var_ref, self.var_p1)
- self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge)
-
- self.var_judge = Variable(1. * self.input_judge).view(self.d0.size())
-
- self.loss_total = self.rankLoss(self.d0, self.d1, self.var_judge * 2. - 1.)
-
- return self.loss_total
-
- def backward_train(self):
- torch.mean(self.loss_total).backward()
-
- def compute_accuracy(self, d0, d1, judge):
- ''' d0, d1 are Variables, judge is a Tensor '''
- d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
- judge_per = judge.cpu().numpy().flatten()
- return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per)
-
- def get_current_errors(self):
- retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
- ('acc_r', self.acc_r)])
-
- for key in retDict.keys():
- retDict[key] = np.mean(retDict[key])
-
- return retDict
-
- def get_current_visuals(self):
- zoom_factor = 256 / self.var_ref.data.size()[2]
-
- ref_img = tensor2im(self.var_ref.data)
- p0_img = tensor2im(self.var_p0.data)
- p1_img = tensor2im(self.var_p1.data)
-
- ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
- p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
- p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)
-
- return OrderedDict([('ref', ref_img_vis),
- ('p0', p0_img_vis),
- ('p1', p1_img_vis)])
-
- def save(self, path, label):
- if (self.use_gpu):
- self.save_network(self.net.module, path, '', label)
- else:
- self.save_network(self.net, path, '', label)
- self.save_network(self.rankLoss.net, path, 'rank', label)
-
- def update_learning_rate(self, nepoch_decay):
- lrd = self.lr / nepoch_decay
- lr = self.old_lr - lrd
-
- for param_group in self.optimizer_net.param_groups:
- param_group['lr'] = lr
-
- print('update lr [%s] decay: %f -> %f' % (type, self.old_lr, lr))
- self.old_lr = lr
-
-
-def score_2afc_dataset(data_loader, func, name=''):
- ''' Function computes Two Alternative Forced Choice (2AFC) score using
- distance function 'func' in dataset 'data_loader'
- INPUTS
- data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
- func - callable distance function - calling d=func(in0,in1) should take 2
- pytorch tensors with shape Nx3xXxY, and return numpy array of length N
- OUTPUTS
- [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
- [1] - dictionary with following elements
- d0s,d1s - N arrays containing distances between reference patch to perturbed patches
- gts - N array in [0,1], preferred patch selected by human evaluators
- (closer to "0" for left patch p0, "1" for right patch p1,
- "0.6" means 60pct people preferred right patch, 40pct preferred left)
- scores - N array in [0,1], corresponding to what percentage function agreed with humans
- CONSTS
- N - number of test triplets in data_loader
- '''
-
- d0s = []
- d1s = []
- gts = []
-
- for data in tqdm(data_loader.load_data(), desc=name):
- d0s += func(data['ref'], data['p0']).data.cpu().numpy().flatten().tolist()
- d1s += func(data['ref'], data['p1']).data.cpu().numpy().flatten().tolist()
- gts += data['judge'].cpu().numpy().flatten().tolist()
-
- d0s = np.array(d0s)
- d1s = np.array(d1s)
- gts = np.array(gts)
- scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5
-
- return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores))
-
-
-def score_jnd_dataset(data_loader, func, name=''):
- ''' Function computes JND score using distance function 'func' in dataset 'data_loader'
- INPUTS
- data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
- func - callable distance function - calling d=func(in0,in1) should take 2
- pytorch tensors with shape Nx3xXxY, and return pytorch array of length N
- OUTPUTS
- [0] - JND score in [0,1], mAP score (area under precision-recall curve)
- [1] - dictionary with following elements
- ds - N array containing distances between two patches shown to human evaluator
- sames - N array containing fraction of people who thought the two patches were identical
- CONSTS
- N - number of test triplets in data_loader
- '''
-
- ds = []
- gts = []
-
- for data in tqdm(data_loader.load_data(), desc=name):
- ds += func(data['p0'], data['p1']).data.cpu().numpy().tolist()
- gts += data['same'].cpu().numpy().flatten().tolist()
-
- sames = np.array(gts)
- ds = np.array(ds)
-
- sorted_inds = np.argsort(ds)
- ds_sorted = ds[sorted_inds]
- sames_sorted = sames[sorted_inds]
-
- TPs = np.cumsum(sames_sorted)
- FPs = np.cumsum(1 - sames_sorted)
- FNs = np.sum(sames_sorted) - TPs
-
- precs = TPs / (TPs + FPs)
- recs = TPs / (TPs + FNs)
- score = voc_ap(recs, precs)
-
- return (score, dict(ds=ds, sames=sames))
-
-
-############################################################
-# networks_basic.py #
-############################################################
-
-import torch.nn as nn
-from torch.autograd import Variable
-import numpy as np
-
-
-def spatial_average(in_tens, keepdim=True):
- return in_tens.mean([2, 3], keepdim=keepdim)
-
-
-def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W
- in_H = in_tens.shape[2]
- scale_factor = 1. * out_H / in_H
-
- return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens)
-
-
-# Learned perceptual metric
-class PNetLin(nn.Module):
- def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False,
- version='0.1', lpips=True):
- super(PNetLin, self).__init__()
-
- self.pnet_type = pnet_type
- self.pnet_tune = pnet_tune
- self.pnet_rand = pnet_rand
- self.spatial = spatial
- self.lpips = lpips
- self.version = version
- self.scaling_layer = ScalingLayer()
-
- if (self.pnet_type in ['vgg', 'vgg16']):
- net_type = vgg16
- self.chns = [64, 128, 256, 512, 512]
- elif (self.pnet_type == 'alex'):
- net_type = alexnet
- self.chns = [64, 192, 384, 256, 256]
- elif (self.pnet_type == 'squeeze'):
- net_type = squeezenet
- self.chns = [64, 128, 256, 384, 384, 512, 512]
- self.L = len(self.chns)
-
- self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)
-
- if (lpips):
- self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
- self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
- self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
- self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
- self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
- self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
- if (self.pnet_type == 'squeeze'): # 7 layers for squeezenet
- self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
- self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
- self.lins += [self.lin5, self.lin6]
-
- def forward(self, in0, in1, retPerLayer=False):
- # v0.0 - original release had a bug, where input was not scaled
- in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version == '0.1' else (
- in0, in1)
- outs0, outs1 = self.net(in0_input), self.net(in1_input)
- feats0, feats1, diffs = {}, {}, {}
-
- for kk in range(self.L):
- feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
- diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
-
- if (self.lpips):
- if (self.spatial):
- res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)]
- else:
- res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)]
- else:
- if (self.spatial):
- res = [upsample(diffs[kk].sum(dim=1, keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)]
- else:
- res = [spatial_average(diffs[kk].sum(dim=1, keepdim=True), keepdim=True) for kk in range(self.L)]
-
- val = res[0]
- for l in range(1, self.L):
- val += res[l]
-
- if (retPerLayer):
- return (val, res)
- else:
- return val
-
-
-class ScalingLayer(nn.Module):
- def __init__(self):
- super(ScalingLayer, self).__init__()
- self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
- self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])
-
- def forward(self, inp):
- return (inp - self.shift) / self.scale
-
-
-class NetLinLayer(nn.Module):
- ''' A single linear layer which does a 1x1 conv '''
-
- def __init__(self, chn_in, chn_out=1, use_dropout=False):
- super(NetLinLayer, self).__init__()
-
- layers = [nn.Dropout(), ] if (use_dropout) else []
- layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
- self.model = nn.Sequential(*layers)
-
-
-class Dist2LogitLayer(nn.Module):
- ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''
-
- def __init__(self, chn_mid=32, use_sigmoid=True):
- super(Dist2LogitLayer, self).__init__()
-
- layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True), ]
- layers += [nn.LeakyReLU(0.2, True), ]
- layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True), ]
- layers += [nn.LeakyReLU(0.2, True), ]
- layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True), ]
- if (use_sigmoid):
- layers += [nn.Sigmoid(), ]
- self.model = nn.Sequential(*layers)
-
- def forward(self, d0, d1, eps=0.1):
- return self.model(torch.cat((d0, d1, d0 - d1, d0 / (d1 + eps), d1 / (d0 + eps)), dim=1))
-
-
-class BCERankingLoss(nn.Module):
- def __init__(self, chn_mid=32):
- super(BCERankingLoss, self).__init__()
- self.net = Dist2LogitLayer(chn_mid=chn_mid)
- # self.parameters = list(self.net.parameters())
- self.loss = torch.nn.BCELoss()
-
- def forward(self, d0, d1, judge):
- per = (judge + 1.) / 2.
- self.logit = self.net(d0, d1)
- return self.loss(self.logit, per)
-
-
-# L2, DSSIM metrics
-class FakeNet(nn.Module):
- def __init__(self, use_gpu=True, colorspace='Lab'):
- super(FakeNet, self).__init__()
- self.use_gpu = use_gpu
- self.colorspace = colorspace
-
-
-class L2(FakeNet):
-
- def forward(self, in0, in1, retPerLayer=None):
- assert (in0.size()[0] == 1) # currently only supports batchSize 1
-
- if (self.colorspace == 'RGB'):
- (N, C, X, Y) = in0.size()
- value = torch.mean(torch.mean(torch.mean((in0 - in1) ** 2, dim=1).view(N, 1, X, Y), dim=2).view(N, 1, 1, Y),
- dim=3).view(N)
- return value
- elif (self.colorspace == 'Lab'):
- value = l2(tensor2np(tensor2tensorlab(in0.data, to_norm=False)),
- tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float')
- ret_var = Variable(torch.Tensor((value,)))
- # if (self.use_gpu):
- # ret_var = ret_var.cuda()
- return ret_var
-
-
-class DSSIM(FakeNet):
-
- def forward(self, in0, in1, retPerLayer=None):
- assert (in0.size()[0] == 1) # currently only supports batchSize 1
-
- if (self.colorspace == 'RGB'):
- value = dssim(1. * tensor2im(in0.data), 1. * tensor2im(in1.data), range=255.).astype('float')
- elif (self.colorspace == 'Lab'):
- value = dssim(tensor2np(tensor2tensorlab(in0.data, to_norm=False)),
- tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float')
- ret_var = Variable(torch.Tensor((value,)))
- # if (self.use_gpu):
- # ret_var = ret_var.cuda()
- return ret_var
-
-
-def print_network(net):
- num_params = 0
- for param in net.parameters():
- num_params += param.numel()
- print('Network', net)
- print('Total number of parameters: %d' % num_params)
-
-
-############################################################
-# pretrained_networks.py #
-############################################################
-
-from collections import namedtuple
-import torch
-from torchvision import models as tv
-
-
-class squeezenet(torch.nn.Module):
- def __init__(self, requires_grad=False, pretrained=True):
- super(squeezenet, self).__init__()
- pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features
- self.slice1 = torch.nn.Sequential()
- self.slice2 = torch.nn.Sequential()
- self.slice3 = torch.nn.Sequential()
- self.slice4 = torch.nn.Sequential()
- self.slice5 = torch.nn.Sequential()
- self.slice6 = torch.nn.Sequential()
- self.slice7 = torch.nn.Sequential()
- self.N_slices = 7
- for x in range(2):
- self.slice1.add_module(str(x), pretrained_features[x])
- for x in range(2, 5):
- self.slice2.add_module(str(x), pretrained_features[x])
- for x in range(5, 8):
- self.slice3.add_module(str(x), pretrained_features[x])
- for x in range(8, 10):
- self.slice4.add_module(str(x), pretrained_features[x])
- for x in range(10, 11):
- self.slice5.add_module(str(x), pretrained_features[x])
- for x in range(11, 12):
- self.slice6.add_module(str(x), pretrained_features[x])
- for x in range(12, 13):
- self.slice7.add_module(str(x), pretrained_features[x])
- if not requires_grad:
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, X):
- h = self.slice1(X)
- h_relu1 = h
- h = self.slice2(h)
- h_relu2 = h
- h = self.slice3(h)
- h_relu3 = h
- h = self.slice4(h)
- h_relu4 = h
- h = self.slice5(h)
- h_relu5 = h
- h = self.slice6(h)
- h_relu6 = h
- h = self.slice7(h)
- h_relu7 = h
- vgg_outputs = namedtuple("SqueezeOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5', 'relu6', 'relu7'])
- out = vgg_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7)
-
- return out
-
-
-class alexnet(torch.nn.Module):
- def __init__(self, requires_grad=False, pretrained=True):
- super(alexnet, self).__init__()
- alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features
- self.slice1 = torch.nn.Sequential()
- self.slice2 = torch.nn.Sequential()
- self.slice3 = torch.nn.Sequential()
- self.slice4 = torch.nn.Sequential()
- self.slice5 = torch.nn.Sequential()
- self.N_slices = 5
- for x in range(2):
- self.slice1.add_module(str(x), alexnet_pretrained_features[x])
- for x in range(2, 5):
- self.slice2.add_module(str(x), alexnet_pretrained_features[x])
- for x in range(5, 8):
- self.slice3.add_module(str(x), alexnet_pretrained_features[x])
- for x in range(8, 10):
- self.slice4.add_module(str(x), alexnet_pretrained_features[x])
- for x in range(10, 12):
- self.slice5.add_module(str(x), alexnet_pretrained_features[x])
- if not requires_grad:
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, X):
- h = self.slice1(X)
- h_relu1 = h
- h = self.slice2(h)
- h_relu2 = h
- h = self.slice3(h)
- h_relu3 = h
- h = self.slice4(h)
- h_relu4 = h
- h = self.slice5(h)
- h_relu5 = h
- alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
- out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
-
- return out
-
-
-class vgg16(torch.nn.Module):
- def __init__(self, requires_grad=False, pretrained=True):
- super(vgg16, self).__init__()
- vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features
- self.slice1 = torch.nn.Sequential()
- self.slice2 = torch.nn.Sequential()
- self.slice3 = torch.nn.Sequential()
- self.slice4 = torch.nn.Sequential()
- self.slice5 = torch.nn.Sequential()
- self.N_slices = 5
- for x in range(4):
- self.slice1.add_module(str(x), vgg_pretrained_features[x])
- for x in range(4, 9):
- self.slice2.add_module(str(x), vgg_pretrained_features[x])
- for x in range(9, 16):
- self.slice3.add_module(str(x), vgg_pretrained_features[x])
- for x in range(16, 23):
- self.slice4.add_module(str(x), vgg_pretrained_features[x])
- for x in range(23, 30):
- self.slice5.add_module(str(x), vgg_pretrained_features[x])
- if not requires_grad:
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, X):
- h = self.slice1(X)
- h_relu1_2 = h
- h = self.slice2(h)
- h_relu2_2 = h
- h = self.slice3(h)
- h_relu3_3 = h
- h = self.slice4(h)
- h_relu4_3 = h
- h = self.slice5(h)
- h_relu5_3 = h
- vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
- out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
-
- return out
-
-
-class resnet(torch.nn.Module):
- def __init__(self, requires_grad=False, pretrained=True, num=18):
- super(resnet, self).__init__()
- if (num == 18):
- self.net = tv.resnet18(pretrained=pretrained)
- elif (num == 34):
- self.net = tv.resnet34(pretrained=pretrained)
- elif (num == 50):
- self.net = tv.resnet50(pretrained=pretrained)
- elif (num == 101):
- self.net = tv.resnet101(pretrained=pretrained)
- elif (num == 152):
- self.net = tv.resnet152(pretrained=pretrained)
- self.N_slices = 5
-
- self.conv1 = self.net.conv1
- self.bn1 = self.net.bn1
- self.relu = self.net.relu
- self.maxpool = self.net.maxpool
- self.layer1 = self.net.layer1
- self.layer2 = self.net.layer2
- self.layer3 = self.net.layer3
- self.layer4 = self.net.layer4
-
- def forward(self, X):
- h = self.conv1(X)
- h = self.bn1(h)
- h = self.relu(h)
- h_relu1 = h
- h = self.maxpool(h)
- h = self.layer1(h)
- h_conv2 = h
- h = self.layer2(h)
- h_conv3 = h
- h = self.layer3(h)
- h_conv4 = h
- h = self.layer4(h)
- h_conv5 = h
-
- outputs = namedtuple("Outputs", ['relu1', 'conv2', 'conv3', 'conv4', 'conv5'])
- out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
-
- return out
diff --git a/spaces/jhwen/bingo/src/pages/api/sydney.ts b/spaces/jhwen/bingo/src/pages/api/sydney.ts
deleted file mode 100644
index 8bd7074bc72bd2803e4acf89d3814908893ff044..0000000000000000000000000000000000000000
--- a/spaces/jhwen/bingo/src/pages/api/sydney.ts
+++ /dev/null
@@ -1,66 +0,0 @@
-import { NextApiRequest, NextApiResponse } from 'next'
-import { WebSocket, debug } from '@/lib/isomorphic'
-import { BingWebBot } from '@/lib/bots/bing'
-import { websocketUtils } from '@/lib/bots/bing/utils'
-import { WatchDog, createHeaders } from '@/lib/utils'
-
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- const conversationContext = req.body
- const headers = createHeaders(req.cookies)
- const id = headers['x-forwarded-for']
-
- debug(id, headers)
- res.setHeader('Content-Type', 'text/stream; charset=UTF-8')
-
- const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', {
- headers: {
- ...headers,
- 'accept-language': 'zh-CN,zh;q=0.9',
- 'cache-control': 'no-cache',
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- pragma: 'no-cache',
- }
- })
-
- const closeDog = new WatchDog()
- const timeoutDog = new WatchDog()
- ws.onmessage = (event) => {
- timeoutDog.watch(() => {
- debug(id, 'timeout')
- ws.send(websocketUtils.packMessage({ type: 6 }))
- }, 3000)
- closeDog.watch(() => {
- debug(id, 'timeout close')
- ws.close()
- }, 20000)
- res.write(event.data)
- if (/\{"type":([367])\}/.test(String(event.data))) {
- const type = parseInt(RegExp.$1, 10)
- debug(id, 'connection type', type)
- if (type === 3) {
- ws.close()
- } else {
- ws.send(websocketUtils.packMessage({ type }))
- }
- }
- }
-
- ws.onclose = () => {
- timeoutDog.reset()
- closeDog.reset()
- debug(id, 'ws close')
- res.end()
- }
-
- await new Promise((resolve) => ws.onopen = resolve)
- ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 }))
- ws.send(websocketUtils.packMessage({ type: 6 }))
- ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!)))
- req.socket.once('close', () => {
- debug(id, 'connection close')
- ws.close()
- if (!res.closed) {
- res.end()
- }
- })
-}
diff --git a/spaces/jiawei011/dreamgaussian/mesh_utils.py b/spaces/jiawei011/dreamgaussian/mesh_utils.py
deleted file mode 100644
index ca9fce9232f5133d6f91d5cf64d9e17b0725a5c9..0000000000000000000000000000000000000000
--- a/spaces/jiawei011/dreamgaussian/mesh_utils.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import numpy as np
-import pymeshlab as pml
-
-
-def poisson_mesh_reconstruction(points, normals=None):
- # points/normals: [N, 3] np.ndarray
-
- import open3d as o3d
-
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(points)
-
- # outlier removal
- pcd, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=10)
-
- # normals
- if normals is None:
- pcd.estimate_normals()
- else:
- pcd.normals = o3d.utility.Vector3dVector(normals[ind])
-
- # visualize
- o3d.visualization.draw_geometries([pcd], point_show_normal=False)
-
- mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
- pcd, depth=9
- )
- vertices_to_remove = densities < np.quantile(densities, 0.1)
- mesh.remove_vertices_by_mask(vertices_to_remove)
-
- # visualize
- o3d.visualization.draw_geometries([mesh])
-
- vertices = np.asarray(mesh.vertices)
- triangles = np.asarray(mesh.triangles)
-
- print(
- f"[INFO] poisson mesh reconstruction: {points.shape} --> {vertices.shape} / {triangles.shape}"
- )
-
- return vertices, triangles
-
-
-def decimate_mesh(
- verts, faces, target, backend="pymeshlab", remesh=False, optimalplacement=True
-):
- # optimalplacement: default is True, but for flat mesh must turn False to prevent spike artifect.
-
- _ori_vert_shape = verts.shape
- _ori_face_shape = faces.shape
-
- if backend == "pyfqmr":
- import pyfqmr
-
- solver = pyfqmr.Simplify()
- solver.setMesh(verts, faces)
- solver.simplify_mesh(target_count=target, preserve_border=False, verbose=False)
- verts, faces, normals = solver.getMesh()
- else:
- m = pml.Mesh(verts, faces)
- ms = pml.MeshSet()
- ms.add_mesh(m, "mesh") # will copy!
-
- # filters
- # ms.meshing_decimation_clustering(threshold=pml.Percentage(1))
- ms.meshing_decimation_quadric_edge_collapse(
- targetfacenum=int(target), optimalplacement=optimalplacement
- )
-
- if remesh:
- # ms.apply_coord_taubin_smoothing()
- ms.meshing_isotropic_explicit_remeshing(
- iterations=3, targetlen=pml.Percentage(1)
- )
-
- # extract mesh
- m = ms.current_mesh()
- verts = m.vertex_matrix()
- faces = m.face_matrix()
-
- print(
- f"[INFO] mesh decimation: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}"
- )
-
- return verts, faces
-
-
-def clean_mesh(
- verts,
- faces,
- v_pct=1,
- min_f=64,
- min_d=20,
- repair=True,
- remesh=True,
- remesh_size=0.01,
-):
- # verts: [N, 3]
- # faces: [N, 3]
-
- _ori_vert_shape = verts.shape
- _ori_face_shape = faces.shape
-
- m = pml.Mesh(verts, faces)
- ms = pml.MeshSet()
- ms.add_mesh(m, "mesh") # will copy!
-
- # filters
- ms.meshing_remove_unreferenced_vertices() # verts not refed by any faces
-
- if v_pct > 0:
- ms.meshing_merge_close_vertices(
- threshold=pml.Percentage(v_pct)
- ) # 1/10000 of bounding box diagonal
-
- ms.meshing_remove_duplicate_faces() # faces defined by the same verts
- ms.meshing_remove_null_faces() # faces with area == 0
-
- if min_d > 0:
- ms.meshing_remove_connected_component_by_diameter(
- mincomponentdiag=pml.Percentage(min_d)
- )
-
- if min_f > 0:
- ms.meshing_remove_connected_component_by_face_number(mincomponentsize=min_f)
-
- if repair:
- # ms.meshing_remove_t_vertices(method=0, threshold=40, repeat=True)
- ms.meshing_repair_non_manifold_edges(method=0)
- ms.meshing_repair_non_manifold_vertices(vertdispratio=0)
-
- if remesh:
- # ms.apply_coord_taubin_smoothing()
- ms.meshing_isotropic_explicit_remeshing(
- iterations=3, targetlen=pml.AbsoluteValue(remesh_size)
- )
-
- # extract mesh
- m = ms.current_mesh()
- verts = m.vertex_matrix()
- faces = m.face_matrix()
-
- print(
- f"[INFO] mesh cleaning: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}"
- )
-
- return verts, faces
diff --git a/spaces/jmyungjoon/cartoon/network/__init__.py b/spaces/jmyungjoon/cartoon/network/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/file/video_audio.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/file/video_audio.py
deleted file mode 100644
index d0d3742e829121099f7808ba6a68f3a9f0803e5f..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/file/video_audio.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Video audio parser.
-
-Contains parsers for mp3, mp4 files.
-
-"""
-from pathlib import Path
-from typing import Any, Dict, cast
-
-from gpt_index.readers.file.base_parser import BaseParser
-
-
-class VideoAudioParser(BaseParser):
- """Video audio parser.
-
- Extract text from transcript of video/audio files.
-
- """
-
- def __init__(self, *args: Any, model_version: str = "base", **kwargs: Any) -> None:
- """Init params."""
- super().__init__(*args, **kwargs)
- self._model_version = model_version
-
- def _init_parser(self) -> Dict:
- """Init parser."""
- try:
- import whisper
- except ImportError:
- raise ImportError(
- "Please install OpenAI whisper model "
- "'pip install git+https://github.com/openai/whisper.git' "
- "to use the model"
- )
-
- model = whisper.load_model(self._model_version)
-
- return {"model": model}
-
- def parse_file(self, file: Path, errors: str = "ignore") -> str:
- """Parse file."""
- import whisper
-
- if file.name.endswith("mp4"):
- try:
- from pydub import AudioSegment # noqa: F401
- except ImportError:
- raise ImportError("Please install pydub 'pip install pydub' ")
- # open file
- video = AudioSegment.from_file(file, format="mp4")
-
- # Extract audio from video
- audio = video.split_to_mono()[0]
-
- file_str = str(file)[:-4] + ".mp3"
- # export file
- audio.export(file_str, format="mp3")
-
- model = cast(whisper.Whisper, self.parser_config["model"])
- result = model.transcribe(str(file))
-
- transcript = result["text"]
-
- return transcript
diff --git a/spaces/johnslegers/stable-diffusion-gui-test/modules/runtime.py b/spaces/johnslegers/stable-diffusion-gui-test/modules/runtime.py
deleted file mode 100644
index 8d4bcb75ebe1b4419443669b91da7e3bfe5704c2..0000000000000000000000000000000000000000
--- a/spaces/johnslegers/stable-diffusion-gui-test/modules/runtime.py
+++ /dev/null
@@ -1,682 +0,0 @@
-import json
-import os, re
-import traceback
-import torch
-import numpy as np
-from omegaconf import OmegaConf
-from PIL import Image, ImageOps
-from tqdm import tqdm, trange
-from itertools import islice
-from einops import rearrange
-import time
-from pytorch_lightning import seed_everything
-from torch import autocast
-from contextlib import nullcontext
-from einops import rearrange, repeat
-from ldmlib.util import instantiate_from_config
-from optimizedSD.optimUtils import split_weighted_subprompts
-from transformers import logging
-
-from gfpgan import GFPGANer
-from basicsr.archs.rrdbnet_arch import RRDBNet
-from realesrgan import RealESRGANer
-
-import uuid
-
-logging.set_verbosity_error()
-
-# consts
-config_yaml = "optimizedSD/v1-inference.yaml"
-filename_regex = re.compile('[^a-zA-Z0-9]')
-
-# api stuff
-from sd_internal import Request, Response, Image as ResponseImage
-import base64
-from io import BytesIO
-#from colorama import Fore
-
-# local
-stop_processing = False
-temp_images = {}
-
-ckpt_file = None
-gfpgan_file = None
-real_esrgan_file = None
-
-model = None
-modelCS = None
-modelFS = None
-model_gfpgan = None
-model_real_esrgan = None
-
-model_is_half = False
-model_fs_is_half = False
-device = None
-unet_bs = 1
-precision = 'autocast'
-sampler_plms = None
-sampler_ddim = None
-
-has_valid_gpu = False
-force_full_precision = False
-try:
- gpu = torch.cuda.current_device()
- gpu_name = torch.cuda.get_device_name(gpu)
- print('GPU detected: ', gpu_name)
-
- force_full_precision = ('nvidia' in gpu_name.lower() or 'geforce' in gpu_name.lower()) and (' 1660' in gpu_name or ' 1650' in gpu_name) # otherwise these NVIDIA cards create green images
- if force_full_precision:
- print('forcing full precision on NVIDIA 16xx cards, to avoid green images. GPU detected: ', gpu_name)
-
- mem_free, mem_total = torch.cuda.mem_get_info(gpu)
- mem_total /= float(10**9)
- if mem_total < 3.0:
- print("GPUs with less than 3 GB of VRAM are not compatible with Stable Diffusion")
- raise Exception()
-
- has_valid_gpu = True
-except:
- print('WARNING: No compatible GPU found. Using the CPU, but this will be very slow!')
- pass
-
-def load_model_ckpt(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_use=1, precision_to_use='autocast'):
- global ckpt_file, model, modelCS, modelFS, model_is_half, device, unet_bs, precision, model_fs_is_half
-
- device = device_to_use if has_valid_gpu else 'cpu'
- precision = precision_to_use if not force_full_precision else 'full'
- unet_bs = unet_bs_to_use
-
- unload_model()
-
- if device == 'cpu':
- precision = 'full'
-
- sd = load_model_from_config(f"{ckpt_to_use}.ckpt")
- li, lo = [], []
- for key, value in sd.items():
- sp = key.split(".")
- if (sp[0]) == "model":
- if "input_blocks" in sp:
- li.append(key)
- elif "middle_block" in sp:
- li.append(key)
- elif "time_embed" in sp:
- li.append(key)
- else:
- lo.append(key)
- for key in li:
- sd["model1." + key[6:]] = sd.pop(key)
- for key in lo:
- sd["model2." + key[6:]] = sd.pop(key)
-
- config = OmegaConf.load(f"{config_yaml}")
-
- model = instantiate_from_config(config.modelUNet)
- _, _ = model.load_state_dict(sd, strict=False)
- model.eval()
- model.cdevice = device
- model.unet_bs = unet_bs
- model.turbo = turbo
-
- modelCS = instantiate_from_config(config.modelCondStage)
- _, _ = modelCS.load_state_dict(sd, strict=False)
- modelCS.eval()
- modelCS.cond_stage_model.device = device
-
- modelFS = instantiate_from_config(config.modelFirstStage)
- _, _ = modelFS.load_state_dict(sd, strict=False)
- modelFS.eval()
- del sd
-
- if device != "cpu" and precision == "autocast":
- model.half()
- modelCS.half()
- modelFS.half()
- model_is_half = True
- model_fs_is_half = True
- else:
- model_is_half = False
- model_fs_is_half = False
-
- ckpt_file = ckpt_to_use
-
- print('loaded ', ckpt_file, 'to', device, 'precision', precision)
-
-def unload_model():
- global model, modelCS, modelFS
-
- if model is not None:
- del model
- del modelCS
- del modelFS
-
- model = None
- modelCS = None
- modelFS = None
-
-def load_model_gfpgan(gfpgan_to_use):
- global gfpgan_file, model_gfpgan
-
- if gfpgan_to_use is None:
- return
-
- gfpgan_file = gfpgan_to_use
- model_path = gfpgan_to_use + ".pth"
-
- if device == 'cpu':
- model_gfpgan = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device('cpu'))
- else:
- model_gfpgan = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device('cuda'))
-
- print('loaded ', gfpgan_to_use, 'to', device, 'precision', precision)
-
-def load_model_real_esrgan(real_esrgan_to_use):
- global real_esrgan_file, model_real_esrgan
-
- if real_esrgan_to_use is None:
- return
-
- real_esrgan_file = real_esrgan_to_use
- model_path = real_esrgan_to_use + ".pth"
-
- RealESRGAN_models = {
- 'RealESRGAN_x4plus': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4),
- 'RealESRGAN_x4plus_anime_6B': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
- }
-
- model_to_use = RealESRGAN_models[real_esrgan_to_use]
-
- if device == 'cpu':
- model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=False) # cpu does not support half
- model_real_esrgan.device = torch.device('cpu')
- model_real_esrgan.model.to('cpu')
- else:
- model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=model_is_half)
-
- model_real_esrgan.model.name = real_esrgan_to_use
-
- print('loaded ', real_esrgan_to_use, 'to', device, 'precision', precision)
-
-def mk_img(req: Request):
- try:
- yield from do_mk_img(req)
- except Exception as e:
- print(traceback.format_exc())
-
- gc()
-
- if device != "cpu":
- modelFS.to("cpu")
- modelCS.to("cpu")
-
- model.model1.to("cpu")
- model.model2.to("cpu")
-
- gc()
-
- yield json.dumps({
- "status": 'failed',
- "detail": str(e)
- })
-
-def do_mk_img(req: Request):
- global ckpt_file
- global model, modelCS, modelFS, device
- global model_gfpgan, model_real_esrgan
- global stop_processing
-
- stop_processing = False
-
- res = Response()
- res.request = req
- res.images = []
-
- temp_images.clear()
-
- # custom model support:
- # the req.use_stable_diffusion_model needs to be a valid path
- # to the ckpt file (without the extension).
-
- needs_model_reload = False
- ckpt_to_use = ckpt_file
- if ckpt_to_use != req.use_stable_diffusion_model:
- ckpt_to_use = req.use_stable_diffusion_model
- needs_model_reload = True
-
- model.turbo = req.turbo
- if req.use_cpu:
- if device != 'cpu':
- device = 'cpu'
-
- if model_is_half:
- load_model_ckpt(ckpt_to_use, device)
- needs_model_reload = False
-
- load_model_gfpgan(gfpgan_file)
- load_model_real_esrgan(real_esrgan_file)
- else:
- if has_valid_gpu:
- prev_device = device
- device = 'cuda'
-
- if (precision == 'autocast' and (req.use_full_precision or not model_is_half)) or \
- (precision == 'full' and not req.use_full_precision and not force_full_precision):
-
- load_model_ckpt(ckpt_to_use, device, req.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'))
- needs_model_reload = False
-
- if prev_device != device:
- load_model_gfpgan(gfpgan_file)
- load_model_real_esrgan(real_esrgan_file)
-
- if needs_model_reload:
- load_model_ckpt(ckpt_to_use, device, req.turbo, unet_bs, precision)
-
- if req.use_face_correction != gfpgan_file:
- load_model_gfpgan(req.use_face_correction)
-
- if req.use_upscale != real_esrgan_file:
- load_model_real_esrgan(req.use_upscale)
-
- model.cdevice = device
- modelCS.cond_stage_model.device = device
-
- opt_prompt = req.prompt
- opt_seed = req.seed
- opt_n_samples = req.num_outputs
- opt_n_iter = 1
- opt_scale = req.guidance_scale
- opt_C = 4
- opt_H = req.height
- opt_W = req.width
- opt_f = 8
- opt_ddim_steps = req.num_inference_steps
- opt_ddim_eta = 0.0
- opt_strength = req.prompt_strength
- opt_save_to_disk_path = req.save_to_disk_path
- opt_init_img = req.init_image
- opt_use_face_correction = req.use_face_correction
- opt_use_upscale = req.use_upscale
- opt_show_only_filtered = req.show_only_filtered_image
- opt_format = req.output_format
- opt_sampler_name = req.sampler
-
- print(req.to_string(), '\n device', device)
-
- print('\n\n Using precision:', precision)
-
- seed_everything(opt_seed)
-
- batch_size = opt_n_samples
- prompt = opt_prompt
- assert prompt is not None
- data = [batch_size * [prompt]]
-
- if precision == "autocast" and device != "cpu":
- precision_scope = autocast
- else:
- precision_scope = nullcontext
-
- mask = None
-
- if req.init_image is None:
- handler = _txt2img
-
- init_latent = None
- t_enc = None
- else:
- handler = _img2img
-
- init_image = load_img(req.init_image, opt_W, opt_H)
- init_image = init_image.to(device)
-
- if device != "cpu" and precision == "autocast":
- init_image = init_image.half()
-
- modelFS.to(device)
-
- init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
- init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image)) # move to latent space
-
- if req.mask is not None:
- mask = load_mask(req.mask, opt_W, opt_H, init_latent.shape[2], init_latent.shape[3], True).to(device)
- mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0)
- mask = repeat(mask, '1 ... -> b ...', b=batch_size)
-
- if device != "cpu" and precision == "autocast":
- mask = mask.half()
-
- move_fs_to_cpu()
-
- assert 0. <= opt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
- t_enc = int(opt_strength * opt_ddim_steps)
- print(f"target t_enc is {t_enc} steps")
-
- if opt_save_to_disk_path is not None:
- session_out_path = os.path.join(opt_save_to_disk_path, req.session_id)
- os.makedirs(session_out_path, exist_ok=True)
- else:
- session_out_path = None
-
- seeds = ""
- with torch.no_grad():
- for n in trange(opt_n_iter, desc="Sampling"):
- for prompts in tqdm(data, desc="data"):
-
- with precision_scope("cuda"):
- modelCS.to(device)
- uc = None
- if opt_scale != 1.0:
- uc = modelCS.get_learned_conditioning(batch_size * [req.negative_prompt])
- if isinstance(prompts, tuple):
- prompts = list(prompts)
-
- subprompts, weights = split_weighted_subprompts(prompts[0])
- if len(subprompts) > 1:
- c = torch.zeros_like(uc)
- totalWeight = sum(weights)
- # normalize each "sub prompt" and add it
- for i in range(len(subprompts)):
- weight = weights[i]
- # if not skip_normalize:
- weight = weight / totalWeight
- c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight)
- else:
- c = modelCS.get_learned_conditioning(prompts)
-
- modelFS.to(device)
-
- partial_x_samples = None
- def img_callback(x_samples, i):
- nonlocal partial_x_samples
-
- partial_x_samples = x_samples
-
- if req.stream_progress_updates:
- n_steps = opt_ddim_steps if req.init_image is None else t_enc
- progress = {"step": i, "total_steps": n_steps}
-
- if req.stream_image_progress and i % 5 == 0:
- partial_images = []
-
- for i in range(batch_size):
- x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
- x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
- x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
- x_sample = x_sample.astype(np.uint8)
- img = Image.fromarray(x_sample)
- buf = BytesIO()
- img.save(buf, format='JPEG')
- buf.seek(0)
-
- del img, x_sample, x_samples_ddim
- # don't delete x_samples, it is used in the code that called this callback
-
- temp_images[str(req.session_id) + '/' + str(i)] = buf
- partial_images.append({'path': f'/image/tmp/{req.session_id}/{i}'})
-
- progress['output'] = partial_images
-
- yield json.dumps(progress)
-
- if stop_processing:
- raise UserInitiatedStop("User requested that we stop processing")
-
- # run the handler
- try:
- if handler == _txt2img:
- x_samples = _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, opt_sampler_name)
- else:
- x_samples = _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask)
-
- yield from x_samples
-
- x_samples = partial_x_samples
- except UserInitiatedStop:
- if partial_x_samples is None:
- continue
-
- x_samples = partial_x_samples
-
- print("saving images")
- for i in range(batch_size):
-
- x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
- x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
- x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
- x_sample = x_sample.astype(np.uint8)
- img = Image.fromarray(x_sample)
-
- has_filters = (opt_use_face_correction is not None and opt_use_face_correction.startswith('GFPGAN')) or \
- (opt_use_upscale is not None and opt_use_upscale.startswith('RealESRGAN'))
-
- return_orig_img = not has_filters or not opt_show_only_filtered
-
- if stop_processing:
- return_orig_img = True
-
- if opt_save_to_disk_path is not None:
- prompt_flattened = filename_regex.sub('_', prompts[0])
- prompt_flattened = prompt_flattened[:50]
-
- img_id = str(uuid.uuid4())[-8:]
-
- file_path = f"{prompt_flattened}_{img_id}"
- img_out_path = os.path.join(session_out_path, f"{file_path}.{opt_format}")
- meta_out_path = os.path.join(session_out_path, f"{file_path}.txt")
-
- if return_orig_img:
- save_image(img, img_out_path)
-
- save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt, ckpt_file)
-
- if return_orig_img:
- img_data = img_to_base64_str(img, opt_format)
- res_image_orig = ResponseImage(data=img_data, seed=opt_seed)
- res.images.append(res_image_orig)
-
- if opt_save_to_disk_path is not None:
- res_image_orig.path_abs = img_out_path
-
- del img
-
- if has_filters and not stop_processing:
- print('Applying filters..')
-
- gc()
- filters_applied = []
-
- if opt_use_face_correction:
- _, _, output = model_gfpgan.enhance(x_sample[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
- x_sample = output[:,:,::-1]
- filters_applied.append(opt_use_face_correction)
-
- if opt_use_upscale:
- output, _ = model_real_esrgan.enhance(x_sample[:,:,::-1])
- x_sample = output[:,:,::-1]
- filters_applied.append(opt_use_upscale)
-
- filtered_image = Image.fromarray(x_sample)
-
- filtered_img_data = img_to_base64_str(filtered_image, opt_format)
- res_image_filtered = ResponseImage(data=filtered_img_data, seed=opt_seed)
- res.images.append(res_image_filtered)
-
- filters_applied = "_".join(filters_applied)
-
- if opt_save_to_disk_path is not None:
- filtered_img_out_path = os.path.join(session_out_path, f"{file_path}_{filters_applied}.{opt_format}")
- save_image(filtered_image, filtered_img_out_path)
- res_image_filtered.path_abs = filtered_img_out_path
-
- del filtered_image
-
- seeds += str(opt_seed) + ","
- opt_seed += 1
-
- move_fs_to_cpu()
- gc()
- del x_samples, x_samples_ddim, x_sample
- print("memory_final = ", torch.cuda.memory_allocated() / 1e6)
-
- print('Task completed')
-
- yield json.dumps(res.json())
-
-def save_image(img, img_out_path):
- try:
- img.save(img_out_path)
- except:
- print('could not save the file', traceback.format_exc())
-
-def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt, ckpt_file):
- metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}\nStable Diffusion Model: {ckpt_file + '.ckpt'}"
-
- try:
- with open(meta_out_path, 'w') as f:
- f.write(metadata)
- except:
- print('could not save the file', traceback.format_exc())
-
-def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, sampler_name):
- shape = [opt_n_samples, opt_C, opt_H // opt_f, opt_W // opt_f]
-
- if device != "cpu":
- mem = torch.cuda.memory_allocated() / 1e6
- modelCS.to("cpu")
- while torch.cuda.memory_allocated() / 1e6 >= mem:
- time.sleep(1)
-
- if sampler_name == 'ddim':
- model.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False)
-
- samples_ddim = model.sample(
- S=opt_ddim_steps,
- conditioning=c,
- seed=opt_seed,
- shape=shape,
- verbose=False,
- unconditional_guidance_scale=opt_scale,
- unconditional_conditioning=uc,
- eta=opt_ddim_eta,
- x_T=start_code,
- img_callback=img_callback,
- mask=mask,
- sampler = sampler_name,
- )
-
- yield from samples_ddim
-
-def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask):
- # encode (scaled latent)
- z_enc = model.stochastic_encode(
- init_latent,
- torch.tensor([t_enc] * batch_size).to(device),
- opt_seed,
- opt_ddim_eta,
- opt_ddim_steps,
- )
- x_T = None if mask is None else init_latent
-
- # decode it
- samples_ddim = model.sample(
- t_enc,
- c,
- z_enc,
- unconditional_guidance_scale=opt_scale,
- unconditional_conditioning=uc,
- img_callback=img_callback,
- mask=mask,
- x_T=x_T,
- sampler = 'ddim'
- )
-
- yield from samples_ddim
-
-def move_fs_to_cpu():
- if device != "cpu":
- mem = torch.cuda.memory_allocated() / 1e6
- modelFS.to("cpu")
- while torch.cuda.memory_allocated() / 1e6 >= mem:
- time.sleep(1)
-
-def gc():
- if device == 'cpu':
- return
-
- torch.cuda.empty_cache()
- torch.cuda.ipc_collect()
-
-# internal
-
-def chunk(it, size):
- it = iter(it)
- return iter(lambda: tuple(islice(it, size)), ())
-
-
-def load_model_from_config(ckpt, verbose=False):
- print(f"Loading model from {ckpt}")
- pl_sd = torch.load(ckpt, map_location="cpu")
- if "global_step" in pl_sd:
- print(f"Global Step: {pl_sd['global_step']}")
- sd = pl_sd["state_dict"]
- return sd
-
-# utils
-class UserInitiatedStop(Exception):
- pass
-
-def load_img(img_str, w0, h0):
- image = base64_str_to_img(img_str).convert("RGB")
- w, h = image.size
- print(f"loaded input image of size ({w}, {h}) from base64")
- if h0 is not None and w0 is not None:
- h, w = h0, w0
-
- w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64
- image = image.resize((w, h), resample=Image.Resampling.LANCZOS)
- image = np.array(image).astype(np.float32) / 255.0
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image)
- return 2.*image - 1.
-
-def load_mask(mask_str, h0, w0, newH, newW, invert=False):
- image = base64_str_to_img(mask_str).convert("RGB")
- w, h = image.size
- print(f"loaded input mask of size ({w}, {h})")
-
- if invert:
- print("inverted")
- image = ImageOps.invert(image)
- # where_0, where_1 = np.where(image == 0), np.where(image == 255)
- # image[where_0], image[where_1] = 255, 0
-
- if h0 is not None and w0 is not None:
- h, w = h0, w0
-
- w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64
-
- print(f"New mask size ({w}, {h})")
- image = image.resize((newW, newH), resample=Image.Resampling.LANCZOS)
- image = np.array(image)
-
- image = image.astype(np.float32) / 255.0
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image)
- return image
-
-# https://stackoverflow.com/a/61114178
-def img_to_base64_str(img, output_format="PNG"):
- buffered = BytesIO()
- img.save(buffered, format=output_format)
- buffered.seek(0)
- img_byte = buffered.getvalue()
- img_str = "data:image/png;base64," + base64.b64encode(img_byte).decode()
- return img_str
-
-def base64_str_to_img(img_str):
- img_str = img_str[len("data:image/png;base64,"):]
- data = base64.b64decode(img_str)
- buffered = BytesIO(data)
- img = Image.open(buffered)
- return img
diff --git a/spaces/jonas/sdg-policy-tracing/analyse_site.py b/spaces/jonas/sdg-policy-tracing/analyse_site.py
deleted file mode 100644
index c6d702c678e16023d9b0d6613f5dbef2ce6b7f93..0000000000000000000000000000000000000000
--- a/spaces/jonas/sdg-policy-tracing/analyse_site.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import streamlit as st
-
-import glob, os, sys; sys.path.append('/src')
-#import helper
-from src import preprocessing as pre
-from src import cleaning as clean
-
-def app():
- # Sidebar
- st.sidebar.title('Analyse Policy Document')
-
- # Container
- with st.container():
- st.markdown("SDSN X GIZ Policy Tracing
",
- unsafe_allow_html=True)
-
- file = st.file_uploader('Upload PDF File', type=['pdf', 'docx', 'txt'])
-
- if file is not None:
- st.write("Filename: ", file.name)
- # text = []
- # with pdfplumber.open(file) as pdf:
- # for page in pdf.pages:
- # text.append(page.extract_text())
- # text_str = ' '.join([page for page in text])
-
- # st.write('Number of pages:',len(pdf.pages))
-
- # load document
- docs = pre.load_document(file)
-
- # preprocess document
- docs_processed, df, all_text = clean.preprocessing(docs)
-
-
-
- st.write('... ')
-
- else:
- st.write(' ')
- st.write(' ')
- st.markdown("no PDF uploaded ...
",
- unsafe_allow_html=True)
diff --git a/spaces/jone/GFPGAN/setup.py b/spaces/jone/GFPGAN/setup.py
deleted file mode 100644
index 474e9188aa2dc5c19614921760ce4ad99bd19c13..0000000000000000000000000000000000000000
--- a/spaces/jone/GFPGAN/setup.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-
-from setuptools import find_packages, setup
-
-import os
-import subprocess
-import time
-
-version_file = 'gfpgan/version.py'
-
-
-def readme():
- with open('README.md', encoding='utf-8') as f:
- content = f.read()
- return content
-
-
-def get_git_hash():
-
- def _minimal_ext_cmd(cmd):
- # construct minimal environment
- env = {}
- for k in ['SYSTEMROOT', 'PATH', 'HOME']:
- v = os.environ.get(k)
- if v is not None:
- env[k] = v
- # LANGUAGE is used on win32
- env['LANGUAGE'] = 'C'
- env['LANG'] = 'C'
- env['LC_ALL'] = 'C'
- out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
- return out
-
- try:
- out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
- sha = out.strip().decode('ascii')
- except OSError:
- sha = 'unknown'
-
- return sha
-
-
-def get_hash():
- if os.path.exists('.git'):
- sha = get_git_hash()[:7]
- else:
- sha = 'unknown'
-
- return sha
-
-
-def write_version_py():
- content = """# GENERATED VERSION FILE
-# TIME: {}
-__version__ = '{}'
-__gitsha__ = '{}'
-version_info = ({})
-"""
- sha = get_hash()
- with open('VERSION', 'r') as f:
- SHORT_VERSION = f.read().strip()
- VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])
-
- version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
- with open(version_file, 'w') as f:
- f.write(version_file_str)
-
-
-def get_version():
- with open(version_file, 'r') as f:
- exec(compile(f.read(), version_file, 'exec'))
- return locals()['__version__']
-
-
-def get_requirements(filename='requirements.txt'):
- here = os.path.dirname(os.path.realpath(__file__))
- with open(os.path.join(here, filename), 'r') as f:
- requires = [line.replace('\n', '') for line in f.readlines()]
- return requires
-
-
-if __name__ == '__main__':
- write_version_py()
- setup(
- name='gfpgan',
- version=get_version(),
- description='GFPGAN aims at developing Practical Algorithms for Real-world Face Restoration',
- long_description=readme(),
- long_description_content_type='text/markdown',
- author='Xintao Wang',
- author_email='xintao.wang@outlook.com',
- keywords='computer vision, pytorch, image restoration, super-resolution, face restoration, gan, gfpgan',
- url='https://github.com/TencentARC/GFPGAN',
- include_package_data=True,
- packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
- classifiers=[
- 'Development Status :: 4 - Beta',
- 'License :: OSI Approved :: Apache Software License',
- 'Operating System :: OS Independent',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8',
- ],
- license='Apache License Version 2.0',
- setup_requires=['cython', 'numpy'],
- install_requires=get_requirements(),
- zip_safe=False)
diff --git a/spaces/joshen/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp b/spaces/joshen/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp
deleted file mode 100644
index 36d06c8e9068570c3e7624895d474f33dbfe3d29..0000000000000000000000000000000000000000
--- a/spaces/joshen/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp
+++ /dev/null
@@ -1,3276 +0,0 @@
-// jpgd.cpp - C++ class for JPEG decompression.
-// Public domain, Rich Geldreich
-// Last updated Apr. 16, 2011
-// Alex Evans: Linear memory allocator (taken from jpge.h).
-//
-// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2.
-//
-// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling.
-// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain"
-// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html
-
-#include "jpgd.h"
-#include <string.h>
-
-#include <assert.h>
-// BEGIN EPIC MOD
-#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0
-// END EPIC MOD
-
-#ifdef _MSC_VER
-#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable
-#endif
-
-// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling).
-// This is slower, but results in higher quality on images with highly saturated colors.
-#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1
-
-#define JPGD_TRUE (1)
-#define JPGD_FALSE (0)
-
-#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b))
-#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b))
-
-namespace jpgd {
-
- static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
- static inline void jpgd_free(void *p) { FMemory::Free(p); }
-
-// BEGIN EPIC MOD
-//@UE3 - use UE3 BGRA encoding instead of assuming RGBA
- // stolen from IImageWrapper.h
- enum ERGBFormatJPG
- {
- Invalid = -1,
- RGBA = 0,
- BGRA = 1,
- Gray = 2,
- };
- static ERGBFormatJPG jpg_format;
-// END EPIC MOD
-
- // DCT coefficients are stored in this sequence.
- static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
-
- enum JPEG_MARKER
- {
- M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8,
- M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC,
- M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7,
- M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF,
- M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0
- };
-
- enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 };
-
-#define CONST_BITS 13
-#define PASS1_BITS 2
-#define SCALEDONE ((int32)1)
-
-#define FIX_0_298631336 ((int32)2446) /* FIX(0.298631336) */
-#define FIX_0_390180644 ((int32)3196) /* FIX(0.390180644) */
-#define FIX_0_541196100 ((int32)4433) /* FIX(0.541196100) */
-#define FIX_0_765366865 ((int32)6270) /* FIX(0.765366865) */
-#define FIX_0_899976223 ((int32)7373) /* FIX(0.899976223) */
-#define FIX_1_175875602 ((int32)9633) /* FIX(1.175875602) */
-#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */
-#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */
-#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */
-#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */
-#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */
-#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */
-
-#define DESCALE(x,n) (((x) + (SCALEDONE << ((n)-1))) >> (n))
-#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n))
-
-#define MULTIPLY(var, cnst) ((var) * (cnst))
-
-#define CLAMP(i) ((static_cast<uint>(i) > 255) ? (((~i) >> 31) & 0xFF) : (i))
-
- // Compiler creates a fast path 1D IDCT for X non-zero columns
- template <int NONZERO_COLS>
- struct Row
- {
- static void idct(int* pTemp, const jpgd_block_t* pSrc)
- {
- // ACCESS_COL() will be optimized at compile time to either an array access, or 0.
-#define ACCESS_COL(x) (((x) < NONZERO_COLS) ? (int)pSrc[x] : 0)
-
- const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6);
-
- const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
- const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
- const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
-
- const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS;
- const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS;
-
- const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
-
- const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1);
-
- const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3;
- const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602);
-
- const int az1 = MULTIPLY(bz1, - FIX_0_899976223);
- const int az2 = MULTIPLY(bz2, - FIX_2_562915447);
- const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5;
- const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5;
-
- const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3;
- const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4;
- const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3;
- const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4;
-
- pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS);
- pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS);
- pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS);
- pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS);
- pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS);
- pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS);
- pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS);
- pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS);
- }
- };
-
- template <>
- struct Row<0>
- {
- static void idct(int* pTemp, const jpgd_block_t* pSrc)
- {
-#ifdef _MSC_VER
- pTemp; pSrc;
-#endif
- }
- };
-
- template <>
- struct Row<1>
- {
- static void idct(int* pTemp, const jpgd_block_t* pSrc)
- {
- const int dcval = (pSrc[0] << PASS1_BITS);
-
- pTemp[0] = dcval;
- pTemp[1] = dcval;
- pTemp[2] = dcval;
- pTemp[3] = dcval;
- pTemp[4] = dcval;
- pTemp[5] = dcval;
- pTemp[6] = dcval;
- pTemp[7] = dcval;
- }
- };
-
- // Compiler creates a fast path 1D IDCT for X non-zero rows
- template <int NONZERO_ROWS>
- struct Col
- {
- static void idct(uint8* pDst_ptr, const int* pTemp)
- {
- // ACCESS_ROW() will be optimized at compile time to either an array access, or 0.
-#define ACCESS_ROW(x) (((x) < NONZERO_ROWS) ? pTemp[x * 8] : 0)
-
- const int z2 = ACCESS_ROW(2);
- const int z3 = ACCESS_ROW(6);
-
- const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
- const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
- const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
-
- const int tmp0 = (ACCESS_ROW(0) + ACCESS_ROW(4)) << CONST_BITS;
- const int tmp1 = (ACCESS_ROW(0) - ACCESS_ROW(4)) << CONST_BITS;
-
- const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
-
- const int atmp0 = ACCESS_ROW(7), atmp1 = ACCESS_ROW(5), atmp2 = ACCESS_ROW(3), atmp3 = ACCESS_ROW(1);
-
- const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3;
- const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602);
-
- const int az1 = MULTIPLY(bz1, - FIX_0_899976223);
- const int az2 = MULTIPLY(bz2, - FIX_2_562915447);
- const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5;
- const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5;
-
- const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3;
- const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4;
- const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3;
- const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4;
-
- int i = DESCALE_ZEROSHIFT(tmp10 + btmp3, CONST_BITS+PASS1_BITS+3);
- pDst_ptr[8*0] = (uint8)CLAMP(i);
-
- i = DESCALE_ZEROSHIFT(tmp10 - btmp3, CONST_BITS+PASS1_BITS+3);
- pDst_ptr[8*7] = (uint8)CLAMP(i);
-
- i = DESCALE_ZEROSHIFT(tmp11 + btmp2, CONST_BITS+PASS1_BITS+3);
- pDst_ptr[8*1] = (uint8)CLAMP(i);
-
- i = DESCALE_ZEROSHIFT(tmp11 - btmp2, CONST_BITS+PASS1_BITS+3);
- pDst_ptr[8*6] = (uint8)CLAMP(i);
-
- i = DESCALE_ZEROSHIFT(tmp12 + btmp1, CONST_BITS+PASS1_BITS+3);
- pDst_ptr[8*2] = (uint8)CLAMP(i);
-
- i = DESCALE_ZEROSHIFT(tmp12 - btmp1, CONST_BITS+PASS1_BITS+3);
- pDst_ptr[8*5] = (uint8)CLAMP(i);
-
- i = DESCALE_ZEROSHIFT(tmp13 + btmp0, CONST_BITS+PASS1_BITS+3);
- pDst_ptr[8*3] = (uint8)CLAMP(i);
-
- i = DESCALE_ZEROSHIFT(tmp13 - btmp0, CONST_BITS+PASS1_BITS+3);
- pDst_ptr[8*4] = (uint8)CLAMP(i);
- }
- };
-
- template <>
- struct Col<1>
- {
- static void idct(uint8* pDst_ptr, const int* pTemp)
- {
- int dcval = DESCALE_ZEROSHIFT(pTemp[0], PASS1_BITS+3);
- const uint8 dcval_clamped = (uint8)CLAMP(dcval);
- pDst_ptr[0*8] = dcval_clamped;
- pDst_ptr[1*8] = dcval_clamped;
- pDst_ptr[2*8] = dcval_clamped;
- pDst_ptr[3*8] = dcval_clamped;
- pDst_ptr[4*8] = dcval_clamped;
- pDst_ptr[5*8] = dcval_clamped;
- pDst_ptr[6*8] = dcval_clamped;
- pDst_ptr[7*8] = dcval_clamped;
- }
- };
-
- static const uint8 s_idct_row_table[] =
- {
- 1,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0, 2,1,0,0,0,0,0,0, 2,1,1,0,0,0,0,0, 2,2,1,0,0,0,0,0, 3,2,1,0,0,0,0,0, 4,2,1,0,0,0,0,0, 4,3,1,0,0,0,0,0,
- 4,3,2,0,0,0,0,0, 4,3,2,1,0,0,0,0, 4,3,2,1,1,0,0,0, 4,3,2,2,1,0,0,0, 4,3,3,2,1,0,0,0, 4,4,3,2,1,0,0,0, 5,4,3,2,1,0,0,0, 6,4,3,2,1,0,0,0,
- 6,5,3,2,1,0,0,0, 6,5,4,2,1,0,0,0, 6,5,4,3,1,0,0,0, 6,5,4,3,2,0,0,0, 6,5,4,3,2,1,0,0, 6,5,4,3,2,1,1,0, 6,5,4,3,2,2,1,0, 6,5,4,3,3,2,1,0,
- 6,5,4,4,3,2,1,0, 6,5,5,4,3,2,1,0, 6,6,5,4,3,2,1,0, 7,6,5,4,3,2,1,0, 8,6,5,4,3,2,1,0, 8,7,5,4,3,2,1,0, 8,7,6,4,3,2,1,0, 8,7,6,5,3,2,1,0,
- 8,7,6,5,4,2,1,0, 8,7,6,5,4,3,1,0, 8,7,6,5,4,3,2,0, 8,7,6,5,4,3,2,1, 8,7,6,5,4,3,2,2, 8,7,6,5,4,3,3,2, 8,7,6,5,4,4,3,2, 8,7,6,5,5,4,3,2,
- 8,7,6,6,5,4,3,2, 8,7,7,6,5,4,3,2, 8,8,7,6,5,4,3,2, 8,8,8,6,5,4,3,2, 8,8,8,7,5,4,3,2, 8,8,8,7,6,4,3,2, 8,8,8,7,6,5,3,2, 8,8,8,7,6,5,4,2,
- 8,8,8,7,6,5,4,3, 8,8,8,7,6,5,4,4, 8,8,8,7,6,5,5,4, 8,8,8,7,6,6,5,4, 8,8,8,7,7,6,5,4, 8,8,8,8,7,6,5,4, 8,8,8,8,8,6,5,4, 8,8,8,8,8,7,5,4,
- 8,8,8,8,8,7,6,4, 8,8,8,8,8,7,6,5, 8,8,8,8,8,7,6,6, 8,8,8,8,8,7,7,6, 8,8,8,8,8,8,7,6, 8,8,8,8,8,8,8,6, 8,8,8,8,8,8,8,7, 8,8,8,8,8,8,8,8,
- };
-
- static const uint8 s_idct_col_table[] = { 1, 1, 2, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 };
-
- void idct(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr, int block_max_zag)
- {
- JPGD_ASSERT(block_max_zag >= 1);
- JPGD_ASSERT(block_max_zag <= 64);
-
- if (block_max_zag == 1)
- {
- int k = ((pSrc_ptr[0] + 4) >> 3) + 128;
- k = CLAMP(k);
- k = k | (k<<8);
- k = k | (k<<16);
-
- for (int i = 8; i > 0; i--)
- {
- *(int*)&pDst_ptr[0] = k;
- *(int*)&pDst_ptr[4] = k;
- pDst_ptr += 8;
- }
- return;
- }
-
- int temp[64];
-
- const jpgd_block_t* pSrc = pSrc_ptr;
- int* pTemp = temp;
-
- const uint8* pRow_tab = &s_idct_row_table[(block_max_zag - 1) * 8];
- int i;
- for (i = 8; i > 0; i--, pRow_tab++)
- {
- switch (*pRow_tab)
- {
- case 0: Row<0>::idct(pTemp, pSrc); break;
- case 1: Row<1>::idct(pTemp, pSrc); break;
- case 2: Row<2>::idct(pTemp, pSrc); break;
- case 3: Row<3>::idct(pTemp, pSrc); break;
- case 4: Row<4>::idct(pTemp, pSrc); break;
- case 5: Row<5>::idct(pTemp, pSrc); break;
- case 6: Row<6>::idct(pTemp, pSrc); break;
- case 7: Row<7>::idct(pTemp, pSrc); break;
- case 8: Row<8>::idct(pTemp, pSrc); break;
- }
-
- pSrc += 8;
- pTemp += 8;
- }
-
- pTemp = temp;
-
- const int nonzero_rows = s_idct_col_table[block_max_zag - 1];
- for (i = 8; i > 0; i--)
- {
- switch (nonzero_rows)
- {
- case 1: Col<1>::idct(pDst_ptr, pTemp); break;
- case 2: Col<2>::idct(pDst_ptr, pTemp); break;
- case 3: Col<3>::idct(pDst_ptr, pTemp); break;
- case 4: Col<4>::idct(pDst_ptr, pTemp); break;
- case 5: Col<5>::idct(pDst_ptr, pTemp); break;
- case 6: Col<6>::idct(pDst_ptr, pTemp); break;
- case 7: Col<7>::idct(pDst_ptr, pTemp); break;
- case 8: Col<8>::idct(pDst_ptr, pTemp); break;
- }
-
- pTemp++;
- pDst_ptr++;
- }
- }
-
- void idct_4x4(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr)
- {
- int temp[64];
- int* pTemp = temp;
- const jpgd_block_t* pSrc = pSrc_ptr;
-
- for (int i = 4; i > 0; i--)
- {
- Row<4>::idct(pTemp, pSrc);
- pSrc += 8;
- pTemp += 8;
- }
-
- pTemp = temp;
- for (int i = 8; i > 0; i--)
- {
- Col<4>::idct(pDst_ptr, pTemp);
- pTemp++;
- pDst_ptr++;
- }
- }
-
- // Retrieve one character from the input stream.
- inline uint jpeg_decoder::get_char()
- {
- // Any bytes remaining in buffer?
- if (!m_in_buf_left)
- {
- // Try to get more bytes.
- prep_in_buffer();
- // Still nothing to get?
- if (!m_in_buf_left)
- {
- // Pad the end of the stream with 0xFF 0xD9 (EOI marker)
- int t = m_tem_flag;
- m_tem_flag ^= 1;
- if (t)
- return 0xD9;
- else
- return 0xFF;
- }
- }
-
- uint c = *m_pIn_buf_ofs++;
- m_in_buf_left--;
-
- return c;
- }
-
- // Same as previous method, except can indicate if the character is a pad character or not.
- inline uint jpeg_decoder::get_char(bool *pPadding_flag)
- {
- if (!m_in_buf_left)
- {
- prep_in_buffer();
- if (!m_in_buf_left)
- {
- *pPadding_flag = true;
- int t = m_tem_flag;
- m_tem_flag ^= 1;
- if (t)
- return 0xD9;
- else
- return 0xFF;
- }
- }
-
- *pPadding_flag = false;
-
- uint c = *m_pIn_buf_ofs++;
- m_in_buf_left--;
-
- return c;
- }
-
- // Inserts a previously retrieved character back into the input buffer.
- inline void jpeg_decoder::stuff_char(uint8 q)
- {
- *(--m_pIn_buf_ofs) = q;
- m_in_buf_left++;
- }
-
- // Retrieves one character from the input stream, but does not read past markers. Will continue to return 0xFF when a marker is encountered.
- inline uint8 jpeg_decoder::get_octet()
- {
- bool padding_flag;
- int c = get_char(&padding_flag);
-
- if (c == 0xFF)
- {
- if (padding_flag)
- return 0xFF;
-
- c = get_char(&padding_flag);
- if (padding_flag)
- {
- stuff_char(0xFF);
- return 0xFF;
- }
-
- if (c == 0x00)
- return 0xFF;
- else
- {
- stuff_char(static_cast<uint8>(c));
- stuff_char(0xFF);
- return 0xFF;
- }
- }
-
- return static_cast<uint8>(c);
- }
-
- // Retrieves a variable number of bits from the input stream. Does not recognize markers.
- inline uint jpeg_decoder::get_bits(int num_bits)
- {
- if (!num_bits)
- return 0;
-
- uint i = m_bit_buf >> (32 - num_bits);
-
- if ((m_bits_left -= num_bits) <= 0)
- {
- m_bit_buf <<= (num_bits += m_bits_left);
-
- uint c1 = get_char();
- uint c2 = get_char();
- m_bit_buf = (m_bit_buf & 0xFFFF0000) | (c1 << 8) | c2;
-
- m_bit_buf <<= -m_bits_left;
-
- m_bits_left += 16;
-
- JPGD_ASSERT(m_bits_left >= 0);
- }
- else
- m_bit_buf <<= num_bits;
-
- return i;
- }
-
- // Retrieves a variable number of bits from the input stream. Markers will not be read into the input bit buffer. Instead, an infinite number of all 1's will be returned when a marker is encountered.
- inline uint jpeg_decoder::get_bits_no_markers(int num_bits)
- {
- if (!num_bits)
- return 0;
-
- uint i = m_bit_buf >> (32 - num_bits);
-
- if ((m_bits_left -= num_bits) <= 0)
- {
- m_bit_buf <<= (num_bits += m_bits_left);
-
- if ((m_in_buf_left < 2) || (m_pIn_buf_ofs[0] == 0xFF) || (m_pIn_buf_ofs[1] == 0xFF))
- {
- uint c1 = get_octet();
- uint c2 = get_octet();
- m_bit_buf |= (c1 << 8) | c2;
- }
- else
- {
- m_bit_buf |= ((uint)m_pIn_buf_ofs[0] << 8) | m_pIn_buf_ofs[1];
- m_in_buf_left -= 2;
- m_pIn_buf_ofs += 2;
- }
-
- m_bit_buf <<= -m_bits_left;
-
- m_bits_left += 16;
-
- JPGD_ASSERT(m_bits_left >= 0);
- }
- else
- m_bit_buf <<= num_bits;
-
- return i;
- }
-
- // Decodes a Huffman encoded symbol.
- inline int jpeg_decoder::huff_decode(huff_tables *pH)
- {
- int symbol;
-
- // Check first 8-bits: do we have a complete symbol?
- if ((symbol = pH->look_up[m_bit_buf >> 24]) < 0)
- {
- // Decode more bits, use a tree traversal to find symbol.
- int ofs = 23;
- do
- {
- symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))];
- ofs--;
- } while (symbol < 0);
-
- get_bits_no_markers(8 + (23 - ofs));
- }
- else
- get_bits_no_markers(pH->code_size[symbol]);
-
- return symbol;
- }
-
- // Decodes a Huffman encoded symbol.
- inline int jpeg_decoder::huff_decode(huff_tables *pH, int& extra_bits)
- {
- int symbol;
-
- // Check first 8-bits: do we have a complete symbol?
- if ((symbol = pH->look_up2[m_bit_buf >> 24]) < 0)
- {
- // Use a tree traversal to find symbol.
- int ofs = 23;
- do
- {
- symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))];
- ofs--;
- } while (symbol < 0);
-
- get_bits_no_markers(8 + (23 - ofs));
-
- extra_bits = get_bits_no_markers(symbol & 0xF);
- }
- else
- {
- JPGD_ASSERT(((symbol >> 8) & 31) == pH->code_size[symbol & 255] + ((symbol & 0x8000) ? (symbol & 15) : 0));
-
- if (symbol & 0x8000)
- {
- get_bits_no_markers((symbol >> 8) & 31);
- extra_bits = symbol >> 16;
- }
- else
- {
- int code_size = (symbol >> 8) & 31;
- int num_extra_bits = symbol & 0xF;
- int bits = code_size + num_extra_bits;
- if (bits <= (m_bits_left + 16))
- extra_bits = get_bits_no_markers(bits) & ((1 << num_extra_bits) - 1);
- else
- {
- get_bits_no_markers(code_size);
- extra_bits = get_bits_no_markers(num_extra_bits);
- }
- }
-
- symbol &= 0xFF;
- }
-
- return symbol;
- }
-
- // Tables and macro used to fully decode the DPCM differences.
- static const int s_extend_test[16] = { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 };
- static const int s_extend_offset[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 };
- static const int s_extend_mask[] = { 0, (1<<0), (1<<1), (1<<2), (1<<3), (1<<4), (1<<5), (1<<6), (1<<7), (1<<8), (1<<9), (1<<10), (1<<11), (1<<12), (1<<13), (1<<14), (1<<15), (1<<16) };
-#define HUFF_EXTEND(x,s) ((x) < s_extend_test[s] ? (x) + s_extend_offset[s] : (x))
-
- // Clamps a value between 0-255.
- inline uint8 jpeg_decoder::clamp(int i)
- {
- if (static_cast<uint>(i) > 255)
- i = (((~i) >> 31) & 0xFF);
-
- return static_cast<uint8>(i);
- }
-
- namespace DCT_Upsample
- {
- struct Matrix44
- {
- typedef int Element_Type;
- enum { NUM_ROWS = 4, NUM_COLS = 4 };
-
- Element_Type v[NUM_ROWS][NUM_COLS];
-
- inline int rows() const { return NUM_ROWS; }
- inline int cols() const { return NUM_COLS; }
-
- inline const Element_Type & at(int r, int c) const { return v[r][c]; }
- inline Element_Type & at(int r, int c) { return v[r][c]; }
-
- inline Matrix44() { }
-
- inline Matrix44& operator += (const Matrix44& a)
- {
- for (int r = 0; r < NUM_ROWS; r++)
- {
- at(r, 0) += a.at(r, 0);
- at(r, 1) += a.at(r, 1);
- at(r, 2) += a.at(r, 2);
- at(r, 3) += a.at(r, 3);
- }
- return *this;
- }
-
- inline Matrix44& operator -= (const Matrix44& a)
- {
- for (int r = 0; r < NUM_ROWS; r++)
- {
- at(r, 0) -= a.at(r, 0);
- at(r, 1) -= a.at(r, 1);
- at(r, 2) -= a.at(r, 2);
- at(r, 3) -= a.at(r, 3);
- }
- return *this;
- }
-
- friend inline Matrix44 operator + (const Matrix44& a, const Matrix44& b)
- {
- Matrix44 ret;
- for (int r = 0; r < NUM_ROWS; r++)
- {
- ret.at(r, 0) = a.at(r, 0) + b.at(r, 0);
- ret.at(r, 1) = a.at(r, 1) + b.at(r, 1);
- ret.at(r, 2) = a.at(r, 2) + b.at(r, 2);
- ret.at(r, 3) = a.at(r, 3) + b.at(r, 3);
- }
- return ret;
- }
-
- friend inline Matrix44 operator - (const Matrix44& a, const Matrix44& b)
- {
- Matrix44 ret;
- for (int r = 0; r < NUM_ROWS; r++)
- {
- ret.at(r, 0) = a.at(r, 0) - b.at(r, 0);
- ret.at(r, 1) = a.at(r, 1) - b.at(r, 1);
- ret.at(r, 2) = a.at(r, 2) - b.at(r, 2);
- ret.at(r, 3) = a.at(r, 3) - b.at(r, 3);
- }
- return ret;
- }
-
- static inline void add_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b)
- {
- for (int r = 0; r < 4; r++)
- {
- pDst[0*8 + r] = static_cast<jpgd_block_t>(a.at(r, 0) + b.at(r, 0));
- pDst[1*8 + r] = static_cast<jpgd_block_t>(a.at(r, 1) + b.at(r, 1));
- pDst[2*8 + r] = static_cast<jpgd_block_t>(a.at(r, 2) + b.at(r, 2));
- pDst[3*8 + r] = static_cast<jpgd_block_t>(a.at(r, 3) + b.at(r, 3));
- }
- }
-
- static inline void sub_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b)
- {
- for (int r = 0; r < 4; r++)
- {
- pDst[0*8 + r] = static_cast<jpgd_block_t>(a.at(r, 0) - b.at(r, 0));
- pDst[1*8 + r] = static_cast<jpgd_block_t>(a.at(r, 1) - b.at(r, 1));
- pDst[2*8 + r] = static_cast<jpgd_block_t>(a.at(r, 2) - b.at(r, 2));
- pDst[3*8 + r] = static_cast<jpgd_block_t>(a.at(r, 3) - b.at(r, 3));
- }
- }
- };
-
- const int FRACT_BITS = 10;
- const int SCALE = 1 << FRACT_BITS;
-
- typedef int Temp_Type;
-#define D(i) (((i) + (SCALE >> 1)) >> FRACT_BITS)
-#define F(i) ((int)((i) * SCALE + .5f))
-
- // Any decent C++ compiler will optimize this at compile time to a 0, or an array access.
-#define AT(c, r) ((((c)>=NUM_COLS)||((r)>=NUM_ROWS)) ? 0 : pSrc[(c)+(r)*8])
-
- // NUM_ROWS/NUM_COLS = # of non-zero rows/cols in input matrix
- template <int NUM_ROWS, int NUM_COLS>
- struct P_Q
- {
- static void calc(Matrix44& P, Matrix44& Q, const jpgd_block_t* pSrc)
- {
- // 4x8 = 4x8 times 8x8, matrix 0 is constant
- const Temp_Type X000 = AT(0, 0);
- const Temp_Type X001 = AT(0, 1);
- const Temp_Type X002 = AT(0, 2);
- const Temp_Type X003 = AT(0, 3);
- const Temp_Type X004 = AT(0, 4);
- const Temp_Type X005 = AT(0, 5);
- const Temp_Type X006 = AT(0, 6);
- const Temp_Type X007 = AT(0, 7);
- const Temp_Type X010 = D(F(0.415735f) * AT(1, 0) + F(0.791065f) * AT(3, 0) + F(-0.352443f) * AT(5, 0) + F(0.277785f) * AT(7, 0));
- const Temp_Type X011 = D(F(0.415735f) * AT(1, 1) + F(0.791065f) * AT(3, 1) + F(-0.352443f) * AT(5, 1) + F(0.277785f) * AT(7, 1));
- const Temp_Type X012 = D(F(0.415735f) * AT(1, 2) + F(0.791065f) * AT(3, 2) + F(-0.352443f) * AT(5, 2) + F(0.277785f) * AT(7, 2));
- const Temp_Type X013 = D(F(0.415735f) * AT(1, 3) + F(0.791065f) * AT(3, 3) + F(-0.352443f) * AT(5, 3) + F(0.277785f) * AT(7, 3));
- const Temp_Type X014 = D(F(0.415735f) * AT(1, 4) + F(0.791065f) * AT(3, 4) + F(-0.352443f) * AT(5, 4) + F(0.277785f) * AT(7, 4));
- const Temp_Type X015 = D(F(0.415735f) * AT(1, 5) + F(0.791065f) * AT(3, 5) + F(-0.352443f) * AT(5, 5) + F(0.277785f) * AT(7, 5));
- const Temp_Type X016 = D(F(0.415735f) * AT(1, 6) + F(0.791065f) * AT(3, 6) + F(-0.352443f) * AT(5, 6) + F(0.277785f) * AT(7, 6));
- const Temp_Type X017 = D(F(0.415735f) * AT(1, 7) + F(0.791065f) * AT(3, 7) + F(-0.352443f) * AT(5, 7) + F(0.277785f) * AT(7, 7));
- const Temp_Type X020 = AT(4, 0);
- const Temp_Type X021 = AT(4, 1);
- const Temp_Type X022 = AT(4, 2);
- const Temp_Type X023 = AT(4, 3);
- const Temp_Type X024 = AT(4, 4);
- const Temp_Type X025 = AT(4, 5);
- const Temp_Type X026 = AT(4, 6);
- const Temp_Type X027 = AT(4, 7);
- const Temp_Type X030 = D(F(0.022887f) * AT(1, 0) + F(-0.097545f) * AT(3, 0) + F(0.490393f) * AT(5, 0) + F(0.865723f) * AT(7, 0));
- const Temp_Type X031 = D(F(0.022887f) * AT(1, 1) + F(-0.097545f) * AT(3, 1) + F(0.490393f) * AT(5, 1) + F(0.865723f) * AT(7, 1));
- const Temp_Type X032 = D(F(0.022887f) * AT(1, 2) + F(-0.097545f) * AT(3, 2) + F(0.490393f) * AT(5, 2) + F(0.865723f) * AT(7, 2));
- const Temp_Type X033 = D(F(0.022887f) * AT(1, 3) + F(-0.097545f) * AT(3, 3) + F(0.490393f) * AT(5, 3) + F(0.865723f) * AT(7, 3));
- const Temp_Type X034 = D(F(0.022887f) * AT(1, 4) + F(-0.097545f) * AT(3, 4) + F(0.490393f) * AT(5, 4) + F(0.865723f) * AT(7, 4));
- const Temp_Type X035 = D(F(0.022887f) * AT(1, 5) + F(-0.097545f) * AT(3, 5) + F(0.490393f) * AT(5, 5) + F(0.865723f) * AT(7, 5));
- const Temp_Type X036 = D(F(0.022887f) * AT(1, 6) + F(-0.097545f) * AT(3, 6) + F(0.490393f) * AT(5, 6) + F(0.865723f) * AT(7, 6));
- const Temp_Type X037 = D(F(0.022887f) * AT(1, 7) + F(-0.097545f) * AT(3, 7) + F(0.490393f) * AT(5, 7) + F(0.865723f) * AT(7, 7));
-
- // 4x4 = 4x8 times 8x4, matrix 1 is constant
- P.at(0, 0) = X000;
- P.at(0, 1) = D(X001 * F(0.415735f) + X003 * F(0.791065f) + X005 * F(-0.352443f) + X007 * F(0.277785f));
- P.at(0, 2) = X004;
- P.at(0, 3) = D(X001 * F(0.022887f) + X003 * F(-0.097545f) + X005 * F(0.490393f) + X007 * F(0.865723f));
- P.at(1, 0) = X010;
- P.at(1, 1) = D(X011 * F(0.415735f) + X013 * F(0.791065f) + X015 * F(-0.352443f) + X017 * F(0.277785f));
- P.at(1, 2) = X014;
- P.at(1, 3) = D(X011 * F(0.022887f) + X013 * F(-0.097545f) + X015 * F(0.490393f) + X017 * F(0.865723f));
- P.at(2, 0) = X020;
- P.at(2, 1) = D(X021 * F(0.415735f) + X023 * F(0.791065f) + X025 * F(-0.352443f) + X027 * F(0.277785f));
- P.at(2, 2) = X024;
- P.at(2, 3) = D(X021 * F(0.022887f) + X023 * F(-0.097545f) + X025 * F(0.490393f) + X027 * F(0.865723f));
- P.at(3, 0) = X030;
- P.at(3, 1) = D(X031 * F(0.415735f) + X033 * F(0.791065f) + X035 * F(-0.352443f) + X037 * F(0.277785f));
- P.at(3, 2) = X034;
- P.at(3, 3) = D(X031 * F(0.022887f) + X033 * F(-0.097545f) + X035 * F(0.490393f) + X037 * F(0.865723f));
- // 40 muls 24 adds
-
- // 4x4 = 4x8 times 8x4, matrix 1 is constant
- Q.at(0, 0) = D(X001 * F(0.906127f) + X003 * F(-0.318190f) + X005 * F(0.212608f) + X007 * F(-0.180240f));
- Q.at(0, 1) = X002;
- Q.at(0, 2) = D(X001 * F(-0.074658f) + X003 * F(0.513280f) + X005 * F(0.768178f) + X007 * F(-0.375330f));
- Q.at(0, 3) = X006;
- Q.at(1, 0) = D(X011 * F(0.906127f) + X013 * F(-0.318190f) + X015 * F(0.212608f) + X017 * F(-0.180240f));
- Q.at(1, 1) = X012;
- Q.at(1, 2) = D(X011 * F(-0.074658f) + X013 * F(0.513280f) + X015 * F(0.768178f) + X017 * F(-0.375330f));
- Q.at(1, 3) = X016;
- Q.at(2, 0) = D(X021 * F(0.906127f) + X023 * F(-0.318190f) + X025 * F(0.212608f) + X027 * F(-0.180240f));
- Q.at(2, 1) = X022;
- Q.at(2, 2) = D(X021 * F(-0.074658f) + X023 * F(0.513280f) + X025 * F(0.768178f) + X027 * F(-0.375330f));
- Q.at(2, 3) = X026;
- Q.at(3, 0) = D(X031 * F(0.906127f) + X033 * F(-0.318190f) + X035 * F(0.212608f) + X037 * F(-0.180240f));
- Q.at(3, 1) = X032;
- Q.at(3, 2) = D(X031 * F(-0.074658f) + X033 * F(0.513280f) + X035 * F(0.768178f) + X037 * F(-0.375330f));
- Q.at(3, 3) = X036;
- // 40 muls 24 adds
- }
- };
-
- template <int NUM_ROWS, int NUM_COLS>
- struct R_S
- {
- static void calc(Matrix44& R, Matrix44& S, const jpgd_block_t* pSrc)
- {
- // 4x8 = 4x8 times 8x8, matrix 0 is constant
- const Temp_Type X100 = D(F(0.906127f) * AT(1, 0) + F(-0.318190f) * AT(3, 0) + F(0.212608f) * AT(5, 0) + F(-0.180240f) * AT(7, 0));
- const Temp_Type X101 = D(F(0.906127f) * AT(1, 1) + F(-0.318190f) * AT(3, 1) + F(0.212608f) * AT(5, 1) + F(-0.180240f) * AT(7, 1));
- const Temp_Type X102 = D(F(0.906127f) * AT(1, 2) + F(-0.318190f) * AT(3, 2) + F(0.212608f) * AT(5, 2) + F(-0.180240f) * AT(7, 2));
- const Temp_Type X103 = D(F(0.906127f) * AT(1, 3) + F(-0.318190f) * AT(3, 3) + F(0.212608f) * AT(5, 3) + F(-0.180240f) * AT(7, 3));
- const Temp_Type X104 = D(F(0.906127f) * AT(1, 4) + F(-0.318190f) * AT(3, 4) + F(0.212608f) * AT(5, 4) + F(-0.180240f) * AT(7, 4));
- const Temp_Type X105 = D(F(0.906127f) * AT(1, 5) + F(-0.318190f) * AT(3, 5) + F(0.212608f) * AT(5, 5) + F(-0.180240f) * AT(7, 5));
- const Temp_Type X106 = D(F(0.906127f) * AT(1, 6) + F(-0.318190f) * AT(3, 6) + F(0.212608f) * AT(5, 6) + F(-0.180240f) * AT(7, 6));
- const Temp_Type X107 = D(F(0.906127f) * AT(1, 7) + F(-0.318190f) * AT(3, 7) + F(0.212608f) * AT(5, 7) + F(-0.180240f) * AT(7, 7));
- const Temp_Type X110 = AT(2, 0);
- const Temp_Type X111 = AT(2, 1);
- const Temp_Type X112 = AT(2, 2);
- const Temp_Type X113 = AT(2, 3);
- const Temp_Type X114 = AT(2, 4);
- const Temp_Type X115 = AT(2, 5);
- const Temp_Type X116 = AT(2, 6);
- const Temp_Type X117 = AT(2, 7);
- const Temp_Type X120 = D(F(-0.074658f) * AT(1, 0) + F(0.513280f) * AT(3, 0) + F(0.768178f) * AT(5, 0) + F(-0.375330f) * AT(7, 0));
- const Temp_Type X121 = D(F(-0.074658f) * AT(1, 1) + F(0.513280f) * AT(3, 1) + F(0.768178f) * AT(5, 1) + F(-0.375330f) * AT(7, 1));
- const Temp_Type X122 = D(F(-0.074658f) * AT(1, 2) + F(0.513280f) * AT(3, 2) + F(0.768178f) * AT(5, 2) + F(-0.375330f) * AT(7, 2));
- const Temp_Type X123 = D(F(-0.074658f) * AT(1, 3) + F(0.513280f) * AT(3, 3) + F(0.768178f) * AT(5, 3) + F(-0.375330f) * AT(7, 3));
- const Temp_Type X124 = D(F(-0.074658f) * AT(1, 4) + F(0.513280f) * AT(3, 4) + F(0.768178f) * AT(5, 4) + F(-0.375330f) * AT(7, 4));
- const Temp_Type X125 = D(F(-0.074658f) * AT(1, 5) + F(0.513280f) * AT(3, 5) + F(0.768178f) * AT(5, 5) + F(-0.375330f) * AT(7, 5));
- const Temp_Type X126 = D(F(-0.074658f) * AT(1, 6) + F(0.513280f) * AT(3, 6) + F(0.768178f) * AT(5, 6) + F(-0.375330f) * AT(7, 6));
- const Temp_Type X127 = D(F(-0.074658f) * AT(1, 7) + F(0.513280f) * AT(3, 7) + F(0.768178f) * AT(5, 7) + F(-0.375330f) * AT(7, 7));
- const Temp_Type X130 = AT(6, 0);
- const Temp_Type X131 = AT(6, 1);
- const Temp_Type X132 = AT(6, 2);
- const Temp_Type X133 = AT(6, 3);
- const Temp_Type X134 = AT(6, 4);
- const Temp_Type X135 = AT(6, 5);
- const Temp_Type X136 = AT(6, 6);
- const Temp_Type X137 = AT(6, 7);
- // 80 muls 48 adds
-
- // 4x4 = 4x8 times 8x4, matrix 1 is constant
- R.at(0, 0) = X100;
- R.at(0, 1) = D(X101 * F(0.415735f) + X103 * F(0.791065f) + X105 * F(-0.352443f) + X107 * F(0.277785f));
- R.at(0, 2) = X104;
- R.at(0, 3) = D(X101 * F(0.022887f) + X103 * F(-0.097545f) + X105 * F(0.490393f) + X107 * F(0.865723f));
- R.at(1, 0) = X110;
- R.at(1, 1) = D(X111 * F(0.415735f) + X113 * F(0.791065f) + X115 * F(-0.352443f) + X117 * F(0.277785f));
- R.at(1, 2) = X114;
- R.at(1, 3) = D(X111 * F(0.022887f) + X113 * F(-0.097545f) + X115 * F(0.490393f) + X117 * F(0.865723f));
- R.at(2, 0) = X120;
- R.at(2, 1) = D(X121 * F(0.415735f) + X123 * F(0.791065f) + X125 * F(-0.352443f) + X127 * F(0.277785f));
- R.at(2, 2) = X124;
- R.at(2, 3) = D(X121 * F(0.022887f) + X123 * F(-0.097545f) + X125 * F(0.490393f) + X127 * F(0.865723f));
- R.at(3, 0) = X130;
- R.at(3, 1) = D(X131 * F(0.415735f) + X133 * F(0.791065f) + X135 * F(-0.352443f) + X137 * F(0.277785f));
- R.at(3, 2) = X134;
- R.at(3, 3) = D(X131 * F(0.022887f) + X133 * F(-0.097545f) + X135 * F(0.490393f) + X137 * F(0.865723f));
- // 40 muls 24 adds
- // 4x4 = 4x8 times 8x4, matrix 1 is constant
- S.at(0, 0) = D(X101 * F(0.906127f) + X103 * F(-0.318190f) + X105 * F(0.212608f) + X107 * F(-0.180240f));
- S.at(0, 1) = X102;
- S.at(0, 2) = D(X101 * F(-0.074658f) + X103 * F(0.513280f) + X105 * F(0.768178f) + X107 * F(-0.375330f));
- S.at(0, 3) = X106;
- S.at(1, 0) = D(X111 * F(0.906127f) + X113 * F(-0.318190f) + X115 * F(0.212608f) + X117 * F(-0.180240f));
- S.at(1, 1) = X112;
- S.at(1, 2) = D(X111 * F(-0.074658f) + X113 * F(0.513280f) + X115 * F(0.768178f) + X117 * F(-0.375330f));
- S.at(1, 3) = X116;
- S.at(2, 0) = D(X121 * F(0.906127f) + X123 * F(-0.318190f) + X125 * F(0.212608f) + X127 * F(-0.180240f));
- S.at(2, 1) = X122;
- S.at(2, 2) = D(X121 * F(-0.074658f) + X123 * F(0.513280f) + X125 * F(0.768178f) + X127 * F(-0.375330f));
- S.at(2, 3) = X126;
- S.at(3, 0) = D(X131 * F(0.906127f) + X133 * F(-0.318190f) + X135 * F(0.212608f) + X137 * F(-0.180240f));
- S.at(3, 1) = X132;
- S.at(3, 2) = D(X131 * F(-0.074658f) + X133 * F(0.513280f) + X135 * F(0.768178f) + X137 * F(-0.375330f));
- S.at(3, 3) = X136;
- // 40 muls 24 adds
- }
- };
- } // end namespace DCT_Upsample
-
- // Unconditionally frees all allocated m_blocks.
- void jpeg_decoder::free_all_blocks()
- {
- m_pStream = NULL;
- for (mem_block *b = m_pMem_blocks; b; )
- {
- mem_block *n = b->m_pNext;
- jpgd_free(b);
- b = n;
- }
- m_pMem_blocks = NULL;
- }
-
- // This method handles all errors.
- // It could easily be changed to use C++ exceptions.
- void jpeg_decoder::stop_decoding(jpgd_status status)
- {
- m_error_code = status;
- free_all_blocks();
- longjmp(m_jmp_state, status);
-
- // we shouldn't get here as longjmp shouldn't return, but we put it here to make it explicit
- // that this function doesn't return, otherwise we get this error:
- //
- // error : function declared 'noreturn' should not return
- exit(1);
- }
-
- void *jpeg_decoder::alloc(size_t nSize, bool zero)
- {
- nSize = (JPGD_MAX(nSize, 1) + 3) & ~3;
- char *rv = NULL;
- for (mem_block *b = m_pMem_blocks; b; b = b->m_pNext)
- {
- if ((b->m_used_count + nSize) <= b->m_size)
- {
- rv = b->m_data + b->m_used_count;
- b->m_used_count += nSize;
- break;
- }
- }
- if (!rv)
- {
- int capacity = JPGD_MAX(32768 - 256, (nSize + 2047) & ~2047);
- mem_block *b = (mem_block*)jpgd_malloc(sizeof(mem_block) + capacity);
- if (!b) stop_decoding(JPGD_NOTENOUGHMEM);
- b->m_pNext = m_pMem_blocks; m_pMem_blocks = b;
- b->m_used_count = nSize;
- b->m_size = capacity;
- rv = b->m_data;
- }
- if (zero) memset(rv, 0, nSize);
- return rv;
- }
-
- void jpeg_decoder::word_clear(void *p, uint16 c, uint n)
- {
- uint8 *pD = (uint8*)p;
- const uint8 l = c & 0xFF, h = (c >> 8) & 0xFF;
- while (n)
- {
- pD[0] = l; pD[1] = h; pD += 2;
- n--;
- }
- }
-
- // Refill the input buffer.
- // This method will sit in a loop until (A) the buffer is full or (B)
- // the stream's read() method reports and end of file condition.
- void jpeg_decoder::prep_in_buffer()
- {
- m_in_buf_left = 0;
- m_pIn_buf_ofs = m_in_buf;
-
- if (m_eof_flag)
- return;
-
- do
- {
- int bytes_read = m_pStream->read(m_in_buf + m_in_buf_left, JPGD_IN_BUF_SIZE - m_in_buf_left, &m_eof_flag);
- if (bytes_read == -1)
- stop_decoding(JPGD_STREAM_READ);
-
- m_in_buf_left += bytes_read;
- } while ((m_in_buf_left < JPGD_IN_BUF_SIZE) && (!m_eof_flag));
-
- m_total_bytes_read += m_in_buf_left;
-
- // Pad the end of the block with M_EOI (prevents the decompressor from going off the rails if the stream is invalid).
- // (This dates way back to when this decompressor was written in C/asm, and the all-asm Huffman decoder did some fancy things to increase perf.)
- word_clear(m_pIn_buf_ofs + m_in_buf_left, 0xD9FF, 64);
- }
-
- // Read a Huffman code table.
- void jpeg_decoder::read_dht_marker()
- {
- int i, index, count;
- uint8 huff_num[17];
- uint8 huff_val[256];
-
- uint num_left = get_bits(16);
-
- if (num_left < 2)
- stop_decoding(JPGD_BAD_DHT_MARKER);
-
- num_left -= 2;
-
- while (num_left)
- {
- index = get_bits(8);
-
- huff_num[0] = 0;
-
- count = 0;
-
- for (i = 1; i <= 16; i++)
- {
- huff_num[i] = static_cast(get_bits(8));
- count += huff_num[i];
- }
-
- if (count > 255)
- stop_decoding(JPGD_BAD_DHT_COUNTS);
-
- for (i = 0; i < count; i++)
- huff_val[i] = static_cast(get_bits(8));
-
- i = 1 + 16 + count;
-
- if (num_left < (uint)i)
- stop_decoding(JPGD_BAD_DHT_MARKER);
-
- num_left -= i;
-
- if ((index & 0x10) > 0x10)
- stop_decoding(JPGD_BAD_DHT_INDEX);
-
- index = (index & 0x0F) + ((index & 0x10) >> 4) * (JPGD_MAX_HUFF_TABLES >> 1);
-
- if (index >= JPGD_MAX_HUFF_TABLES)
- stop_decoding(JPGD_BAD_DHT_INDEX);
-
- if (!m_huff_num[index])
- m_huff_num[index] = (uint8 *)alloc(17);
-
- if (!m_huff_val[index])
- m_huff_val[index] = (uint8 *)alloc(256);
-
- m_huff_ac[index] = (index & 0x10) != 0;
- memcpy(m_huff_num[index], huff_num, 17);
- memcpy(m_huff_val[index], huff_val, 256);
- }
- }
-
- // Read a quantization table.
- void jpeg_decoder::read_dqt_marker()
- {
- int n, i, prec;
- uint num_left;
- uint temp;
-
- num_left = get_bits(16);
-
- if (num_left < 2)
- stop_decoding(JPGD_BAD_DQT_MARKER);
-
- num_left -= 2;
-
- while (num_left)
- {
- n = get_bits(8);
- prec = n >> 4;
- n &= 0x0F;
-
- if (n >= JPGD_MAX_QUANT_TABLES)
- stop_decoding(JPGD_BAD_DQT_TABLE);
-
- if (!m_quant[n])
- m_quant[n] = (jpgd_quant_t *)alloc(64 * sizeof(jpgd_quant_t));
-
- // read quantization entries, in zag order
- for (i = 0; i < 64; i++)
- {
- temp = get_bits(8);
-
- if (prec)
- temp = (temp << 8) + get_bits(8);
-
- m_quant[n][i] = static_cast(temp);
- }
-
- i = 64 + 1;
-
- if (prec)
- i += 64;
-
- if (num_left < (uint)i)
- stop_decoding(JPGD_BAD_DQT_LENGTH);
-
- num_left -= i;
- }
- }
-
- // Read the start of frame (SOF) marker.
- void jpeg_decoder::read_sof_marker()
- {
- int i;
- uint num_left;
-
- num_left = get_bits(16);
-
- if (get_bits(8) != 8) /* precision: sorry, only 8-bit precision is supported right now */
- stop_decoding(JPGD_BAD_PRECISION);
-
- m_image_y_size = get_bits(16);
-
- if ((m_image_y_size < 1) || (m_image_y_size > JPGD_MAX_HEIGHT))
- stop_decoding(JPGD_BAD_HEIGHT);
-
- m_image_x_size = get_bits(16);
-
- if ((m_image_x_size < 1) || (m_image_x_size > JPGD_MAX_WIDTH))
- stop_decoding(JPGD_BAD_WIDTH);
-
- m_comps_in_frame = get_bits(8);
-
- if (m_comps_in_frame > JPGD_MAX_COMPONENTS)
- stop_decoding(JPGD_TOO_MANY_COMPONENTS);
-
- if (num_left != (uint)(m_comps_in_frame * 3 + 8))
- stop_decoding(JPGD_BAD_SOF_LENGTH);
-
- for (i = 0; i < m_comps_in_frame; i++)
- {
- m_comp_ident[i] = get_bits(8);
- m_comp_h_samp[i] = get_bits(4);
- m_comp_v_samp[i] = get_bits(4);
- m_comp_quant[i] = get_bits(8);
- }
- }
-
- // Used to skip unrecognized markers.
- void jpeg_decoder::skip_variable_marker()
- {
- uint num_left;
-
- num_left = get_bits(16);
-
- if (num_left < 2)
- stop_decoding(JPGD_BAD_VARIABLE_MARKER);
-
- num_left -= 2;
-
- while (num_left)
- {
- get_bits(8);
- num_left--;
- }
- }
-
- // Read a define restart interval (DRI) marker.
- void jpeg_decoder::read_dri_marker()
- {
- if (get_bits(16) != 4)
- stop_decoding(JPGD_BAD_DRI_LENGTH);
-
- m_restart_interval = get_bits(16);
- }
-
- // Read a start of scan (SOS) marker.
- void jpeg_decoder::read_sos_marker()
- {
- uint num_left;
- int i, ci, n, c, cc;
-
- num_left = get_bits(16);
-
- n = get_bits(8);
-
- m_comps_in_scan = n;
-
- num_left -= 3;
-
- if ( (num_left != (uint)(n * 2 + 3)) || (n < 1) || (n > JPGD_MAX_COMPS_IN_SCAN) )
- stop_decoding(JPGD_BAD_SOS_LENGTH);
-
- for (i = 0; i < n; i++)
- {
- cc = get_bits(8);
- c = get_bits(8);
- num_left -= 2;
-
- for (ci = 0; ci < m_comps_in_frame; ci++)
- if (cc == m_comp_ident[ci])
- break;
-
- if (ci >= m_comps_in_frame)
- stop_decoding(JPGD_BAD_SOS_COMP_ID);
-
- m_comp_list[i] = ci;
- m_comp_dc_tab[ci] = (c >> 4) & 15;
- m_comp_ac_tab[ci] = (c & 15) + (JPGD_MAX_HUFF_TABLES >> 1);
- }
-
- m_spectral_start = get_bits(8);
- m_spectral_end = get_bits(8);
- m_successive_high = get_bits(4);
- m_successive_low = get_bits(4);
-
- if (!m_progressive_flag)
- {
- m_spectral_start = 0;
- m_spectral_end = 63;
- }
-
- num_left -= 3;
-
- while (num_left) /* read past whatever is num_left */
- {
- get_bits(8);
- num_left--;
- }
- }
-
- // Finds the next marker.
- int jpeg_decoder::next_marker()
- {
- uint c, bytes;
-
- bytes = 0;
-
- do
- {
- do
- {
- bytes++;
- c = get_bits(8);
- } while (c != 0xFF);
-
- do
- {
- c = get_bits(8);
- } while (c == 0xFF);
-
- } while (c == 0);
-
- // If bytes > 0 here, there where extra bytes before the marker (not good).
-
- return c;
- }
-
- // Process markers. Returns when an SOFx, SOI, EOI, or SOS marker is
- // encountered.
- int jpeg_decoder::process_markers()
- {
- int c;
-
- for ( ; ; )
- {
- c = next_marker();
-
- switch (c)
- {
- case M_SOF0:
- case M_SOF1:
- case M_SOF2:
- case M_SOF3:
- case M_SOF5:
- case M_SOF6:
- case M_SOF7:
- // case M_JPG:
- case M_SOF9:
- case M_SOF10:
- case M_SOF11:
- case M_SOF13:
- case M_SOF14:
- case M_SOF15:
- case M_SOI:
- case M_EOI:
- case M_SOS:
- {
- return c;
- }
- case M_DHT:
- {
- read_dht_marker();
- break;
- }
- // No arithmitic support - dumb patents!
- case M_DAC:
- {
- stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT);
- break;
- }
- case M_DQT:
- {
- read_dqt_marker();
- break;
- }
- case M_DRI:
- {
- read_dri_marker();
- break;
- }
- //case M_APP0: /* no need to read the JFIF marker */
-
- case M_JPG:
- case M_RST0: /* no parameters */
- case M_RST1:
- case M_RST2:
- case M_RST3:
- case M_RST4:
- case M_RST5:
- case M_RST6:
- case M_RST7:
- case M_TEM:
- {
- stop_decoding(JPGD_UNEXPECTED_MARKER);
- break;
- }
- default: /* must be DNL, DHP, EXP, APPn, JPGn, COM, or RESn or APP0 */
- {
- skip_variable_marker();
- break;
- }
- }
- }
- }
-
- // Finds the start of image (SOI) marker.
- // This code is rather defensive: it only checks the first 512 bytes to avoid
- // false positives.
- void jpeg_decoder::locate_soi_marker()
- {
- uint lastchar, thischar;
- uint bytesleft;
-
- lastchar = get_bits(8);
-
- thischar = get_bits(8);
-
- /* ok if it's a normal JPEG file without a special header */
-
- if ((lastchar == 0xFF) && (thischar == M_SOI))
- return;
-
- bytesleft = 4096; //512;
-
- for ( ; ; )
- {
- if (--bytesleft == 0)
- stop_decoding(JPGD_NOT_JPEG);
-
- lastchar = thischar;
-
- thischar = get_bits(8);
-
- if (lastchar == 0xFF)
- {
- if (thischar == M_SOI)
- break;
- else if (thischar == M_EOI) // get_bits will keep returning M_EOI if we read past the end
- stop_decoding(JPGD_NOT_JPEG);
- }
- }
-
- // Check the next character after marker: if it's not 0xFF, it can't be the start of the next marker, so the file is bad.
- thischar = (m_bit_buf >> 24) & 0xFF;
-
- if (thischar != 0xFF)
- stop_decoding(JPGD_NOT_JPEG);
- }
-
- // Find a start of frame (SOF) marker.
- void jpeg_decoder::locate_sof_marker()
- {
- locate_soi_marker();
-
- int c = process_markers();
-
- switch (c)
- {
- case M_SOF2:
- m_progressive_flag = JPGD_TRUE;
- case M_SOF0: /* baseline DCT */
- case M_SOF1: /* extended sequential DCT */
- {
- read_sof_marker();
- break;
- }
- case M_SOF9: /* Arithmitic coding */
- {
- stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT);
- break;
- }
- default:
- {
- stop_decoding(JPGD_UNSUPPORTED_MARKER);
- break;
- }
- }
- }
-
- // Find a start of scan (SOS) marker.
- int jpeg_decoder::locate_sos_marker()
- {
- int c;
-
- c = process_markers();
-
- if (c == M_EOI)
- return JPGD_FALSE;
- else if (c != M_SOS)
- stop_decoding(JPGD_UNEXPECTED_MARKER);
-
- read_sos_marker();
-
- return JPGD_TRUE;
- }
-
- // Reset everything to default/uninitialized state.
- void jpeg_decoder::init(jpeg_decoder_stream *pStream)
- {
- m_pMem_blocks = NULL;
- m_error_code = JPGD_SUCCESS;
- m_ready_flag = false;
- m_image_x_size = m_image_y_size = 0;
- m_pStream = pStream;
- m_progressive_flag = JPGD_FALSE;
-
- memset(m_huff_ac, 0, sizeof(m_huff_ac));
- memset(m_huff_num, 0, sizeof(m_huff_num));
- memset(m_huff_val, 0, sizeof(m_huff_val));
- memset(m_quant, 0, sizeof(m_quant));
-
- m_scan_type = 0;
- m_comps_in_frame = 0;
-
- memset(m_comp_h_samp, 0, sizeof(m_comp_h_samp));
- memset(m_comp_v_samp, 0, sizeof(m_comp_v_samp));
- memset(m_comp_quant, 0, sizeof(m_comp_quant));
- memset(m_comp_ident, 0, sizeof(m_comp_ident));
- memset(m_comp_h_blocks, 0, sizeof(m_comp_h_blocks));
- memset(m_comp_v_blocks, 0, sizeof(m_comp_v_blocks));
-
- m_comps_in_scan = 0;
- memset(m_comp_list, 0, sizeof(m_comp_list));
- memset(m_comp_dc_tab, 0, sizeof(m_comp_dc_tab));
- memset(m_comp_ac_tab, 0, sizeof(m_comp_ac_tab));
-
- m_spectral_start = 0;
- m_spectral_end = 0;
- m_successive_low = 0;
- m_successive_high = 0;
- m_max_mcu_x_size = 0;
- m_max_mcu_y_size = 0;
- m_blocks_per_mcu = 0;
- m_max_blocks_per_row = 0;
- m_mcus_per_row = 0;
- m_mcus_per_col = 0;
- m_expanded_blocks_per_component = 0;
- m_expanded_blocks_per_mcu = 0;
- m_expanded_blocks_per_row = 0;
- m_freq_domain_chroma_upsample = false;
-
- memset(m_mcu_org, 0, sizeof(m_mcu_org));
-
- m_total_lines_left = 0;
- m_mcu_lines_left = 0;
- m_real_dest_bytes_per_scan_line = 0;
- m_dest_bytes_per_scan_line = 0;
- m_dest_bytes_per_pixel = 0;
-
- memset(m_pHuff_tabs, 0, sizeof(m_pHuff_tabs));
-
- memset(m_dc_coeffs, 0, sizeof(m_dc_coeffs));
- memset(m_ac_coeffs, 0, sizeof(m_ac_coeffs));
- memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu));
-
- m_eob_run = 0;
-
- memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu));
-
- m_pIn_buf_ofs = m_in_buf;
- m_in_buf_left = 0;
- m_eof_flag = false;
- m_tem_flag = 0;
-
- memset(m_in_buf_pad_start, 0, sizeof(m_in_buf_pad_start));
- memset(m_in_buf, 0, sizeof(m_in_buf));
- memset(m_in_buf_pad_end, 0, sizeof(m_in_buf_pad_end));
-
- m_restart_interval = 0;
- m_restarts_left = 0;
- m_next_restart_num = 0;
-
- m_max_mcus_per_row = 0;
- m_max_blocks_per_mcu = 0;
- m_max_mcus_per_col = 0;
-
- memset(m_last_dc_val, 0, sizeof(m_last_dc_val));
- m_pMCU_coefficients = NULL;
- m_pSample_buf = NULL;
-
- m_total_bytes_read = 0;
-
- m_pScan_line_0 = NULL;
- m_pScan_line_1 = NULL;
-
- // Ready the input buffer.
- prep_in_buffer();
-
- // Prime the bit buffer.
- m_bits_left = 16;
- m_bit_buf = 0;
-
- get_bits(16);
- get_bits(16);
-
- for (int i = 0; i < JPGD_MAX_BLOCKS_PER_MCU; i++)
- m_mcu_block_max_zag[i] = 64;
- }
-
-#define SCALEBITS 16
-#define ONE_HALF ((int) 1 << (SCALEBITS-1))
-#define FIX(x) ((int) ((x) * (1L<> SCALEBITS;
- m_cbb[i] = ( FIX(1.77200f) * k + ONE_HALF) >> SCALEBITS;
- m_crg[i] = (-FIX(0.71414f)) * k;
- m_cbg[i] = (-FIX(0.34414f)) * k + ONE_HALF;
- }
- }
-
- // This method throws back into the stream any bytes that where read
- // into the bit buffer during initial marker scanning.
- void jpeg_decoder::fix_in_buffer()
- {
- // In case any 0xFF's where pulled into the buffer during marker scanning.
- JPGD_ASSERT((m_bits_left & 7) == 0);
-
- if (m_bits_left == 16)
- stuff_char( (uint8)(m_bit_buf & 0xFF));
-
- if (m_bits_left >= 8)
- stuff_char( (uint8)((m_bit_buf >> 8) & 0xFF));
-
- stuff_char((uint8)((m_bit_buf >> 16) & 0xFF));
- stuff_char((uint8)((m_bit_buf >> 24) & 0xFF));
-
- m_bits_left = 16;
- get_bits_no_markers(16);
- get_bits_no_markers(16);
- }
-
- void jpeg_decoder::transform_mcu(int mcu_row)
- {
- jpgd_block_t* pSrc_ptr = m_pMCU_coefficients;
- uint8* pDst_ptr = m_pSample_buf + mcu_row * m_blocks_per_mcu * 64;
-
- for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
- {
- idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]);
- pSrc_ptr += 64;
- pDst_ptr += 64;
- }
- }
-
- static const uint8 s_max_rc[64] =
- {
- 17, 18, 34, 50, 50, 51, 52, 52, 52, 68, 84, 84, 84, 84, 85, 86, 86, 86, 86, 86,
- 102, 118, 118, 118, 118, 118, 118, 119, 120, 120, 120, 120, 120, 120, 120, 136,
- 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136,
- 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136
- };
-
- void jpeg_decoder::transform_mcu_expand(int mcu_row)
- {
- jpgd_block_t* pSrc_ptr = m_pMCU_coefficients;
- uint8* pDst_ptr = m_pSample_buf + mcu_row * m_expanded_blocks_per_mcu * 64;
-
- // Y IDCT
- int mcu_block;
- for (mcu_block = 0; mcu_block < m_expanded_blocks_per_component; mcu_block++)
- {
- idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]);
- pSrc_ptr += 64;
- pDst_ptr += 64;
- }
-
- // Chroma IDCT, with upsampling
- jpgd_block_t temp_block[64];
-
- for (int i = 0; i < 2; i++)
- {
- DCT_Upsample::Matrix44 P, Q, R, S;
-
- JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] >= 1);
- JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] <= 64);
-
- switch (s_max_rc[m_mcu_block_max_zag[mcu_block++] - 1])
- {
- case 1*16+1:
- DCT_Upsample::P_Q<1, 1>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<1, 1>::calc(R, S, pSrc_ptr);
- break;
- case 1*16+2:
- DCT_Upsample::P_Q<1, 2>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<1, 2>::calc(R, S, pSrc_ptr);
- break;
- case 2*16+2:
- DCT_Upsample::P_Q<2, 2>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<2, 2>::calc(R, S, pSrc_ptr);
- break;
- case 3*16+2:
- DCT_Upsample::P_Q<3, 2>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<3, 2>::calc(R, S, pSrc_ptr);
- break;
- case 3*16+3:
- DCT_Upsample::P_Q<3, 3>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<3, 3>::calc(R, S, pSrc_ptr);
- break;
- case 3*16+4:
- DCT_Upsample::P_Q<3, 4>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<3, 4>::calc(R, S, pSrc_ptr);
- break;
- case 4*16+4:
- DCT_Upsample::P_Q<4, 4>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<4, 4>::calc(R, S, pSrc_ptr);
- break;
- case 5*16+4:
- DCT_Upsample::P_Q<5, 4>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<5, 4>::calc(R, S, pSrc_ptr);
- break;
- case 5*16+5:
- DCT_Upsample::P_Q<5, 5>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<5, 5>::calc(R, S, pSrc_ptr);
- break;
- case 5*16+6:
- DCT_Upsample::P_Q<5, 6>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<5, 6>::calc(R, S, pSrc_ptr);
- break;
- case 6*16+6:
- DCT_Upsample::P_Q<6, 6>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<6, 6>::calc(R, S, pSrc_ptr);
- break;
- case 7*16+6:
- DCT_Upsample::P_Q<7, 6>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<7, 6>::calc(R, S, pSrc_ptr);
- break;
- case 7*16+7:
- DCT_Upsample::P_Q<7, 7>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<7, 7>::calc(R, S, pSrc_ptr);
- break;
- case 7*16+8:
- DCT_Upsample::P_Q<7, 8>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<7, 8>::calc(R, S, pSrc_ptr);
- break;
- case 8*16+8:
- DCT_Upsample::P_Q<8, 8>::calc(P, Q, pSrc_ptr);
- DCT_Upsample::R_S<8, 8>::calc(R, S, pSrc_ptr);
- break;
- default:
- JPGD_ASSERT(false);
- }
-
- DCT_Upsample::Matrix44 a(P + Q); P -= Q;
- DCT_Upsample::Matrix44& b = P;
- DCT_Upsample::Matrix44 c(R + S); R -= S;
- DCT_Upsample::Matrix44& d = R;
-
- DCT_Upsample::Matrix44::add_and_store(temp_block, a, c);
- idct_4x4(temp_block, pDst_ptr);
- pDst_ptr += 64;
-
- DCT_Upsample::Matrix44::sub_and_store(temp_block, a, c);
- idct_4x4(temp_block, pDst_ptr);
- pDst_ptr += 64;
-
- DCT_Upsample::Matrix44::add_and_store(temp_block, b, d);
- idct_4x4(temp_block, pDst_ptr);
- pDst_ptr += 64;
-
- DCT_Upsample::Matrix44::sub_and_store(temp_block, b, d);
- idct_4x4(temp_block, pDst_ptr);
- pDst_ptr += 64;
-
- pSrc_ptr += 64;
- }
- }
-
- // Loads and dequantizes the next row of (already decoded) coefficients.
- // Progressive images only.
- void jpeg_decoder::load_next_row()
- {
- int i;
- jpgd_block_t *p;
- jpgd_quant_t *q;
- int mcu_row, mcu_block, row_block = 0;
- int component_num, component_id;
- int block_x_mcu[JPGD_MAX_COMPONENTS];
-
- memset(block_x_mcu, 0, JPGD_MAX_COMPONENTS * sizeof(int));
-
- for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++)
- {
- int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0;
-
- for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
- {
- component_id = m_mcu_org[mcu_block];
- q = m_quant[m_comp_quant[component_id]];
-
- p = m_pMCU_coefficients + 64 * mcu_block;
-
- jpgd_block_t* pAC = coeff_buf_getp(m_ac_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs);
- jpgd_block_t* pDC = coeff_buf_getp(m_dc_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs);
- p[0] = pDC[0];
- memcpy(&p[1], &pAC[1], 63 * sizeof(jpgd_block_t));
-
- for (i = 63; i > 0; i--)
- if (p[g_ZAG[i]])
- break;
-
- m_mcu_block_max_zag[mcu_block] = i + 1;
-
- for ( ; i >= 0; i--)
- if (p[g_ZAG[i]])
- p[g_ZAG[i]] = static_cast(p[g_ZAG[i]] * q[i]);
-
- row_block++;
-
- if (m_comps_in_scan == 1)
- block_x_mcu[component_id]++;
- else
- {
- if (++block_x_mcu_ofs == m_comp_h_samp[component_id])
- {
- block_x_mcu_ofs = 0;
-
- if (++block_y_mcu_ofs == m_comp_v_samp[component_id])
- {
- block_y_mcu_ofs = 0;
-
- block_x_mcu[component_id] += m_comp_h_samp[component_id];
- }
- }
- }
- }
-
- if (m_freq_domain_chroma_upsample)
- transform_mcu_expand(mcu_row);
- else
- transform_mcu(mcu_row);
- }
-
- if (m_comps_in_scan == 1)
- m_block_y_mcu[m_comp_list[0]]++;
- else
- {
- for (component_num = 0; component_num < m_comps_in_scan; component_num++)
- {
- component_id = m_comp_list[component_num];
-
- m_block_y_mcu[component_id] += m_comp_v_samp[component_id];
- }
- }
- }
-
- // Restart interval processing.
- void jpeg_decoder::process_restart()
- {
- int i;
- int c = 0;
-
- // Align to a byte boundry
- // FIXME: Is this really necessary? get_bits_no_markers() never reads in markers!
- //get_bits_no_markers(m_bits_left & 7);
-
- // Let's scan a little bit to find the marker, but not _too_ far.
- // 1536 is a "fudge factor" that determines how much to scan.
- for (i = 1536; i > 0; i--)
- if (get_char() == 0xFF)
- break;
-
- if (i == 0)
- stop_decoding(JPGD_BAD_RESTART_MARKER);
-
- for ( ; i > 0; i--)
- if ((c = get_char()) != 0xFF)
- break;
-
- if (i == 0)
- stop_decoding(JPGD_BAD_RESTART_MARKER);
-
- // Is it the expected marker? If not, something bad happened.
- if (c != (m_next_restart_num + M_RST0))
- stop_decoding(JPGD_BAD_RESTART_MARKER);
-
- // Reset each component's DC prediction values.
- memset(&m_last_dc_val, 0, m_comps_in_frame * sizeof(uint));
-
- m_eob_run = 0;
-
- m_restarts_left = m_restart_interval;
-
- m_next_restart_num = (m_next_restart_num + 1) & 7;
-
- // Get the bit buffer going again...
-
- m_bits_left = 16;
- get_bits_no_markers(16);
- get_bits_no_markers(16);
- }
-
- static inline int dequantize_ac(int c, int q) { c *= q; return c; }
-
- // Decodes and dequantizes the next row of coefficients.
- void jpeg_decoder::decode_next_row()
- {
- int row_block = 0;
-
- for (int mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++)
- {
- if ((m_restart_interval) && (m_restarts_left == 0))
- process_restart();
-
- jpgd_block_t* p = m_pMCU_coefficients;
- for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++, p += 64)
- {
- int component_id = m_mcu_org[mcu_block];
- jpgd_quant_t* q = m_quant[m_comp_quant[component_id]];
-
- int r, s;
- s = huff_decode(m_pHuff_tabs[m_comp_dc_tab[component_id]], r);
- s = HUFF_EXTEND(r, s);
-
- m_last_dc_val[component_id] = (s += m_last_dc_val[component_id]);
-
- p[0] = static_cast(s * q[0]);
-
- int prev_num_set = m_mcu_block_max_zag[mcu_block];
-
- huff_tables *pH = m_pHuff_tabs[m_comp_ac_tab[component_id]];
-
- int k;
- for (k = 1; k < 64; k++)
- {
- int extra_bits;
- s = huff_decode(pH, extra_bits);
-
- r = s >> 4;
- s &= 15;
-
- if (s)
- {
- if (r)
- {
- if ((k + r) > 63)
- stop_decoding(JPGD_DECODE_ERROR);
-
- if (k < prev_num_set)
- {
- int n = JPGD_MIN(r, prev_num_set - k);
- int kt = k;
- while (n--)
- p[g_ZAG[kt++]] = 0;
- }
-
- k += r;
- }
-
- s = HUFF_EXTEND(extra_bits, s);
-
- JPGD_ASSERT(k < 64);
-
- p[g_ZAG[k]] = static_cast(dequantize_ac(s, q[k])); //s * q[k];
- }
- else
- {
- if (r == 15)
- {
- if ((k + 16) > 64)
- stop_decoding(JPGD_DECODE_ERROR);
-
- if (k < prev_num_set)
- {
- int n = JPGD_MIN(16, prev_num_set - k);
- int kt = k;
- while (n--)
- {
- JPGD_ASSERT(kt <= 63);
- p[g_ZAG[kt++]] = 0;
- }
- }
-
- k += 16 - 1; // - 1 because the loop counter is k
- // BEGIN EPIC MOD
- JPGD_ASSERT(k < 64 && p[g_ZAG[k]] == 0);
- // END EPIC MOD
- }
- else
- break;
- }
- }
-
- if (k < prev_num_set)
- {
- int kt = k;
- while (kt < prev_num_set)
- p[g_ZAG[kt++]] = 0;
- }
-
- m_mcu_block_max_zag[mcu_block] = k;
-
- row_block++;
- }
-
- if (m_freq_domain_chroma_upsample)
- transform_mcu_expand(mcu_row);
- else
- transform_mcu(mcu_row);
-
- m_restarts_left--;
- }
- }
-
- // YCbCr H1V1 (1x1:1:1, 3 m_blocks per MCU) to RGB
- void jpeg_decoder::H1V1Convert()
- {
- int row = m_max_mcu_y_size - m_mcu_lines_left;
- uint8 *d = m_pScan_line_0;
- uint8 *s = m_pSample_buf + row * 8;
-
- for (int i = m_max_mcus_per_row; i > 0; i--)
- {
- for (int j = 0; j < 8; j++)
- {
- int y = s[j];
- int cb = s[64+j];
- int cr = s[128+j];
-
- if (jpg_format == ERGBFormatJPG::BGRA)
- {
- d[0] = clamp(y + m_cbb[cb]);
- d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
- d[2] = clamp(y + m_crr[cr]);
- d[3] = 255;
- }
- else
- {
- d[0] = clamp(y + m_crr[cr]);
- d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
- d[2] = clamp(y + m_cbb[cb]);
- d[3] = 255;
- }
- d += 4;
- }
-
- s += 64*3;
- }
- }
-
- // YCbCr H2V1 (2x1:1:1, 4 m_blocks per MCU) to RGB
- void jpeg_decoder::H2V1Convert()
- {
- int row = m_max_mcu_y_size - m_mcu_lines_left;
- uint8 *d0 = m_pScan_line_0;
- uint8 *y = m_pSample_buf + row * 8;
- uint8 *c = m_pSample_buf + 2*64 + row * 8;
-
- for (int i = m_max_mcus_per_row; i > 0; i--)
- {
- for (int l = 0; l < 2; l++)
- {
- for (int j = 0; j < 4; j++)
- {
- int cb = c[0];
- int cr = c[64];
-
- int rc = m_crr[cr];
- int gc = ((m_crg[cr] + m_cbg[cb]) >> 16);
- int bc = m_cbb[cb];
-
- int yy = y[j<<1];
- if (jpg_format == ERGBFormatJPG::BGRA)
- {
- d0[0] = clamp(yy+bc);
- d0[1] = clamp(yy+gc);
- d0[2] = clamp(yy+rc);
- d0[3] = 255;
- yy = y[(j<<1)+1];
- d0[4] = clamp(yy+bc);
- d0[5] = clamp(yy+gc);
- d0[6] = clamp(yy+rc);
- d0[7] = 255;
- }
- else
- {
- d0[0] = clamp(yy+rc);
- d0[1] = clamp(yy+gc);
- d0[2] = clamp(yy+bc);
- d0[3] = 255;
- yy = y[(j<<1)+1];
- d0[4] = clamp(yy+rc);
- d0[5] = clamp(yy+gc);
- d0[6] = clamp(yy+bc);
- d0[7] = 255;
- }
-
- d0 += 8;
-
- c++;
- }
- y += 64;
- }
-
- y += 64*4 - 64*2;
- c += 64*4 - 8;
- }
- }
-
- // YCbCr H2V1 (1x2:1:1, 4 m_blocks per MCU) to RGB
- void jpeg_decoder::H1V2Convert()
- {
- int row = m_max_mcu_y_size - m_mcu_lines_left;
- uint8 *d0 = m_pScan_line_0;
- uint8 *d1 = m_pScan_line_1;
- uint8 *y;
- uint8 *c;
-
- if (row < 8)
- y = m_pSample_buf + row * 8;
- else
- y = m_pSample_buf + 64*1 + (row & 7) * 8;
-
- c = m_pSample_buf + 64*2 + (row >> 1) * 8;
-
- for (int i = m_max_mcus_per_row; i > 0; i--)
- {
- for (int j = 0; j < 8; j++)
- {
- int cb = c[0+j];
- int cr = c[64+j];
-
- int rc = m_crr[cr];
- int gc = ((m_crg[cr] + m_cbg[cb]) >> 16);
- int bc = m_cbb[cb];
-
- int yy = y[j];
- if (jpg_format == ERGBFormatJPG::BGRA)
- {
- d0[0] = clamp(yy+bc);
- d0[1] = clamp(yy+gc);
- d0[2] = clamp(yy+rc);
- d0[3] = 255;
- yy = y[8+j];
- d1[0] = clamp(yy+bc);
- d1[1] = clamp(yy+gc);
- d1[2] = clamp(yy+rc);
- d1[3] = 255;
- }
- else
- {
- d0[0] = clamp(yy+rc);
- d0[1] = clamp(yy+gc);
- d0[2] = clamp(yy+bc);
- d0[3] = 255;
- yy = y[8+j];
- d1[0] = clamp(yy+rc);
- d1[1] = clamp(yy+gc);
- d1[2] = clamp(yy+bc);
- d1[3] = 255;
- }
-
- d0 += 4;
- d1 += 4;
- }
-
- y += 64*4;
- c += 64*4;
- }
- }
-
- // YCbCr H2V2 (2x2:1:1, 6 m_blocks per MCU) to RGB
- void jpeg_decoder::H2V2Convert()
- {
- int row = m_max_mcu_y_size - m_mcu_lines_left;
- uint8 *d0 = m_pScan_line_0;
- uint8 *d1 = m_pScan_line_1;
- uint8 *y;
- uint8 *c;
-
- if (row < 8)
- y = m_pSample_buf + row * 8;
- else
- y = m_pSample_buf + 64*2 + (row & 7) * 8;
-
- c = m_pSample_buf + 64*4 + (row >> 1) * 8;
-
- for (int i = m_max_mcus_per_row; i > 0; i--)
- {
- for (int l = 0; l < 2; l++)
- {
- for (int j = 0; j < 8; j += 2)
- {
- int cb = c[0];
- int cr = c[64];
-
- int rc = m_crr[cr];
- int gc = ((m_crg[cr] + m_cbg[cb]) >> 16);
- int bc = m_cbb[cb];
-
- int yy = y[j];
- if (jpg_format == ERGBFormatJPG::BGRA)
- {
- d0[0] = clamp(yy+bc);
- d0[1] = clamp(yy+gc);
- d0[2] = clamp(yy+rc);
- d0[3] = 255;
- yy = y[j+1];
- d0[4] = clamp(yy+bc);
- d0[5] = clamp(yy+gc);
- d0[6] = clamp(yy+rc);
- d0[7] = 255;
- yy = y[j+8];
- d1[0] = clamp(yy+bc);
- d1[1] = clamp(yy+gc);
- d1[2] = clamp(yy+rc);
- d1[3] = 255;
- yy = y[j+8+1];
- d1[4] = clamp(yy+bc);
- d1[5] = clamp(yy+gc);
- d1[6] = clamp(yy+rc);
- d1[7] = 255;
- }
- else
- {
- d0[0] = clamp(yy+rc);
- d0[1] = clamp(yy+gc);
- d0[2] = clamp(yy+bc);
- d0[3] = 255;
- yy = y[j+1];
- d0[4] = clamp(yy+rc);
- d0[5] = clamp(yy+gc);
- d0[6] = clamp(yy+bc);
- d0[7] = 255;
- yy = y[j+8];
- d1[0] = clamp(yy+rc);
- d1[1] = clamp(yy+gc);
- d1[2] = clamp(yy+bc);
- d1[3] = 255;
- yy = y[j+8+1];
- d1[4] = clamp(yy+rc);
- d1[5] = clamp(yy+gc);
- d1[6] = clamp(yy+bc);
- d1[7] = 255;
- }
-
- d0 += 8;
- d1 += 8;
-
- c++;
- }
- y += 64;
- }
-
- y += 64*6 - 64*2;
- c += 64*6 - 8;
- }
- }
-
- // Y (1 block per MCU) to 8-bit grayscale
- void jpeg_decoder::gray_convert()
- {
- int row = m_max_mcu_y_size - m_mcu_lines_left;
- uint8 *d = m_pScan_line_0;
- uint8 *s = m_pSample_buf + row * 8;
-
- for (int i = m_max_mcus_per_row; i > 0; i--)
- {
- *(uint *)d = *(uint *)s;
- *(uint *)(&d[4]) = *(uint *)(&s[4]);
-
- s += 64;
- d += 8;
- }
- }
-
- void jpeg_decoder::expanded_convert()
- {
- int row = m_max_mcu_y_size - m_mcu_lines_left;
-
- uint8* Py = m_pSample_buf + (row / 8) * 64 * m_comp_h_samp[0] + (row & 7) * 8;
-
- uint8* d = m_pScan_line_0;
-
- for (int i = m_max_mcus_per_row; i > 0; i--)
- {
- for (int k = 0; k < m_max_mcu_x_size; k += 8)
- {
- const int Y_ofs = k * 8;
- const int Cb_ofs = Y_ofs + 64 * m_expanded_blocks_per_component;
- const int Cr_ofs = Y_ofs + 64 * m_expanded_blocks_per_component * 2;
- for (int j = 0; j < 8; j++)
- {
- int y = Py[Y_ofs + j];
- int cb = Py[Cb_ofs + j];
- int cr = Py[Cr_ofs + j];
-
- if (jpg_format == ERGBFormatJPG::BGRA)
- {
- d[0] = clamp(y + m_cbb[cb]);
- d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
- d[2] = clamp(y + m_crr[cr]);
- d[3] = 255;
- }
- else
- {
- d[0] = clamp(y + m_crr[cr]);
- d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
- d[2] = clamp(y + m_cbb[cb]);
- d[3] = 255;
- }
-
- d += 4;
- }
- }
-
- Py += 64 * m_expanded_blocks_per_mcu;
- }
- }
-
- // Find end of image (EOI) marker, so we can return to the user the exact size of the input stream.
- void jpeg_decoder::find_eoi()
- {
- if (!m_progressive_flag)
- {
- // Attempt to read the EOI marker.
- //get_bits_no_markers(m_bits_left & 7);
-
- // Prime the bit buffer
- m_bits_left = 16;
- get_bits(16);
- get_bits(16);
-
- // The next marker _should_ be EOI
- process_markers();
- }
-
- m_total_bytes_read -= m_in_buf_left;
- }
-
- int jpeg_decoder::decode(const void** pScan_line, uint* pScan_line_len)
- {
- if ((m_error_code) || (!m_ready_flag))
- return JPGD_FAILED;
-
- if (m_total_lines_left == 0)
- return JPGD_DONE;
-
- if (m_mcu_lines_left == 0)
- {
- if (setjmp(m_jmp_state))
- return JPGD_FAILED;
-
- if (m_progressive_flag)
- load_next_row();
- else
- decode_next_row();
-
- // Find the EOI marker if that was the last row.
- if (m_total_lines_left <= m_max_mcu_y_size)
- find_eoi();
-
- m_mcu_lines_left = m_max_mcu_y_size;
- }
-
- if (m_freq_domain_chroma_upsample)
- {
- expanded_convert();
- *pScan_line = m_pScan_line_0;
- }
- else
- {
- switch (m_scan_type)
- {
- case JPGD_YH2V2:
- {
- if ((m_mcu_lines_left & 1) == 0)
- {
- H2V2Convert();
- *pScan_line = m_pScan_line_0;
- }
- else
- *pScan_line = m_pScan_line_1;
-
- break;
- }
- case JPGD_YH2V1:
- {
- H2V1Convert();
- *pScan_line = m_pScan_line_0;
- break;
- }
- case JPGD_YH1V2:
- {
- if ((m_mcu_lines_left & 1) == 0)
- {
- H1V2Convert();
- *pScan_line = m_pScan_line_0;
- }
- else
- *pScan_line = m_pScan_line_1;
-
- break;
- }
- case JPGD_YH1V1:
- {
- H1V1Convert();
- *pScan_line = m_pScan_line_0;
- break;
- }
- case JPGD_GRAYSCALE:
- {
- gray_convert();
- *pScan_line = m_pScan_line_0;
-
- break;
- }
- }
- }
-
- *pScan_line_len = m_real_dest_bytes_per_scan_line;
-
- m_mcu_lines_left--;
- m_total_lines_left--;
-
- return JPGD_SUCCESS;
- }
-
- // Creates the tables needed for efficient Huffman decoding.
- void jpeg_decoder::make_huff_table(int index, huff_tables *pH)
- {
- int p, i, l, si;
- uint8 huffsize[257];
- uint huffcode[257];
- uint code;
- uint subtree;
- int code_size;
- int lastp;
- int nextfreeentry;
- int currententry;
-
- pH->ac_table = m_huff_ac[index] != 0;
-
- p = 0;
-
- for (l = 1; l <= 16; l++)
- {
- for (i = 1; i <= m_huff_num[index][l]; i++)
- huffsize[p++] = static_cast(l);
- }
-
- huffsize[p] = 0;
-
- lastp = p;
-
- code = 0;
- si = huffsize[0];
- p = 0;
-
- while (huffsize[p])
- {
- while (huffsize[p] == si)
- {
- huffcode[p++] = code;
- code++;
- }
-
- code <<= 1;
- si++;
- }
-
- memset(pH->look_up, 0, sizeof(pH->look_up));
- memset(pH->look_up2, 0, sizeof(pH->look_up2));
- memset(pH->tree, 0, sizeof(pH->tree));
- memset(pH->code_size, 0, sizeof(pH->code_size));
-
- nextfreeentry = -1;
-
- p = 0;
-
- while (p < lastp)
- {
- i = m_huff_val[index][p];
- code = huffcode[p];
- code_size = huffsize[p];
-
- pH->code_size[i] = static_cast(code_size);
-
- if (code_size <= 8)
- {
- code <<= (8 - code_size);
-
- for (l = 1 << (8 - code_size); l > 0; l--)
- {
- JPGD_ASSERT(i < 256);
-
- pH->look_up[code] = i;
-
- bool has_extrabits = false;
- int extra_bits = 0;
- int num_extra_bits = i & 15;
-
- int bits_to_fetch = code_size;
- if (num_extra_bits)
- {
- int total_codesize = code_size + num_extra_bits;
- if (total_codesize <= 8)
- {
- has_extrabits = true;
- extra_bits = ((1 << num_extra_bits) - 1) & (code >> (8 - total_codesize));
- JPGD_ASSERT(extra_bits <= 0x7FFF);
- bits_to_fetch += num_extra_bits;
- }
- }
-
- if (!has_extrabits)
- pH->look_up2[code] = i | (bits_to_fetch << 8);
- else
- pH->look_up2[code] = i | 0x8000 | (extra_bits << 16) | (bits_to_fetch << 8);
-
- code++;
- }
- }
- else
- {
- subtree = (code >> (code_size - 8)) & 0xFF;
-
- currententry = pH->look_up[subtree];
-
- if (currententry == 0)
- {
- pH->look_up[subtree] = currententry = nextfreeentry;
- pH->look_up2[subtree] = currententry = nextfreeentry;
-
- nextfreeentry -= 2;
- }
-
- code <<= (16 - (code_size - 8));
-
- for (l = code_size; l > 9; l--)
- {
- if ((code & 0x8000) == 0)
- currententry--;
-
- if (pH->tree[-currententry - 1] == 0)
- {
- pH->tree[-currententry - 1] = nextfreeentry;
-
- currententry = nextfreeentry;
-
- nextfreeentry -= 2;
- }
- else
- currententry = pH->tree[-currententry - 1];
-
- code <<= 1;
- }
-
- if ((code & 0x8000) == 0)
- currententry--;
-
- pH->tree[-currententry - 1] = i;
- }
-
- p++;
- }
- }
-
- // Verifies the quantization tables needed for this scan are available.
- void jpeg_decoder::check_quant_tables()
- {
- for (int i = 0; i < m_comps_in_scan; i++)
- if (m_quant[m_comp_quant[m_comp_list[i]]] == NULL)
- stop_decoding(JPGD_UNDEFINED_QUANT_TABLE);
- }
-
- // Verifies that all the Huffman tables needed for this scan are available.
- void jpeg_decoder::check_huff_tables()
- {
- for (int i = 0; i < m_comps_in_scan; i++)
- {
- if ((m_spectral_start == 0) && (m_huff_num[m_comp_dc_tab[m_comp_list[i]]] == NULL))
- stop_decoding(JPGD_UNDEFINED_HUFF_TABLE);
-
- if ((m_spectral_end > 0) && (m_huff_num[m_comp_ac_tab[m_comp_list[i]]] == NULL))
- stop_decoding(JPGD_UNDEFINED_HUFF_TABLE);
- }
-
- for (int i = 0; i < JPGD_MAX_HUFF_TABLES; i++)
- if (m_huff_num[i])
- {
- if (!m_pHuff_tabs[i])
- m_pHuff_tabs[i] = (huff_tables *)alloc(sizeof(huff_tables));
-
- make_huff_table(i, m_pHuff_tabs[i]);
- }
- }
-
- // Determines the component order inside each MCU.
- // Also calcs how many MCU's are on each row, etc.
- void jpeg_decoder::calc_mcu_block_order()
- {
- int component_num, component_id;
- int max_h_samp = 0, max_v_samp = 0;
-
- for (component_id = 0; component_id < m_comps_in_frame; component_id++)
- {
- if (m_comp_h_samp[component_id] > max_h_samp)
- max_h_samp = m_comp_h_samp[component_id];
-
- if (m_comp_v_samp[component_id] > max_v_samp)
- max_v_samp = m_comp_v_samp[component_id];
- }
-
- for (component_id = 0; component_id < m_comps_in_frame; component_id++)
- {
- m_comp_h_blocks[component_id] = ((((m_image_x_size * m_comp_h_samp[component_id]) + (max_h_samp - 1)) / max_h_samp) + 7) / 8;
- m_comp_v_blocks[component_id] = ((((m_image_y_size * m_comp_v_samp[component_id]) + (max_v_samp - 1)) / max_v_samp) + 7) / 8;
- }
-
- if (m_comps_in_scan == 1)
- {
- m_mcus_per_row = m_comp_h_blocks[m_comp_list[0]];
- m_mcus_per_col = m_comp_v_blocks[m_comp_list[0]];
- }
- else
- {
- m_mcus_per_row = (((m_image_x_size + 7) / 8) + (max_h_samp - 1)) / max_h_samp;
- m_mcus_per_col = (((m_image_y_size + 7) / 8) + (max_v_samp - 1)) / max_v_samp;
- }
-
- if (m_comps_in_scan == 1)
- {
- m_mcu_org[0] = m_comp_list[0];
-
- m_blocks_per_mcu = 1;
- }
- else
- {
- m_blocks_per_mcu = 0;
-
- for (component_num = 0; component_num < m_comps_in_scan; component_num++)
- {
- int num_blocks;
-
- component_id = m_comp_list[component_num];
-
- num_blocks = m_comp_h_samp[component_id] * m_comp_v_samp[component_id];
-
- while (num_blocks--)
- m_mcu_org[m_blocks_per_mcu++] = component_id;
- }
- }
- }
-
- // Starts a new scan.
- int jpeg_decoder::init_scan()
- {
- if (!locate_sos_marker())
- return JPGD_FALSE;
-
- calc_mcu_block_order();
-
- check_huff_tables();
-
- check_quant_tables();
-
- memset(m_last_dc_val, 0, m_comps_in_frame * sizeof(uint));
-
- m_eob_run = 0;
-
- if (m_restart_interval)
- {
- m_restarts_left = m_restart_interval;
- m_next_restart_num = 0;
- }
-
- fix_in_buffer();
-
- return JPGD_TRUE;
- }
-
- // Starts a frame. Determines if the number of components or sampling factors
- // are supported.
- void jpeg_decoder::init_frame()
- {
- int i;
-
- if (m_comps_in_frame == 1)
- {
- if ((m_comp_h_samp[0] != 1) || (m_comp_v_samp[0] != 1))
- stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS);
-
- m_scan_type = JPGD_GRAYSCALE;
- m_max_blocks_per_mcu = 1;
- m_max_mcu_x_size = 8;
- m_max_mcu_y_size = 8;
- }
- else if (m_comps_in_frame == 3)
- {
- if ( ((m_comp_h_samp[1] != 1) || (m_comp_v_samp[1] != 1)) ||
- ((m_comp_h_samp[2] != 1) || (m_comp_v_samp[2] != 1)) )
- stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS);
-
- if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
- {
- m_scan_type = JPGD_YH1V1;
-
- m_max_blocks_per_mcu = 3;
- m_max_mcu_x_size = 8;
- m_max_mcu_y_size = 8;
- }
- else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
- {
- m_scan_type = JPGD_YH2V1;
- m_max_blocks_per_mcu = 4;
- m_max_mcu_x_size = 16;
- m_max_mcu_y_size = 8;
- }
- else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 2))
- {
- m_scan_type = JPGD_YH1V2;
- m_max_blocks_per_mcu = 4;
- m_max_mcu_x_size = 8;
- m_max_mcu_y_size = 16;
- }
- else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
- {
- m_scan_type = JPGD_YH2V2;
- m_max_blocks_per_mcu = 6;
- m_max_mcu_x_size = 16;
- m_max_mcu_y_size = 16;
- }
- else
- stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS);
- }
- else
- stop_decoding(JPGD_UNSUPPORTED_COLORSPACE);
-
- m_max_mcus_per_row = (m_image_x_size + (m_max_mcu_x_size - 1)) / m_max_mcu_x_size;
- m_max_mcus_per_col = (m_image_y_size + (m_max_mcu_y_size - 1)) / m_max_mcu_y_size;
-
- // These values are for the *destination* pixels: after conversion.
- if (m_scan_type == JPGD_GRAYSCALE)
- m_dest_bytes_per_pixel = 1;
- else
- m_dest_bytes_per_pixel = 4;
-
- m_dest_bytes_per_scan_line = ((m_image_x_size + 15) & 0xFFF0) * m_dest_bytes_per_pixel;
-
- m_real_dest_bytes_per_scan_line = (m_image_x_size * m_dest_bytes_per_pixel);
-
- // Initialize two scan line buffers.
- m_pScan_line_0 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true);
- if ((m_scan_type == JPGD_YH1V2) || (m_scan_type == JPGD_YH2V2))
- m_pScan_line_1 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true);
-
- m_max_blocks_per_row = m_max_mcus_per_row * m_max_blocks_per_mcu;
-
- // Should never happen
- if (m_max_blocks_per_row > JPGD_MAX_BLOCKS_PER_ROW)
- stop_decoding(JPGD_ASSERTION_ERROR);
-
- // Allocate the coefficient buffer, enough for one MCU
- m_pMCU_coefficients = (jpgd_block_t*)alloc(m_max_blocks_per_mcu * 64 * sizeof(jpgd_block_t));
-
- for (i = 0; i < m_max_blocks_per_mcu; i++)
- m_mcu_block_max_zag[i] = 64;
-
- m_expanded_blocks_per_component = m_comp_h_samp[0] * m_comp_v_samp[0];
- m_expanded_blocks_per_mcu = m_expanded_blocks_per_component * m_comps_in_frame;
- m_expanded_blocks_per_row = m_max_mcus_per_row * m_expanded_blocks_per_mcu;
- // Freq. domain chroma upsampling is only supported for H2V2 subsampling factor.
-// BEGIN EPIC MOD
-#if JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING
- m_freq_domain_chroma_upsample = (m_expanded_blocks_per_mcu == 4*3);
-#else
- m_freq_domain_chroma_upsample = 0;
-#endif
-// END EPIC MOD
-
- if (m_freq_domain_chroma_upsample)
- m_pSample_buf = (uint8 *)alloc(m_expanded_blocks_per_row * 64);
- else
- m_pSample_buf = (uint8 *)alloc(m_max_blocks_per_row * 64);
-
- m_total_lines_left = m_image_y_size;
-
- m_mcu_lines_left = 0;
-
- create_look_ups();
- }
-
- // The coeff_buf series of methods originally stored the coefficients
- // into a "virtual" file which was located in EMS, XMS, or a disk file. A cache
- // was used to make this process more efficient. Now, we can store the entire
- // thing in RAM.
- jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y)
- {
- coeff_buf* cb = (coeff_buf*)alloc(sizeof(coeff_buf));
-
- cb->block_num_x = block_num_x;
- cb->block_num_y = block_num_y;
- cb->block_len_x = block_len_x;
- cb->block_len_y = block_len_y;
- cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t);
- cb->pData = (uint8 *)alloc(cb->block_size * block_num_x * block_num_y, true);
- return cb;
- }
-
- inline jpgd_block_t *jpeg_decoder::coeff_buf_getp(coeff_buf *cb, int block_x, int block_y)
- {
- JPGD_ASSERT((block_x < cb->block_num_x) && (block_y < cb->block_num_y));
- return (jpgd_block_t *)(cb->pData + block_x * cb->block_size + block_y * (cb->block_size * cb->block_num_x));
- }
-
- // The following methods decode the various types of m_blocks encountered
- // in progressively encoded images.
- void jpeg_decoder::decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y)
- {
- int s, r;
- jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y);
-
- if ((s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_dc_tab[component_id]])) != 0)
- {
- r = pD->get_bits_no_markers(s);
- s = HUFF_EXTEND(r, s);
- }
-
- pD->m_last_dc_val[component_id] = (s += pD->m_last_dc_val[component_id]);
-
- p[0] = static_cast(s << pD->m_successive_low);
- }
-
- void jpeg_decoder::decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y)
- {
- if (pD->get_bits_no_markers(1))
- {
- jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y);
-
- p[0] |= (1 << pD->m_successive_low);
- }
- }
-
- void jpeg_decoder::decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y)
- {
- int k, s, r;
-
- if (pD->m_eob_run)
- {
- pD->m_eob_run--;
- return;
- }
-
- jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y);
-
- for (k = pD->m_spectral_start; k <= pD->m_spectral_end; k++)
- {
- s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]);
-
- r = s >> 4;
- s &= 15;
-
- if (s)
- {
- if ((k += r) > 63)
- pD->stop_decoding(JPGD_DECODE_ERROR);
-
- r = pD->get_bits_no_markers(s);
- s = HUFF_EXTEND(r, s);
-
- p[g_ZAG[k]] = static_cast(s << pD->m_successive_low);
- }
- else
- {
- if (r == 15)
- {
- if ((k += 15) > 63)
- pD->stop_decoding(JPGD_DECODE_ERROR);
- }
- else
- {
- pD->m_eob_run = 1 << r;
-
- if (r)
- pD->m_eob_run += pD->get_bits_no_markers(r);
-
- pD->m_eob_run--;
-
- break;
- }
- }
- }
- }
-
- void jpeg_decoder::decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y)
- {
- int s, k, r;
- int p1 = 1 << pD->m_successive_low;
- int m1 = (-1) << pD->m_successive_low;
- jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y);
-
- k = pD->m_spectral_start;
-
- if (pD->m_eob_run == 0)
- {
- for ( ; k <= pD->m_spectral_end; k++)
- {
- s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]);
-
- r = s >> 4;
- s &= 15;
-
- if (s)
- {
- if (s != 1)
- pD->stop_decoding(JPGD_DECODE_ERROR);
-
- if (pD->get_bits_no_markers(1))
- s = p1;
- else
- s = m1;
- }
- else
- {
- if (r != 15)
- {
- pD->m_eob_run = 1 << r;
-
- if (r)
- pD->m_eob_run += pD->get_bits_no_markers(r);
-
- break;
- }
- }
-
- do
- {
- // BEGIN EPIC MOD
- JPGD_ASSERT(k < 64);
- // END EPIC MOD
-
- jpgd_block_t *this_coef = p + g_ZAG[k];
-
- if (*this_coef != 0)
- {
- if (pD->get_bits_no_markers(1))
- {
- if ((*this_coef & p1) == 0)
- {
- if (*this_coef >= 0)
- *this_coef = static_cast(*this_coef + p1);
- else
- *this_coef = static_cast(*this_coef + m1);
- }
- }
- }
- else
- {
- if (--r < 0)
- break;
- }
-
- k++;
-
- } while (k <= pD->m_spectral_end);
-
- if ((s) && (k < 64))
- {
- p[g_ZAG[k]] = static_cast(s);
- }
- }
- }
-
- if (pD->m_eob_run > 0)
- {
- for ( ; k <= pD->m_spectral_end; k++)
- {
- // BEGIN EPIC MOD
- JPGD_ASSERT(k < 64);
- // END EPIC MOD
-
- jpgd_block_t *this_coef = p + g_ZAG[k];
-
- if (*this_coef != 0)
- {
- if (pD->get_bits_no_markers(1))
- {
- if ((*this_coef & p1) == 0)
- {
- if (*this_coef >= 0)
- *this_coef = static_cast(*this_coef + p1);
- else
- *this_coef = static_cast(*this_coef + m1);
- }
- }
- }
- }
-
- pD->m_eob_run--;
- }
- }
-
- // Decode a scan in a progressively encoded image.
- void jpeg_decoder::decode_scan(pDecode_block_func decode_block_func)
- {
- int mcu_row, mcu_col, mcu_block;
- int block_x_mcu[JPGD_MAX_COMPONENTS], m_block_y_mcu[JPGD_MAX_COMPONENTS];
-
- memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu));
-
- for (mcu_col = 0; mcu_col < m_mcus_per_col; mcu_col++)
- {
- int component_num, component_id;
-
- memset(block_x_mcu, 0, sizeof(block_x_mcu));
-
- for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++)
- {
- int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0;
-
- if ((m_restart_interval) && (m_restarts_left == 0))
- process_restart();
-
- for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
- {
- component_id = m_mcu_org[mcu_block];
-
- decode_block_func(this, component_id, block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs);
-
- if (m_comps_in_scan == 1)
- block_x_mcu[component_id]++;
- else
- {
- if (++block_x_mcu_ofs == m_comp_h_samp[component_id])
- {
- block_x_mcu_ofs = 0;
-
- if (++block_y_mcu_ofs == m_comp_v_samp[component_id])
- {
- block_y_mcu_ofs = 0;
- block_x_mcu[component_id] += m_comp_h_samp[component_id];
- }
- }
- }
- }
-
- m_restarts_left--;
- }
-
- if (m_comps_in_scan == 1)
- m_block_y_mcu[m_comp_list[0]]++;
- else
- {
- for (component_num = 0; component_num < m_comps_in_scan; component_num++)
- {
- component_id = m_comp_list[component_num];
- m_block_y_mcu[component_id] += m_comp_v_samp[component_id];
- }
- }
- }
- }
-
- // Decode a progressively encoded image.
- void jpeg_decoder::init_progressive()
- {
- int i;
-
- if (m_comps_in_frame == 4)
- stop_decoding(JPGD_UNSUPPORTED_COLORSPACE);
-
- // Allocate the coefficient buffers.
- for (i = 0; i < m_comps_in_frame; i++)
- {
- m_dc_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 1, 1);
- m_ac_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 8, 8);
- }
-
- for ( ; ; )
- {
- int dc_only_scan, refinement_scan;
- pDecode_block_func decode_block_func;
-
- if (!init_scan())
- break;
-
- dc_only_scan = (m_spectral_start == 0);
- refinement_scan = (m_successive_high != 0);
-
- if ((m_spectral_start > m_spectral_end) || (m_spectral_end > 63))
- stop_decoding(JPGD_BAD_SOS_SPECTRAL);
-
- if (dc_only_scan)
- {
- if (m_spectral_end)
- stop_decoding(JPGD_BAD_SOS_SPECTRAL);
- }
- else if (m_comps_in_scan != 1) /* AC scans can only contain one component */
- stop_decoding(JPGD_BAD_SOS_SPECTRAL);
-
- if ((refinement_scan) && (m_successive_low != m_successive_high - 1))
- stop_decoding(JPGD_BAD_SOS_SUCCESSIVE);
-
- if (dc_only_scan)
- {
- if (refinement_scan)
- decode_block_func = decode_block_dc_refine;
- else
- decode_block_func = decode_block_dc_first;
- }
- else
- {
- if (refinement_scan)
- decode_block_func = decode_block_ac_refine;
- else
- decode_block_func = decode_block_ac_first;
- }
-
- decode_scan(decode_block_func);
-
- m_bits_left = 16;
- get_bits(16);
- get_bits(16);
- }
-
- m_comps_in_scan = m_comps_in_frame;
-
- for (i = 0; i < m_comps_in_frame; i++)
- m_comp_list[i] = i;
-
- calc_mcu_block_order();
- }
-
- void jpeg_decoder::init_sequential()
- {
- if (!init_scan())
- stop_decoding(JPGD_UNEXPECTED_MARKER);
- }
-
- void jpeg_decoder::decode_start()
- {
- init_frame();
-
- if (m_progressive_flag)
- init_progressive();
- else
- init_sequential();
- }
-
- void jpeg_decoder::decode_init(jpeg_decoder_stream *pStream)
- {
- init(pStream);
- locate_sof_marker();
- }
-
- jpeg_decoder::jpeg_decoder(jpeg_decoder_stream *pStream)
- {
- if (setjmp(m_jmp_state))
- return;
- decode_init(pStream);
- }
-
- int jpeg_decoder::begin_decoding()
- {
- if (m_ready_flag)
- return JPGD_SUCCESS;
-
- if (m_error_code)
- return JPGD_FAILED;
-
- if (setjmp(m_jmp_state))
- return JPGD_FAILED;
-
- decode_start();
-
- m_ready_flag = true;
-
- return JPGD_SUCCESS;
- }
-
- jpeg_decoder::~jpeg_decoder()
- {
- free_all_blocks();
- }
-
- jpeg_decoder_file_stream::jpeg_decoder_file_stream()
- {
- m_pFile = NULL;
- m_eof_flag = false;
- m_error_flag = false;
- }
-
- void jpeg_decoder_file_stream::close()
- {
- if (m_pFile)
- {
- fclose(m_pFile);
- m_pFile = NULL;
- }
-
- m_eof_flag = false;
- m_error_flag = false;
- }
-
- jpeg_decoder_file_stream::~jpeg_decoder_file_stream()
- {
- close();
- }
-
- bool jpeg_decoder_file_stream::open(const char *Pfilename)
- {
- close();
-
- m_eof_flag = false;
- m_error_flag = false;
-
-#if defined(_MSC_VER)
- m_pFile = NULL;
- fopen_s(&m_pFile, Pfilename, "rb");
-#else
- m_pFile = fopen(Pfilename, "rb");
-#endif
- return m_pFile != NULL;
- }
-
- int jpeg_decoder_file_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag)
- {
- if (!m_pFile)
- return -1;
-
- if (m_eof_flag)
- {
- *pEOF_flag = true;
- return 0;
- }
-
- if (m_error_flag)
- return -1;
-
- int bytes_read = static_cast(fread(pBuf, 1, max_bytes_to_read, m_pFile));
- if (bytes_read < max_bytes_to_read)
- {
- if (ferror(m_pFile))
- {
- m_error_flag = true;
- return -1;
- }
-
- m_eof_flag = true;
- *pEOF_flag = true;
- }
-
- return bytes_read;
- }
-
- bool jpeg_decoder_mem_stream::open(const uint8 *pSrc_data, uint size)
- {
- close();
- m_pSrc_data = pSrc_data;
- m_ofs = 0;
- m_size = size;
- return true;
- }
-
- int jpeg_decoder_mem_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag)
- {
- *pEOF_flag = false;
-
- if (!m_pSrc_data)
- return -1;
-
- uint bytes_remaining = m_size - m_ofs;
- if ((uint)max_bytes_to_read > bytes_remaining)
- {
- max_bytes_to_read = bytes_remaining;
- *pEOF_flag = true;
- }
-
- memcpy(pBuf, m_pSrc_data + m_ofs, max_bytes_to_read);
- m_ofs += max_bytes_to_read;
-
- return max_bytes_to_read;
- }
-
- unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps)
- {
- if (!actual_comps)
- return NULL;
- *actual_comps = 0;
-
- if ((!pStream) || (!width) || (!height) || (!req_comps))
- return NULL;
-
- if ((req_comps != 1) && (req_comps != 3) && (req_comps != 4))
- return NULL;
-
- jpeg_decoder decoder(pStream);
- if (decoder.get_error_code() != JPGD_SUCCESS)
- return NULL;
-
- const int image_width = decoder.get_width(), image_height = decoder.get_height();
- *width = image_width;
- *height = image_height;
- *actual_comps = decoder.get_num_components();
-
- if (decoder.begin_decoding() != JPGD_SUCCESS)
- return NULL;
-
- const int dst_bpl = image_width * req_comps;
-
- uint8 *pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height);
- if (!pImage_data)
- return NULL;
-
- for (int y = 0; y < image_height; y++)
- {
- const uint8* pScan_line = 0;
- uint scan_line_len;
- if (decoder.decode((const void**)&pScan_line, &scan_line_len) != JPGD_SUCCESS)
- {
- jpgd_free(pImage_data);
- return NULL;
- }
-
- uint8 *pDst = pImage_data + y * dst_bpl;
-
- if (((req_comps == 4) && (decoder.get_num_components() == 3)) ||
- ((req_comps == 1) && (decoder.get_num_components() == 1)))
- {
- memcpy(pDst, pScan_line, dst_bpl);
- }
- else if (decoder.get_num_components() == 1)
- {
- if (req_comps == 3)
- {
- for (int x = 0; x < image_width; x++)
- {
- uint8 luma = pScan_line[x];
- pDst[0] = luma;
- pDst[1] = luma;
- pDst[2] = luma;
- pDst += 3;
- }
- }
- else
- {
- for (int x = 0; x < image_width; x++)
- {
- uint8 luma = pScan_line[x];
- pDst[0] = luma;
- pDst[1] = luma;
- pDst[2] = luma;
- pDst[3] = 255;
- pDst += 4;
- }
- }
- }
- else if (decoder.get_num_components() == 3)
- {
- if (req_comps == 1)
- {
- const int YR = 19595, YG = 38470, YB = 7471;
- for (int x = 0; x < image_width; x++)
- {
- int r = pScan_line[x*4+0];
- int g = pScan_line[x*4+1];
- int b = pScan_line[x*4+2];
- *pDst++ = static_cast((r * YR + g * YG + b * YB + 32768) >> 16);
- }
- }
- else
- {
- for (int x = 0; x < image_width; x++)
- {
- pDst[0] = pScan_line[x*4+0];
- pDst[1] = pScan_line[x*4+1];
- pDst[2] = pScan_line[x*4+2];
- pDst += 3;
- }
- }
- }
- }
-
- return pImage_data;
- }
-
-// BEGIN EPIC MOD
- unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format)
- {
- jpg_format = (ERGBFormatJPG)format;
-// EMD EPIC MOD
- jpgd::jpeg_decoder_mem_stream mem_stream(pSrc_data, src_data_size);
- return decompress_jpeg_image_from_stream(&mem_stream, width, height, actual_comps, req_comps);
- }
-
- unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps)
- {
- jpgd::jpeg_decoder_file_stream file_stream;
- if (!file_stream.open(pSrc_filename))
- return NULL;
- return decompress_jpeg_image_from_stream(&file_stream, width, height, actual_comps, req_comps);
- }
-
-} // namespace jpgd
diff --git a/spaces/joushe/moe-tts/mel_processing.py b/spaces/joushe/moe-tts/mel_processing.py
deleted file mode 100644
index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000
--- a/spaces/joushe/moe-tts/mel_processing.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
diff --git a/spaces/jpdiazpardo/jpdiazpardo-whisper-tiny-metal/functions/charts.py b/spaces/jpdiazpardo/jpdiazpardo-whisper-tiny-metal/functions/charts.py
deleted file mode 100644
index e28f7844dddd82f027881fdf35dbc4f9b0042f43..0000000000000000000000000000000000000000
--- a/spaces/jpdiazpardo/jpdiazpardo-whisper-tiny-metal/functions/charts.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import plotly.graph_objects as go
-
-def add_emoji(emotions_list):
- for s in range(len(emotions_list)):
- if emotions_list[s]=="surprise": emotions_list[s]="surprise 😲"
- elif emotions_list[s]=="joy": emotions_list[s]="joy 😀"
- elif emotions_list[s]=="anger": emotions_list[s]="anger 🤬"
- elif emotions_list[s]=="neutral": emotions_list[s]="neutral 😐"
- elif emotions_list[s]=="disgust": emotions_list[s]="disgust 🤢"
- elif emotions_list[s]=="fear": emotions_list[s]="fear 😨"
- elif emotions_list[s]=="sadness": emotions_list[s]="sadness 😭"
- else: print(s)
-
- return emotions_list
-
-
-def spider_chart(dictionary):
-
- fig = go.Figure(data=go.Scatterpolar(
- r=[round(v*100,2) for v in dictionary.values()],
- theta= add_emoji([k for k in dictionary.keys()]),
- fill='toself'))
-
- fig.update_layout(
- polar=dict(
- radialaxis=dict(
- visible=True
- ),
- ),
- showlegend=False,
- width = 400, height = 400,
- title = "Audio Sentiment Analysis", title_x=0.5)
-
- return fig
\ No newline at end of file
diff --git a/spaces/katanaml-org/sparrow-ui/views/data_review.py b/spaces/katanaml-org/sparrow-ui/views/data_review.py
deleted file mode 100644
index d5d3c719ce7870e3cb2e5f227a57c34fd2e671cf..0000000000000000000000000000000000000000
--- a/spaces/katanaml-org/sparrow-ui/views/data_review.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import streamlit as st
-from natsort import natsorted
-import os
-from PIL import Image
-import math
-from streamlit_sparrow_labeling import st_sparrow_labeling
-import json
-
-
-class DataReview:
- class Model:
- # pageTitle = "Data Review"
- subheader_2 = "Select"
- subheader_3 = "Result"
- selection_text = "File to review"
- initial_msg = "Please select a file to review"
-
- img_file = None
-
- def set_image_file(self, img_file):
- st.session_state['img_file_review'] = img_file
-
- def get_image_file(self):
- if 'img_file_review' not in st.session_state:
- return None
- return st.session_state['img_file_review']
-
- json_file = None
-
- def set_json_file(self, json_file):
- st.session_state['json_file_review'] = json_file
-
- def get_json_file(self):
- if 'json_file_review' not in st.session_state:
- return None
- return st.session_state['json_file_review']
-
- def view(self, model, ui_width, device_type, device_width):
- # st.title(model.pageTitle)
-
- with st.sidebar:
- st.markdown("---")
- st.subheader(model.subheader_2)
-
- # get list of files in inference directory
- processed_file_names = self.get_processed_file_names('docs/inference/')
-
- if 'selection_index' not in st.session_state:
- st.session_state['selection_index'] = 0
- selection_index = 0
- else:
- selection_index = st.session_state['selection_index']
-
- selection = st.selectbox(model.selection_text, processed_file_names, index=selection_index)
-
- selection_index = self.get_selection_index(selection, processed_file_names)
- st.session_state['selection_index'] = selection_index
-
- img_file = "docs/inference/" + selection + ".jpg"
- json_file = "docs/inference/" + selection + ".json"
-
- model.set_image_file(img_file)
- model.set_json_file(json_file)
-
- if model.get_image_file() is not None:
- doc_img = Image.open(model.get_image_file())
- doc_height = doc_img.height
- doc_width = doc_img.width
-
- canvas_width, number_of_columns = self.canvas_available_width(ui_width, doc_width, device_type,
- device_width)
-
- if number_of_columns > 1:
- col1, col2 = st.columns([number_of_columns, 10 - number_of_columns])
- with col1:
- pass
- self.render_doc(model, doc_img, canvas_width, doc_height, doc_width)
- with col2:
- pass
- self.render_results(model)
- else:
- pass
- self.render_doc(model, doc_img, canvas_width, doc_height, doc_width)
- self.render_results(model)
- else:
- st.title(model.initial_msg)
-
-
- def get_processed_file_names(self, dir_name):
- # get ordered list of files without file extension, excluding hidden files, with JSON extension only
- file_names = [os.path.splitext(f)[0] for f in os.listdir(dir_name) if
- os.path.isfile(os.path.join(dir_name, f)) and not f.startswith('.') and f.endswith('.json')]
- file_names = natsorted(file_names)
- return file_names
-
- def get_selection_index(self, file, files_list):
- return files_list.index(file)
-
- def canvas_available_width(self, ui_width, doc_width, device_type, device_width):
- doc_width_pct = (doc_width * 100) / ui_width
- if doc_width_pct < 45:
- canvas_width_pct = 37
- elif doc_width_pct < 55:
- canvas_width_pct = 49
- else:
- canvas_width_pct = 60
-
- if ui_width > 700 and canvas_width_pct == 37 and device_type == "desktop":
- return math.floor(canvas_width_pct * ui_width / 100), 4
- elif ui_width > 700 and canvas_width_pct == 49 and device_type == "desktop":
- return math.floor(canvas_width_pct * ui_width / 100), 5
- elif ui_width > 700 and canvas_width_pct == 60 and device_type == "desktop":
- return math.floor(canvas_width_pct * ui_width / 100), 6
- else:
- if device_type == "desktop":
- ui_width = device_width - math.floor((device_width * 22) / 100)
- elif device_type == "mobile":
- ui_width = device_width - math.floor((device_width * 13) / 100)
- return ui_width, 1
-
-
- def render_doc(self, model, doc_img, canvas_width, doc_height, doc_width):
- height = 1296
- width = 864
-
- annotations_json = {
- "meta": {
- "version": "v0.1",
- "split": "train",
- "image_id": 0,
- "image_size": {
- "width": doc_width,
- "height": doc_height
- }
- },
- "words": []
- }
-
- st_sparrow_labeling(
- fill_color="rgba(0, 151, 255, 0.3)",
- stroke_width=2,
- stroke_color="rgba(0, 50, 255, 0.7)",
- background_image=doc_img,
- initial_rects=annotations_json,
- height=height,
- width=width,
- drawing_mode="transform",
- display_toolbar=False,
- update_streamlit=False,
- canvas_width=canvas_width,
- doc_height=doc_height,
- doc_width=doc_width,
- image_rescale=True,
- key="doc_annotation" + model.get_image_file()
- )
-
- def render_results(self, model):
- json_file = model.get_json_file()
- if json_file is not None:
- with open(json_file) as f:
- data_json = json.load(f)
- st.subheader(model.subheader_3)
- st.markdown("---")
- st.json(data_json)
- st.markdown("---")
\ No newline at end of file
diff --git a/spaces/kepl/gpt/run.py b/spaces/kepl/gpt/run.py
deleted file mode 100644
index 3b9ca0f439c4dd6a791f7eed62d942d096562b61..0000000000000000000000000000000000000000
--- a/spaces/kepl/gpt/run.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import secrets
-
-from server.bp import bp
-from server.website import Website
-from server.backend import Backend_Api
-from server.babel import create_babel
-from json import load
-from flask import Flask
-
-if __name__ == '__main__':
-
- # Load configuration from config.json
- config = load(open('config.json', 'r'))
- site_config = config['site_config']
- url_prefix = config.pop('url_prefix')
-
- # Create the app
- app = Flask(__name__)
- app.secret_key = secrets.token_hex(16)
-
- # Set up Babel
- create_babel(app)
-
- # Set up the website routes
- site = Website(bp, url_prefix)
- for route in site.routes:
- bp.add_url_rule(
- route,
- view_func=site.routes[route]['function'],
- methods=site.routes[route]['methods'],
- )
-
- # Set up the backend API routes
- backend_api = Backend_Api(bp, config)
- for route in backend_api.routes:
- bp.add_url_rule(
- route,
- view_func=backend_api.routes[route]['function'],
- methods=backend_api.routes[route]['methods'],
- )
-
- # Register the blueprint
- app.register_blueprint(bp, url_prefix=url_prefix)
-
- # Run the Flask server
- print(f"Running on {site_config['port']}{url_prefix}")
- app.run(**site_config)
- print(f"Closing port {site_config['port']}")
diff --git a/spaces/keremberke/valorant-object-detection/README.md b/spaces/keremberke/valorant-object-detection/README.md
deleted file mode 100644
index bbb470b3979a43eeb4acfcbb2dd0cd4b6ceb8f98..0000000000000000000000000000000000000000
--- a/spaces/keremberke/valorant-object-detection/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Valorant Object Detection
-emoji: 🎮
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/docs/speed_benchmark.md b/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/docs/speed_benchmark.md
deleted file mode 100644
index 055aee0defe2c43a523ced48260242f0f99b7cea..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/docs/speed_benchmark.md
+++ /dev/null
@@ -1,93 +0,0 @@
-## Test Training Speed
-
-- Test Commands
-
-You need to use the following two commands to test the Partial FC training performance.
-The number of identites is **3 millions** (synthetic data), turn mixed precision training on, backbone is resnet50,
-batch size is 1024.
-```shell
-# Model Parallel
-python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions
-# Partial FC 0.1
-python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions_pfc
-```
-
-- GPU Memory
-
-```
-# (Model Parallel) gpustat -i
-[0] Tesla V100-SXM2-32GB | 64'C, 94 % | 30338 / 32510 MB
-[1] Tesla V100-SXM2-32GB | 60'C, 99 % | 28876 / 32510 MB
-[2] Tesla V100-SXM2-32GB | 60'C, 99 % | 28872 / 32510 MB
-[3] Tesla V100-SXM2-32GB | 69'C, 99 % | 28872 / 32510 MB
-[4] Tesla V100-SXM2-32GB | 66'C, 99 % | 28888 / 32510 MB
-[5] Tesla V100-SXM2-32GB | 60'C, 99 % | 28932 / 32510 MB
-[6] Tesla V100-SXM2-32GB | 68'C, 100 % | 28916 / 32510 MB
-[7] Tesla V100-SXM2-32GB | 65'C, 99 % | 28860 / 32510 MB
-
-# (Partial FC 0.1) gpustat -i
-[0] Tesla V100-SXM2-32GB | 60'C, 95 % | 10488 / 32510 MB │·······················
-[1] Tesla V100-SXM2-32GB | 60'C, 97 % | 10344 / 32510 MB │·······················
-[2] Tesla V100-SXM2-32GB | 61'C, 95 % | 10340 / 32510 MB │·······················
-[3] Tesla V100-SXM2-32GB | 66'C, 95 % | 10340 / 32510 MB │·······················
-[4] Tesla V100-SXM2-32GB | 65'C, 94 % | 10356 / 32510 MB │·······················
-[5] Tesla V100-SXM2-32GB | 61'C, 95 % | 10400 / 32510 MB │·······················
-[6] Tesla V100-SXM2-32GB | 68'C, 96 % | 10384 / 32510 MB │·······················
-[7] Tesla V100-SXM2-32GB | 64'C, 95 % | 10328 / 32510 MB │·······················
-```
-
-- Training Speed
-
-```python
-# (Model Parallel) trainging.log
-Training: Speed 2271.33 samples/sec Loss 1.1624 LearningRate 0.2000 Epoch: 0 Global Step: 100
-Training: Speed 2269.94 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150
-Training: Speed 2272.67 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200
-Training: Speed 2266.55 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 250
-Training: Speed 2272.54 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300
-
-# (Partial FC 0.1) trainging.log
-Training: Speed 5299.56 samples/sec Loss 1.0965 LearningRate 0.2000 Epoch: 0 Global Step: 100
-Training: Speed 5296.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150
-Training: Speed 5304.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200
-Training: Speed 5274.43 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 250
-Training: Speed 5300.10 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300
-```
-
-In this test case, Partial FC 0.1 only use1 1/3 of the GPU memory of the model parallel,
-and the training speed is 2.5 times faster than the model parallel.
-
-
-## Speed Benchmark
-
-1. Training speed of different parallel methods (samples/second), Tesla V100 32GB * 8. (Larger is better)
-
-| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
-| :--- | :--- | :--- | :--- |
-|125000 | 4681 | 4824 | 5004 |
-|250000 | 4047 | 4521 | 4976 |
-|500000 | 3087 | 4013 | 4900 |
-|1000000 | 2090 | 3449 | 4803 |
-|1400000 | 1672 | 3043 | 4738 |
-|2000000 | - | 2593 | 4626 |
-|4000000 | - | 1748 | 4208 |
-|5500000 | - | 1389 | 3975 |
-|8000000 | - | - | 3565 |
-|16000000 | - | - | 2679 |
-|29000000 | - | - | 1855 |
-
-2. GPU memory cost of different parallel methods (GB per GPU), Tesla V100 32GB * 8. (Smaller is better)
-
-| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
-| :--- | :--- | :--- | :--- |
-|125000 | 7358 | 5306 | 4868 |
-|250000 | 9940 | 5826 | 5004 |
-|500000 | 14220 | 7114 | 5202 |
-|1000000 | 23708 | 9966 | 5620 |
-|1400000 | 32252 | 11178 | 6056 |
-|2000000 | - | 13978 | 6472 |
-|4000000 | - | 23238 | 8284 |
-|5500000 | - | 32188 | 9854 |
-|8000000 | - | - | 12310 |
-|16000000 | - | - | 19950 |
-|29000000 | - | - | 32324 |
diff --git a/spaces/kevinwang676/rvc-mlbb-v2/lib/infer_pack/onnx_inference.py b/spaces/kevinwang676/rvc-mlbb-v2/lib/infer_pack/onnx_inference.py
deleted file mode 100644
index c78324cbc08414fffcc689f325312de0e51bd6b4..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/rvc-mlbb-v2/lib/infer_pack/onnx_inference.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import onnxruntime
-import librosa
-import numpy as np
-import soundfile
-
-
-class ContentVec:
- def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
- print("load model(s) from {}".format(vec_path))
- if device == "cpu" or device is None:
- providers = ["CPUExecutionProvider"]
- elif device == "cuda":
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
- elif device == "dml":
- providers = ["DmlExecutionProvider"]
- else:
- raise RuntimeError("Unsportted Device")
- self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
-
- def __call__(self, wav):
- return self.forward(wav)
-
- def forward(self, wav):
- feats = wav
- if feats.ndim == 2: # double channels
- feats = feats.mean(-1)
- assert feats.ndim == 1, feats.ndim
- feats = np.expand_dims(np.expand_dims(feats, 0), 0)
- onnx_input = {self.model.get_inputs()[0].name: feats}
- logits = self.model.run(None, onnx_input)[0]
- return logits.transpose(0, 2, 1)
-
-
-def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
- if f0_predictor == "pm":
- from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
-
- f0_predictor_object = PMF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- elif f0_predictor == "harvest":
- from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import HarvestF0Predictor
-
- f0_predictor_object = HarvestF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- elif f0_predictor == "dio":
- from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
-
- f0_predictor_object = DioF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- else:
- raise Exception("Unknown f0 predictor")
- return f0_predictor_object
-
-
-class OnnxRVC:
- def __init__(
- self,
- model_path,
- sr=40000,
- hop_size=512,
- vec_path="vec-768-layer-12",
- device="cpu",
- ):
- vec_path = f"pretrained/{vec_path}.onnx"
- self.vec_model = ContentVec(vec_path, device)
- if device == "cpu" or device is None:
- providers = ["CPUExecutionProvider"]
- elif device == "cuda":
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
- elif device == "dml":
- providers = ["DmlExecutionProvider"]
- else:
- raise RuntimeError("Unsportted Device")
- self.model = onnxruntime.InferenceSession(model_path, providers=providers)
- self.sampling_rate = sr
- self.hop_size = hop_size
-
- def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
- onnx_input = {
- self.model.get_inputs()[0].name: hubert,
- self.model.get_inputs()[1].name: hubert_length,
- self.model.get_inputs()[2].name: pitch,
- self.model.get_inputs()[3].name: pitchf,
- self.model.get_inputs()[4].name: ds,
- self.model.get_inputs()[5].name: rnd,
- }
- return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
-
- def inference(
- self,
- raw_path,
- sid,
- f0_method="dio",
- f0_up_key=0,
- pad_time=0.5,
- cr_threshold=0.02,
- ):
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- f0_predictor = get_f0_predictor(
- f0_method,
- hop_length=self.hop_size,
- sampling_rate=self.sampling_rate,
- threshold=cr_threshold,
- )
- wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
- org_length = len(wav)
- if org_length / sr > 50.0:
- raise RuntimeError("Reached Max Length")
-
- wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
- wav16k = wav16k
-
- hubert = self.vec_model(wav16k)
- hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
- hubert_length = hubert.shape[1]
-
- pitchf = f0_predictor.compute_f0(wav, hubert_length)
- pitchf = pitchf * 2 ** (f0_up_key / 12)
- pitch = pitchf.copy()
- f0_mel = 1127 * np.log(1 + pitch / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- pitch = np.rint(f0_mel).astype(np.int64)
-
- pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
- pitch = pitch.reshape(1, len(pitch))
- ds = np.array([sid]).astype(np.int64)
-
- rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
- hubert_length = np.array([hubert_length]).astype(np.int64)
-
- out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
- out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
- return out_wav[0:org_length]
diff --git a/spaces/kevinwang676/rvc-models-new/infer_pack/models_onnx_moess.py b/spaces/kevinwang676/rvc-models-new/infer_pack/models_onnx_moess.py
deleted file mode 100644
index 12efb0629a2e3d0d746a34f467254536c2bdbe5f..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/rvc-models-new/infer_pack/models_onnx_moess.py
+++ /dev/null
@@ -1,849 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from infer_pack import modules
-from infer_pack import attentions
-from infer_pack import commons
-from infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from infer_pack.commons import init_weights
-import numpy as np
-from infer_pack import commons
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch == None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder256Sim(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch == None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- x = self.proj(x) * x_mask
- return x, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
- sine_amp: amplitude of sine-wavefrom (default 0.1)
- noise_std: std of Gaussian noise (default 0.003)
- voiced_thoreshold: F0 threshold for U/V classification (default 0)
- flag_for_pulse: this SinGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
- rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
- tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(
- 2, 1
- ) #######
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonic above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that amplitude of noise in unvoiced is decided
- by sine_amp
- voiced_threshold: threhold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMs256NSFsidM(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
- if type(sr) == type("strr"):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(self, phone, phone_lengths, pitch, nsff0, sid, rnd, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o
-
-
-class SynthesizerTrnMs256NSFsid_sim(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- # hop_length,
- gin_channels=0,
- use_sdp=True,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256Sim(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- is_half=kwargs["is_half"],
- )
-
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, ds, max_len=None
- ): # y是spec不需要了现在
- g = self.emb_g(ds.unsqueeze(0)).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
- x, x_mask = self.enc_p(phone, pitch, phone_lengths)
- x = self.flow(x, x_mask, g=g, reverse=True)
- o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g)
- return o
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/web/static/js/wav.js b/spaces/kira4424/Tacotron-zero-short-voice-clone/web/static/js/wav.js
deleted file mode 100644
index 461908c4fd68655a6f4cbc573b469fd97ed6ed8d..0000000000000000000000000000000000000000
--- a/spaces/kira4424/Tacotron-zero-short-voice-clone/web/static/js/wav.js
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
-wav编码器+编码引擎
-https://github.com/xiangyuecn/Recorder
-
-当然最佳推荐使用mp3、wav格式,代码也是优先照顾这两种格式
-浏览器支持情况
-https://developer.mozilla.org/en-US/docs/Web/HTML/Supported_media_formats
-
-编码原理:给pcm数据加上一个44直接的wav头即成wav文件;pcm数据就是Recorder中的buffers原始数据(重新采样),16位时为LE小端模式(Little Endian),实质上是未经过任何编码处理
-*/
-(function(){
-"use strict";
-
-Recorder.prototype.enc_wav={
- stable:true
- ,testmsg:"支持位数8位、16位(填在比特率里面),采样率取值无限制"
-};
-Recorder.prototype.wav=function(res,True,False){
- var This=this,set=This.set
- ,size=res.length
- ,sampleRate=set.sampleRate
- ,bitRate=set.bitRate==8?8:16;
-
- //编码数据 https://github.com/mattdiamond/Recorderjs https://www.cnblogs.com/blqw/p/3782420.html https://www.cnblogs.com/xiaoqi/p/6993912.html
- var dataLength=size*(bitRate/8);
- var buffer=new ArrayBuffer(44+dataLength);
- var data=new DataView(buffer);
-
- var offset=0;
- var writeString=function(str){
- for (var i=0;i>8)+128;
- data.setInt8(offset,val,true);
- };
- }else{
- for (var i=0;i list[tuple[str, str]]:
- """Extract hyperlinks from a BeautifulSoup object
-
- Args:
- soup (BeautifulSoup): The BeautifulSoup object
- base_url (str): The base URL
-
- Returns:
- List[Tuple[str, str]]: The extracted hyperlinks
- """
- return [
- (link.text, urljoin(base_url, link["href"]))
- for link in soup.find_all("a", href=True)
- ]
-
-
-def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
- """Format hyperlinks to be displayed to the user
-
- Args:
- hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
-
- Returns:
- List[str]: The formatted hyperlinks
- """
- return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
diff --git a/spaces/kukuhtw/VToonify/vtoonify/model/raft/README.md b/spaces/kukuhtw/VToonify/vtoonify/model/raft/README.md
deleted file mode 100644
index 650275ed7c4cda12822587c6a4358f057fffe494..0000000000000000000000000000000000000000
--- a/spaces/kukuhtw/VToonify/vtoonify/model/raft/README.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# RAFT
-This repository contains the source code for our paper:
-
-[RAFT: Recurrent All Pairs Field Transforms for Optical Flow](https://arxiv.org/pdf/2003.12039.pdf)
-ECCV 2020
-Zachary Teed and Jia Deng
-
-
-
-## Requirements
-The code has been tested with PyTorch 1.6 and Cuda 10.1.
-```Shell
-conda create --name raft
-conda activate raft
-conda install pytorch=1.6.0 torchvision=0.7.0 cudatoolkit=10.1 matplotlib tensorboard scipy opencv -c pytorch
-```
-
-## Demos
-Pretrained models can be downloaded by running
-```Shell
-./download_models.sh
-```
-or downloaded from [google drive](https://drive.google.com/drive/folders/1sWDsfuZ3Up38EUQt7-JDTT1HcGHuJgvT?usp=sharing)
-
-You can demo a trained model on a sequence of frames
-```Shell
-python demo.py --model=models/raft-things.pth --path=demo-frames
-```
-
-## Required Data
-To evaluate/train RAFT, you will need to download the required datasets.
-* [FlyingChairs](https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs)
-* [FlyingThings3D](https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html)
-* [Sintel](http://sintel.is.tue.mpg.de/)
-* [KITTI](http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow)
-* [HD1K](http://hci-benchmark.iwr.uni-heidelberg.de/) (optional)
-
-
-By default `datasets.py` will search for the datasets in these locations. You can create symbolic links to wherever the datasets were downloaded in the `datasets` folder
-
-```Shell
-├── datasets
- ├── Sintel
- ├── test
- ├── training
- ├── KITTI
- ├── testing
- ├── training
- ├── devkit
- ├── FlyingChairs_release
- ├── data
- ├── FlyingThings3D
- ├── frames_cleanpass
- ├── frames_finalpass
- ├── optical_flow
-```
-
-## Evaluation
-You can evaluate a trained model using `evaluate.py`
-```Shell
-python evaluate.py --model=models/raft-things.pth --dataset=sintel --mixed_precision
-```
-
-## Training
-We used the following training schedule in our paper (2 GPUs). Training logs will be written to the `runs` which can be visualized using tensorboard
-```Shell
-./train_standard.sh
-```
-
-If you have a RTX GPU, training can be accelerated using mixed precision. You can expect similiar results in this setting (1 GPU)
-```Shell
-./train_mixed.sh
-```
-
-## (Optional) Efficent Implementation
-You can optionally use our alternate (efficent) implementation by compiling the provided cuda extension
-```Shell
-cd alt_cuda_corr && python setup.py install && cd ..
-```
-and running `demo.py` and `evaluate.py` with the `--alternate_corr` flag Note, this implementation is somewhat slower than all-pairs, but uses significantly less GPU memory during the forward pass.
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/__init__.py
deleted file mode 100644
index caaad2cc7b96947857fe3ba3de903be65644bde6..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/__init__.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import pkgutil
-
-import gradio.components as components
-import gradio.inputs as inputs
-import gradio.outputs as outputs
-import gradio.processing_utils
-import gradio.templates
-import gradio.themes as themes
-from gradio.blocks import Blocks
-from gradio.components import (
- HTML,
- JSON,
- AnnotatedImage,
- Annotatedimage,
- Audio,
- BarPlot,
- Button,
- Carousel,
- Chatbot,
- Checkbox,
- CheckboxGroup,
- Checkboxgroup,
- Code,
- ColorPicker,
- DataFrame,
- Dataframe,
- Dataset,
- Dropdown,
- File,
- Gallery,
- Highlight,
- HighlightedText,
- Highlightedtext,
- Image,
- Interpretation,
- Json,
- Label,
- LinePlot,
- Markdown,
- Model3D,
- Number,
- Plot,
- Radio,
- ScatterPlot,
- Slider,
- State,
- StatusTracker,
- Text,
- Textbox,
- TimeSeries,
- Timeseries,
- UploadButton,
- Variable,
- Video,
- component,
-)
-from gradio.deploy_space import deploy
-from gradio.events import SelectData
-from gradio.exceptions import Error
-from gradio.external import load
-from gradio.flagging import (
- CSVLogger,
- FlaggingCallback,
- HuggingFaceDatasetJSONSaver,
- HuggingFaceDatasetSaver,
- SimpleCSVLogger,
-)
-from gradio.helpers import EventData, Progress, make_waveform, skip, update
-from gradio.helpers import create_examples as Examples # noqa: N812
-from gradio.interface import Interface, TabbedInterface, close_all
-from gradio.ipython_ext import load_ipython_extension
-from gradio.layouts import Accordion, Box, Column, Group, Row, Tab, TabItem, Tabs
-from gradio.mix import Parallel, Series
-from gradio.routes import Request, mount_gradio_app
-from gradio.templates import (
- Files,
- ImageMask,
- ImagePaint,
- List,
- Matrix,
- Mic,
- Microphone,
- Numpy,
- Paint,
- Pil,
- PlayableVideo,
- Sketchpad,
- TextArea,
- Webcam,
-)
-from gradio.themes import Base as Theme
-
-current_pkg_version = (
- (pkgutil.get_data(__name__, "version.txt") or b"").decode("ascii").strip()
-)
-__version__ = current_pkg_version
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Blocks-005a10ea.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Blocks-005a10ea.css
deleted file mode 100644
index 1feac101230266e476fc5f389f286813260505b5..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Blocks-005a10ea.css
+++ /dev/null
@@ -1 +0,0 @@
-.wrap.svelte-1i3r921.svelte-1i3r921{padding:var(--size-6)}.attention.svelte-1i3r921.svelte-1i3r921{font-weight:var(--weight-bold);font-size:var(--text-lg)}.attention.svelte-1i3r921 code.svelte-1i3r921{border:none;background:none;color:var(--color-accent);font-weight:var(--weight-bold)}button.svelte-1i3r921.svelte-1i3r921{position:absolute;top:var(--size-5);right:var(--size-6);width:var(--size-4);color:var(--body-text-color)}button.svelte-1i3r921.svelte-1i3r921:hover{color:var(--color-accent)}@media (min-width: 768px){button.svelte-1i3r921.svelte-1i3r921{top:var(--size-6)}}h2.svelte-9i27qi.svelte-9i27qi{display:flex;color:var(--body-text-color);font-weight:var(--weight-semibold)}h2.svelte-9i27qi img.svelte-9i27qi{margin-right:var(--size-2);width:var(--size-4)}span.svelte-9i27qi.svelte-9i27qi{color:var(--color-accent)}button.svelte-9i27qi.svelte-9i27qi{position:absolute;top:var(--size-5);right:var(--size-6);width:var(--size-4);color:var(--body-text-color)}button.svelte-9i27qi.svelte-9i27qi:hover{color:var(--color-accent)}@media (min-width: 768px){button.svelte-9i27qi.svelte-9i27qi{top:var(--size-6)}h2.svelte-9i27qi 
img.svelte-9i27qi{width:var(--size-5)}}.counts.svelte-9i27qi.svelte-9i27qi{margin-top:auto;margin-right:var(--size-8);margin-bottom:auto;margin-left:auto;color:var(--body-text-color);font-weight:var(--weight-light)}.load-wrap.svelte-1c7hj3i{display:flex;justify-content:center;align-items:center}h4.svelte-1c7hj3i{display:flex;align-items:center;margin-top:var(--size-6);margin-bottom:var(--size-3);color:var(--body-text-color);font-weight:var(--weight-bold)}.toggle-icon.svelte-1c7hj3i{display:flex;align-items:center;margin-right:var(--size-2);border-radius:var(--radius-full);background:var(--color-grey-300);width:12px;height:4px}.toggle-dot.svelte-1c7hj3i{margin-left:auto;border-radius:var(--radius-full);background:var(--color-grey-700);width:6px;height:6px}.response-wrap.svelte-1c7hj3i{font-family:var(--font-mono)}.desc.svelte-1c7hj3i{color:var(--body-text-color-subdued)}.hide.svelte-1c7hj3i{display:none}.second-level.svelte-1c7hj3i{margin-left:var(--size-4)}code.svelte-1pu3gsl pre.svelte-1pu3gsl{overflow-x:auto;color:var(--body-text-color);font-family:var(--font-mono);tab-size:2}code.svelte-1pu3gsl.svelte-1pu3gsl{position:relative}.copy.svelte-1pu3gsl.svelte-1pu3gsl{position:absolute;top:0;right:0;margin-top:-5px;margin-right:-5px}h3.svelte-41kcm6{color:var(--body-text-color);font-weight:var(--section-header-text-weight);font-size:var(--text-lg)}.post.svelte-41kcm6{margin-right:var(--size-2);border:1px solid var(--border-color-accent);border-radius:var(--radius-sm);background:var(--color-accent-soft);padding-right:var(--size-1);padding-bottom:var(--size-1);padding-left:var(--size-1);color:var(--color-accent);font-weight:var(--weight-semibold)}code.svelte-1bqxtsy 
pre.svelte-1bqxtsy{overflow-x:auto;color:var(--body-text-color);font-family:var(--font-mono);tab-size:2}.token.string.svelte-1bqxtsy.svelte-1bqxtsy{display:contents;color:var(--color-accent-base)}code.svelte-1bqxtsy.svelte-1bqxtsy{position:relative}.copy.svelte-1bqxtsy.svelte-1bqxtsy{position:absolute;top:0;right:0;margin-top:-5px;margin-right:-5px}.container.svelte-1bqxtsy.svelte-1bqxtsy{display:flex;flex-direction:column;gap:var(--spacing-xxl);margin-top:var(--size-3);margin-bottom:var(--size-3)}.error.svelte-1bqxtsy.svelte-1bqxtsy{color:var(--error-text-color)}.desc.svelte-1bqxtsy.svelte-1bqxtsy{color:var(--body-text-color-subdued)}.example-inputs.svelte-1bqxtsy.svelte-1bqxtsy{border:1px solid var(--border-color-accent);border-radius:var(--radius-sm);background:var(--color-accent-soft);padding-right:var(--size-1);padding-left:var(--size-1);color:var(--color-accent)}.space.svelte-1j8n062{display:flex;flex-basis:1;margin-top:var(--size-4)}.banner-wrap.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{position:relative;border-bottom:1px solid var(--border-color-primary);padding:var(--size-4) var(--size-6);font-size:var(--text-md)}@media (min-width: 768px){.banner-wrap.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{font-size:var(--text-xl)}}.docs-wrap.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{display:flex;flex-direction:column;gap:var(--spacing-xxl)}.endpoint.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{border-radius:var(--radius-md);background:var(--background-fill-primary);padding:var(--size-6);padding-top:var(--size-1);font-size:var(--text-md)}.client-doc.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{padding-top:var(--size-6);padding-right:var(--size-6);padding-left:var(--size-6);font-size:var(--text-xl)}.library.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{border:1px solid 
var(--border-color-accent);border-radius:var(--radius-sm);background:var(--color-accent-soft);padding-right:var(--size-1);padding-bottom:var(--size-1);padding-left:var(--size-1);color:var(--color-accent)}.snippets.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{display:flex;align-items:center;margin-bottom:var(--size-4)}.snippets.svelte-rzp0ym>.svelte-rzp0ym+.svelte-rzp0ym{margin-left:var(--size-2)}.snippet.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{display:flex;align-items:center;border:1px solid var(--border-color-primary);border-radius:var(--radius-md);padding:var(--size-1) var(--size-1-5);color:var(--body-text-color-subdued);color:var(--body-text-color);line-height:1;user-select:none;text-transform:capitalize}.current-lang.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{border:1px solid var(--body-text-color-subdued);color:var(--body-text-color)}.inactive-lang.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{cursor:pointer;color:var(--body-text-color-subdued)}.inactive-lang.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym:hover,.inactive-lang.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym:focus{box-shadow:var(--shadow-drop);color:var(--body-text-color)}.snippet.svelte-rzp0ym img.svelte-rzp0ym.svelte-rzp0ym{margin-right:var(--size-1-5);width:var(--size-3)}.header.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{margin-top:var(--size-3);margin-bottom:var(--size-3);font-size:var(--text-xl)}.endpoint-container.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{margin-top:var(--size-3);margin-bottom:var(--size-3);border:1px solid 
var(--border-color-primary);border-radius:var(--radius-xl);padding:var(--size-3);padding-top:0}.wrap.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;flex-grow:1;flex-direction:column;width:var(--size-full);font-weight:var(--body-text-weight);font-size:var(--body-text-size)}footer.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;justify-content:center;margin-top:var(--size-4);color:var(--body-text-color-subdued)}footer.svelte-1lyswbr>.svelte-1lyswbr+.svelte-1lyswbr{margin-left:var(--size-2)}.show-api.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;align-items:center}.show-api.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr:hover{color:var(--body-text-color)}.show-api.svelte-1lyswbr img.svelte-1lyswbr.svelte-1lyswbr{margin-right:var(--size-1);margin-left:var(--size-2);width:var(--size-3)}.built-with.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;align-items:center}.built-with.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr:hover{color:var(--body-text-color)}.built-with.svelte-1lyswbr img.svelte-1lyswbr.svelte-1lyswbr{margin-right:var(--size-1);margin-left:var(--size-2);width:var(--size-3)}.api-docs.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;position:fixed;top:0;right:0;z-index:var(--layer-5);background:rgba(0,0,0,.5);width:var(--size-screen);height:var(--size-screen-h)}.backdrop.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{flex:1 1 0%;backdrop-filter:blur(4px)}.api-docs-wrap.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{box-shadow:var(--shadow-drop-lg);background:var(--background-fill-primary);overflow-x:hidden;overflow-y:auto}@media (min-width: 768px){.api-docs-wrap.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{border-top-left-radius:var(--radius-lg);border-bottom-left-radius:var(--radius-lg);width:950px}}@media (min-width: 1536px){.api-docs-wrap.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{width:1150px}}
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/DropdownArrow-5fa4dd09.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/DropdownArrow-5fa4dd09.css
deleted file mode 100644
index c47d6f6f010f0626b0036068fe41d683b37b2954..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/DropdownArrow-5fa4dd09.css
+++ /dev/null
@@ -1 +0,0 @@
-.dropdown-arrow.svelte-p5edak{fill:var(--body-text-color);margin-right:var(--size-2);width:var(--size-5)}
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_afm.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_afm.py
deleted file mode 100644
index e5c6a83937cd68e3ae20a14d8babb9a99bd5a4f5..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_afm.py
+++ /dev/null
@@ -1,137 +0,0 @@
-from io import BytesIO
-import pytest
-import logging
-
-from matplotlib import _afm
-from matplotlib import font_manager as fm
-
-
-# See note in afm.py re: use of comma as decimal separator in the
-# UnderlineThickness field and re: use of non-ASCII characters in the Notice
-# field.
-AFM_TEST_DATA = b"""StartFontMetrics 2.0
-Comment Comments are ignored.
-Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017
-FontName MyFont-Bold
-EncodingScheme FontSpecific
-FullName My Font Bold
-FamilyName Test Fonts
-Weight Bold
-ItalicAngle 0.0
-IsFixedPitch false
-UnderlinePosition -100
-UnderlineThickness 56,789
-Version 001.000
-Notice Copyright \xa9 2017 No one.
-FontBBox 0 -321 1234 369
-StartCharMetrics 3
-C 0 ; WX 250 ; N space ; B 0 0 0 0 ;
-C 42 ; WX 1141 ; N foo ; B 40 60 800 360 ;
-C 99 ; WX 583 ; N bar ; B 40 -10 543 210 ;
-EndCharMetrics
-EndFontMetrics
-"""
-
-
-def test_nonascii_str():
- # This tests that we also decode bytes as utf-8 properly.
- # Else, font files with non ascii characters fail to load.
- inp_str = "привет"
- byte_str = inp_str.encode("utf8")
-
- ret = _afm._to_str(byte_str)
- assert ret == inp_str
-
-
-def test_parse_header():
- fh = BytesIO(AFM_TEST_DATA)
- header = _afm._parse_header(fh)
- assert header == {
- b'StartFontMetrics': 2.0,
- b'FontName': 'MyFont-Bold',
- b'EncodingScheme': 'FontSpecific',
- b'FullName': 'My Font Bold',
- b'FamilyName': 'Test Fonts',
- b'Weight': 'Bold',
- b'ItalicAngle': 0.0,
- b'IsFixedPitch': False,
- b'UnderlinePosition': -100,
- b'UnderlineThickness': 56.789,
- b'Version': '001.000',
- b'Notice': b'Copyright \xa9 2017 No one.',
- b'FontBBox': [0, -321, 1234, 369],
- b'StartCharMetrics': 3,
- }
-
-
-def test_parse_char_metrics():
- fh = BytesIO(AFM_TEST_DATA)
- _afm._parse_header(fh) # position
- metrics = _afm._parse_char_metrics(fh)
- assert metrics == (
- {0: (250.0, 'space', [0, 0, 0, 0]),
- 42: (1141.0, 'foo', [40, 60, 800, 360]),
- 99: (583.0, 'bar', [40, -10, 543, 210]),
- },
- {'space': (250.0, 'space', [0, 0, 0, 0]),
- 'foo': (1141.0, 'foo', [40, 60, 800, 360]),
- 'bar': (583.0, 'bar', [40, -10, 543, 210]),
- })
-
-
-def test_get_familyname_guessed():
- fh = BytesIO(AFM_TEST_DATA)
- font = _afm.AFM(fh)
- del font._header[b'FamilyName'] # remove FamilyName, so we have to guess
- assert font.get_familyname() == 'My Font'
-
-
-def test_font_manager_weight_normalization():
- font = _afm.AFM(BytesIO(
- AFM_TEST_DATA.replace(b"Weight Bold\n", b"Weight Custom\n")))
- assert fm.afmFontProperty("", font).weight == "normal"
-
-
-@pytest.mark.parametrize(
- "afm_data",
- [
- b"""nope
-really nope""",
- b"""StartFontMetrics 2.0
-Comment Comments are ignored.
-Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017
-FontName MyFont-Bold
-EncodingScheme FontSpecific""",
- ],
-)
-def test_bad_afm(afm_data):
- fh = BytesIO(afm_data)
- with pytest.raises(RuntimeError):
- _afm._parse_header(fh)
-
-
-@pytest.mark.parametrize(
- "afm_data",
- [
- b"""StartFontMetrics 2.0
-Comment Comments are ignored.
-Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017
-Aardvark bob
-FontName MyFont-Bold
-EncodingScheme FontSpecific
-StartCharMetrics 3""",
- b"""StartFontMetrics 2.0
-Comment Comments are ignored.
-Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017
-ItalicAngle zero degrees
-FontName MyFont-Bold
-EncodingScheme FontSpecific
-StartCharMetrics 3""",
- ],
-)
-def test_malformed_header(afm_data, caplog):
- fh = BytesIO(afm_data)
- with caplog.at_level(logging.ERROR):
- _afm._parse_header(fh)
-
- assert len(caplog.records) == 1
diff --git a/spaces/laocao1798/laocaoAI/Dockerfile b/spaces/laocao1798/laocaoAI/Dockerfile
deleted file mode 100644
index 3698c7cb7938e025afc53b18a571ae2961fbdffe..0000000000000000000000000000000000000000
--- a/spaces/laocao1798/laocaoAI/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# Build Stage
-# 使用 golang:alpine 作为构建阶段的基础镜像
-FROM golang:alpine AS builder
-
-# 添加 git,以便之后能从GitHub克隆项目
-RUN apk --no-cache add git
-
-# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下
-RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
-
-# 设置工作目录为之前克隆的项目目录
-WORKDIR /workspace/app
-
-# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小
-RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
-
-# Runtime Stage
-# 使用轻量级的 alpine 镜像作为运行时的基础镜像
-FROM alpine
-
-# 设置工作目录
-WORKDIR /workspace/app
-
-# 从构建阶段复制编译后的二进制文件到运行时镜像中
-COPY --from=builder /workspace/app/go-proxy-bingai .
-
-# 设置环境变量,此处为随机字符
-ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO"
-
-# 暴露8080端口
-EXPOSE 8080
-
-# 容器启动时运行的命令
-CMD ["/workspace/app/go-proxy-bingai"]
\ No newline at end of file
diff --git a/spaces/lc202301/ChuanhuChatGPT/overwrites.py b/spaces/lc202301/ChuanhuChatGPT/overwrites.py
deleted file mode 100644
index a87499a81bb3c23bf34c1faadcc02085567cd447..0000000000000000000000000000000000000000
--- a/spaces/lc202301/ChuanhuChatGPT/overwrites.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from __future__ import annotations
-import logging
-
-from llama_index import Prompt
-from typing import List, Tuple
-import mdtex2html
-
-from presets import *
-from llama_func import *
-
-
-def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
- logging.debug("Compacting text chunks...🚀🚀🚀")
- combined_str = [c.strip() for c in text_chunks if c.strip()]
- combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
- combined_str = "\n\n".join(combined_str)
- # resplit based on self.max_chunk_overlap
- text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
- return text_splitter.split_text(combined_str)
-
-
-def postprocess(
- self, y: List[Tuple[str | None, str | None]]
-) -> List[Tuple[str | None, str | None]]:
- """
- Parameters:
- y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
- Returns:
- List of tuples representing the message and response. Each message and response will be a string of HTML.
- """
- if y is None or y == []:
- return []
- tag_regex = re.compile(r"^<\w+>[^<]+\w+>")
- if tag_regex.search(y[-1][1]):
- y[-1] = (convert_user(y[-1][0]), y[-1][1])
- else:
- y[-1] = (convert_user(y[-1][0]), convert_mdtext(y[-1][1]))
- return y
-
-with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2:
- customJS = f.read()
- kelpyCodos = f2.read()
-
-def reload_javascript():
- print("Reloading javascript...")
- js = f''
- def template_response(*args, **kwargs):
- res = GradioTemplateResponseOriginal(*args, **kwargs)
- res.body = res.body.replace(b'