diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Zwcad 2011 English Professional 11.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Zwcad 2011 English Professional 11.md deleted file mode 100644 index d3f74a88ff523c33a5be311739e4da1cf8489013..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Zwcad 2011 English Professional 11.md +++ /dev/null @@ -1,143 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Conclusión

-

Un video de letras en pantalla negra es una gran manera de mostrar su música en línea. Es fácil y barato de hacer, atrae a más espectadores y oyentes, y expresa su creatividad y estilo. Puede utilizar cualquier software o aplicación que se adapte a sus necesidades, pero recomendamos VEED.IO, Kapwing o Alight Motion como algunos de los mejores. Aquí hay algunos consejos para hacer un buen video de letras de pantalla negra:

- -

Preguntas frecuentes

-

¿Cuáles son algunos ejemplos de videos de letras en pantalla negra?

-

Algunos ejemplos de videos de letras en pantalla negra son:

- -

¿Cómo puedo añadir música a mi video de letras de pantalla negra?

-

Puede agregar música a su video de letras de pantalla negra subiendo su archivo de canción al software o aplicación que está utilizando. También puede utilizar la biblioteca de música incorporada o la función de búsqueda de música en línea de algunos programas o aplicaciones para encontrar y agregar música a su video.

-

¿Cómo puedo optimizar mi video de letras en pantalla negra para diferentes plataformas de redes sociales?

-

Puedes optimizar tus letras de vídeo en pantalla negra para diferentes plataformas de redes sociales ajustando la resolución, el formato y la duración de tu vídeo según los requisitos de la plataforma. Por ejemplo: resultado/p> -

-

¿Cómo puedo evitar problemas de derechos de autor al hacer un video de letras de pantalla negra?

-

Puede evitar problemas de derechos de autor al hacer un video de letras de pantalla negra por:

- -
  • Usando su propia canción original o una canción que tiene permiso o licencia para usar.
  • -
  • Dar crédito y atribución adecuada al propietario o creador de la canción original.
  • -
  • Siguiendo las pautas y principios de uso justo cuando se usa una canción con fines educativos, de comentario o de parodia.
  • -
  • Usando música libre de derechos o música que está en el dominio público.
  • - -

    ¿Cómo puedo promover mi video de letras de pantalla negra en línea?

    -

    Puedes promocionar tu video en pantalla negra por:

    - -

    Espero que este artículo te haya ayudado a aprender cómo hacer un video de letras de pantalla negra para tu música. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer!

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Bingsu/color_textual_inversion/README.md b/spaces/Bingsu/color_textual_inversion/README.md deleted file mode 100644 index 86bd73a9a6568b7c9214bdab479ec4a794b0ac5f..0000000000000000000000000000000000000000 --- a/spaces/Bingsu/color_textual_inversion/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: color_textual_inversion -emoji: 🖌️ -sdk: streamlit -python_version: 3.9 -sdk_version: 1.10.0 -app_file: app.py ---- - -# color_textual_inversion diff --git a/spaces/Brasd99/AnswerMate/README.md b/spaces/Brasd99/AnswerMate/README.md deleted file mode 100644 index 24586afd40fc85d6a0f819729f7360466cfef224..0000000000000000000000000000000000000000 --- a/spaces/Brasd99/AnswerMate/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AnswerMate -emoji: 🧠 -colorFrom: purple -colorTo: yellow -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/LIVE/thrust/testing/unittest/system.h b/spaces/CVPR/LIVE/thrust/testing/unittest/system.h deleted file mode 100644 index b3552c2b321068d9a7f5eef21fed456574806f65..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/testing/unittest/system.h +++ /dev/null @@ -1,33 +0,0 @@ -#pragma once - -// for demangling the result of type_info.name() -// with msvc, type_info.name() is already demangled -#ifdef __GNUC__ -#include -#endif // __GNUC__ - -#include -#include - -namespace unittest -{ - -#if __GNUC__ && !__NVCOMPILER_CUDA__ -inline std::string demangle(const char* name) -{ - int status = 0; - char* realname = abi::__cxa_demangle(name, 0, 0, &status); - std::string result(realname); - std::free(realname); - - return result; -} -#else -inline std::string demangle(const char* name) -{ - return name; -} -#endif - -} // end unittest - diff --git a/spaces/CVPR/LIVE/thrust/testing/unittest/unittest.h b/spaces/CVPR/LIVE/thrust/testing/unittest/unittest.h deleted file mode 100644 index 49c9daf429ade8877027382a22712a42677e6043..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/testing/unittest/unittest.h +++ /dev/null @@ -1,11 +0,0 @@ -#pragma once - -// this is the only header included by unittests -// it pulls in all the others used for unittesting - -#include -#include -#include -#include -#include - diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/assignment_operator.h b/spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/assignment_operator.h deleted file mode 100644 index a2f18339bc3956871e63f81b697cfd87d065ad62..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/assignment_operator.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include -#include -#include - -namespace thrust -{ - -// XXX WAR circular inclusion with this forward declaration -template struct binary_function; - -namespace detail -{ -namespace functional -{ - -// XXX WAR circular inclusion with this forward declaration -template struct as_actor; - -// there's no standard assign functional, so roll an ad hoc one here -struct assign -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) = THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) = THRUST_FWD(t2)) - { - return THRUST_FWD(t1) = THRUST_FWD(t2); - } -}; - -template - struct assign_result -{ - typedef actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > - > type; -}; // end assign_result - -template - __host__ __device__ - typename assign_result::type - do_assign(const actor &_1, const T &_2) -{ - return compose(transparent_binary_operator(), - _1, - as_actor::convert(_2)); -} // end do_assign() - -} // end functional -} // end detail -} // end thrust - diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/say.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/say.py deleted file mode 100644 index 727983d12bf334205550a54bcd69a7a36824eda4..0000000000000000000000000000000000000000 --- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/say.py +++ /dev/null @@ -1,41 +0,0 @@ -""" Text to speech module """ -import threading -from threading import Semaphore - -from autogpt.config import Config -from autogpt.speech.brian import BrianSpeech -from autogpt.speech.eleven_labs import ElevenLabsSpeech -from autogpt.speech.gtts import GTTSVoice -from autogpt.speech.macos_tts import MacOSTTS - -CFG = Config() -DEFAULT_VOICE_ENGINE = GTTSVoice() -VOICE_ENGINE = None -if CFG.elevenlabs_api_key: - VOICE_ENGINE = ElevenLabsSpeech() -elif CFG.use_mac_os_tts == "True": - VOICE_ENGINE = MacOSTTS() -elif CFG.use_brian_tts == "True": - VOICE_ENGINE = BrianSpeech() -else: - VOICE_ENGINE = GTTSVoice() - - -QUEUE_SEMAPHORE = Semaphore( - 1 -) # The amount of sounds to queue before blocking the main thread - - -def say_text(text: str, voice_index: int = 0) -> None: - """Speak the given text using the given voice index""" - - def speak() -> None: - success = VOICE_ENGINE.say(text, voice_index) - if not success: - DEFAULT_VOICE_ENGINE.say(text) - - QUEUE_SEMAPHORE.release() - - QUEUE_SEMAPHORE.acquire(True) - thread = threading.Thread(target=speak) - thread.start() diff --git a/spaces/Cicooo/vits-uma-genshin-honkai/mel_processing.py b/spaces/Cicooo/vits-uma-genshin-honkai/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/Cicooo/vits-uma-genshin-honkai/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/Cpp4App/Cpp4App/CDM/detect_compo/deprecated/ocr_classify_text.py b/spaces/Cpp4App/Cpp4App/CDM/detect_compo/deprecated/ocr_classify_text.py deleted file mode 100644 index ae9c63604ea88e7289168557069e80640092815f..0000000000000000000000000000000000000000 --- a/spaces/Cpp4App/Cpp4App/CDM/detect_compo/deprecated/ocr_classify_text.py +++ /dev/null @@ -1,113 +0,0 @@ -import pytesseract as pyt -import cv2 - -import lib_ip.ip_draw as draw -from config.CONFIG_UIED import Config - -C = Config() - - -def is_text(img, min_word_area, show=False): - broad = img.copy() - area_word = 0 - area_total = img.shape[0] * img.shape[1] - - try: - # ocr text detection - data = pyt.image_to_data(img).split('\n') - except: - print(img.shape) - return -1 - word = [] - for d in data[1:]: - d = d.split() - if d[-1] != '-1': - if d[-1] != '-' and d[-1] != '—' and int(d[-3]) < 50 and int(d[-4]) < 100: - word.append(d) - t_l = (int(d[-6]), int(d[-5])) - b_r = (int(d[-6]) + int(d[-4]), int(d[-5]) + int(d[-3])) - area_word += int(d[-4]) * int(d[-3]) - cv2.rectangle(broad, t_l, b_r, (0,0,255), 1) - - if show: - for d in word: print(d) - print(area_word/area_total) - cv2.imshow('a', broad) - cv2.waitKey(0) - cv2.destroyAllWindows() - # no text in this clip or relatively small text area - if len(word) == 0 or area_word/area_total < min_word_area: - return False - return True - - -def text_detection(org, img_clean): - try: - data = pyt.image_to_data(img_clean).split('\n') - except: - return org, None - corners_word = [] - for d in data[1:]: - d = d.split() - if d[-1] != '-1': - if d[-1] != '-' and d[-1] != '—' and 5 < int(d[-3]) < 40 and 5 < int(d[-4]) < 100: - t_l = (int(d[-6]), int(d[-5])) - b_r = (int(d[-6]) + int(d[-4]), int(d[-5]) + int(d[-3])) - corners_word.append((t_l, b_r)) - return corners_word - - -# def text_merge_word_into_line(org, corners_word, max_words_gap=C.THRESHOLD_TEXT_MAX_WORD_GAP): -# -# def is_in_line(word): -# for i in range(len(lines)): -# line = lines[i] -# # at the same row -# if abs(line['center'][1] - word['center'][1]) < max_words_gap: -# # small gap between words -# if (abs(line['center'][0] - word['center'][0]) - abs(line['width']/2 + word['width']/2)) < max_words_gap: -# return i -# return -1 -# -# def merge_line(word, index): -# line = lines[index] -# # on the left -# if word['center'][0] < line['center'][0]: -# line['col_min'] = word['col_min'] -# # on the right -# else: -# line['col_max'] = word['col_max'] -# line['row_min'] = min(line['row_min'], word['row_min']) -# line['row_max'] = max(line['row_max'], word['row_max']) -# line['width'] = line['col_max'] - line['col_min'] -# line['height'] = line['row_max'] - line['row_min'] -# line['center'] = ((line['col_max'] + line['col_min'])/2, (line['row_max'] + line['row_min'])/2) -# -# words = [] -# for corner in corners_word: -# word = {} -# (top_left, bottom_right) = corner -# (col_min, row_min) = top_left -# (col_max, row_max) = bottom_right -# word['col_min'], word['col_max'], word['row_min'], word['row_max'] = col_min, col_max, row_min, row_max -# word['height'] = row_max - row_min -# word['width'] = col_max - col_min -# word['center'] = ((col_max + col_min)/2, (row_max + row_min)/2) -# words.append(word) -# -# lines = [] -# for word in words: -# line_index = is_in_line(word) -# # word is in current line -# if line_index != -1: -# merge_line(word, line_index) -# # word is not in current line -# else: -# # this single word as a new line -# lines.append(word) -# -# corners_line = [] -# for l in lines: -# corners_line.append(((l['col_min'], l['row_min']), (l['col_max'], l['row_max']))) -# return corners_line - diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/multipart.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/multipart.py deleted file mode 100644 index 73801f459aa274ca6aae7bf28a2c5bb3bf075d11..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/multipart.py +++ /dev/null @@ -1,961 +0,0 @@ -import base64 -import binascii -import json -import re -import uuid -import warnings -import zlib -from collections import deque -from types import TracebackType -from typing import ( - TYPE_CHECKING, - Any, - AsyncIterator, - Deque, - Dict, - Iterator, - List, - Mapping, - Optional, - Sequence, - Tuple, - Type, - Union, - cast, -) -from urllib.parse import parse_qsl, unquote, urlencode - -from multidict import CIMultiDict, CIMultiDictProxy, MultiMapping - -from .hdrs import ( - CONTENT_DISPOSITION, - CONTENT_ENCODING, - CONTENT_LENGTH, - CONTENT_TRANSFER_ENCODING, - CONTENT_TYPE, -) -from .helpers import CHAR, TOKEN, parse_mimetype, reify -from .http import HeadersParser -from .payload import ( - JsonPayload, - LookupError, - Order, - Payload, - StringPayload, - get_payload, - payload_type, -) -from .streams import StreamReader - -__all__ = ( - "MultipartReader", - "MultipartWriter", - "BodyPartReader", - "BadContentDispositionHeader", - "BadContentDispositionParam", - "parse_content_disposition", - "content_disposition_filename", -) - - -if TYPE_CHECKING: # pragma: no cover - from .client_reqrep import ClientResponse - - -class BadContentDispositionHeader(RuntimeWarning): - pass - - -class BadContentDispositionParam(RuntimeWarning): - pass - - -def parse_content_disposition( - header: Optional[str], -) -> Tuple[Optional[str], Dict[str, str]]: - def is_token(string: str) -> bool: - return bool(string) and TOKEN >= set(string) - - def is_quoted(string: str) -> bool: - return string[0] == string[-1] == '"' - - def is_rfc5987(string: str) -> bool: - return is_token(string) and string.count("'") == 2 - - def is_extended_param(string: str) -> bool: - return string.endswith("*") - - def is_continuous_param(string: str) -> bool: - pos = string.find("*") + 1 - if not pos: - return False - substring = string[pos:-1] if string.endswith("*") else string[pos:] - return substring.isdigit() - - def unescape(text: str, *, chars: str = "".join(map(re.escape, CHAR))) -> str: - return re.sub(f"\\\\([{chars}])", "\\1", text) - - if not header: - return None, {} - - disptype, *parts = header.split(";") - if not is_token(disptype): - warnings.warn(BadContentDispositionHeader(header)) - return None, {} - - params: Dict[str, str] = {} - while parts: - item = parts.pop(0) - - if "=" not in item: - warnings.warn(BadContentDispositionHeader(header)) - return None, {} - - key, value = item.split("=", 1) - key = key.lower().strip() - value = value.lstrip() - - if key in params: - warnings.warn(BadContentDispositionHeader(header)) - return None, {} - - if not is_token(key): - warnings.warn(BadContentDispositionParam(item)) - continue - - elif is_continuous_param(key): - if is_quoted(value): - value = unescape(value[1:-1]) - elif not is_token(value): - warnings.warn(BadContentDispositionParam(item)) - continue - - elif is_extended_param(key): - if is_rfc5987(value): - encoding, _, value = value.split("'", 2) - encoding = encoding or "utf-8" - else: - warnings.warn(BadContentDispositionParam(item)) - continue - - try: - value = unquote(value, encoding, "strict") - except UnicodeDecodeError: # pragma: nocover - warnings.warn(BadContentDispositionParam(item)) - continue - - else: - failed = True - if is_quoted(value): - failed = False - value = unescape(value[1:-1].lstrip("\\/")) - elif is_token(value): - failed = False - elif parts: - # maybe just ; in filename, in any case this is just - # one case fix, for proper fix we need to redesign parser - _value = f"{value};{parts[0]}" - if is_quoted(_value): - parts.pop(0) - value = unescape(_value[1:-1].lstrip("\\/")) - failed = False - - if failed: - warnings.warn(BadContentDispositionHeader(header)) - return None, {} - - params[key] = value - - return disptype.lower(), params - - -def content_disposition_filename( - params: Mapping[str, str], name: str = "filename" -) -> Optional[str]: - name_suf = "%s*" % name - if not params: - return None - elif name_suf in params: - return params[name_suf] - elif name in params: - return params[name] - else: - parts = [] - fnparams = sorted( - (key, value) for key, value in params.items() if key.startswith(name_suf) - ) - for num, (key, value) in enumerate(fnparams): - _, tail = key.split("*", 1) - if tail.endswith("*"): - tail = tail[:-1] - if tail == str(num): - parts.append(value) - else: - break - if not parts: - return None - value = "".join(parts) - if "'" in value: - encoding, _, value = value.split("'", 2) - encoding = encoding or "utf-8" - return unquote(value, encoding, "strict") - return value - - -class MultipartResponseWrapper: - """Wrapper around the MultipartReader. - - It takes care about - underlying connection and close it when it needs in. - """ - - def __init__( - self, - resp: "ClientResponse", - stream: "MultipartReader", - ) -> None: - self.resp = resp - self.stream = stream - - def __aiter__(self) -> "MultipartResponseWrapper": - return self - - async def __anext__( - self, - ) -> Union["MultipartReader", "BodyPartReader"]: - part = await self.next() - if part is None: - raise StopAsyncIteration - return part - - def at_eof(self) -> bool: - """Returns True when all response data had been read.""" - return self.resp.content.at_eof() - - async def next( - self, - ) -> Optional[Union["MultipartReader", "BodyPartReader"]]: - """Emits next multipart reader object.""" - item = await self.stream.next() - if self.stream.at_eof(): - await self.release() - return item - - async def release(self) -> None: - """Release the connection gracefully. - - All remaining content is read to the void. - """ - await self.resp.release() - - -class BodyPartReader: - """Multipart reader for single body part.""" - - chunk_size = 8192 - - def __init__( - self, boundary: bytes, headers: "CIMultiDictProxy[str]", content: StreamReader - ) -> None: - self.headers = headers - self._boundary = boundary - self._content = content - self._at_eof = False - length = self.headers.get(CONTENT_LENGTH, None) - self._length = int(length) if length is not None else None - self._read_bytes = 0 - # TODO: typeing.Deque is not supported by Python 3.5 - self._unread: Deque[bytes] = deque() - self._prev_chunk: Optional[bytes] = None - self._content_eof = 0 - self._cache: Dict[str, Any] = {} - - def __aiter__(self) -> AsyncIterator["BodyPartReader"]: - return self # type: ignore[return-value] - - async def __anext__(self) -> bytes: - part = await self.next() - if part is None: - raise StopAsyncIteration - return part - - async def next(self) -> Optional[bytes]: - item = await self.read() - if not item: - return None - return item - - async def read(self, *, decode: bool = False) -> bytes: - """Reads body part data. - - decode: Decodes data following by encoding - method from Content-Encoding header. If it missed - data remains untouched - """ - if self._at_eof: - return b"" - data = bytearray() - while not self._at_eof: - data.extend(await self.read_chunk(self.chunk_size)) - if decode: - return self.decode(data) - return data - - async def read_chunk(self, size: int = chunk_size) -> bytes: - """Reads body part content chunk of the specified size. - - size: chunk size - """ - if self._at_eof: - return b"" - if self._length: - chunk = await self._read_chunk_from_length(size) - else: - chunk = await self._read_chunk_from_stream(size) - - self._read_bytes += len(chunk) - if self._read_bytes == self._length: - self._at_eof = True - if self._at_eof: - clrf = await self._content.readline() - assert ( - b"\r\n" == clrf - ), "reader did not read all the data or it is malformed" - return chunk - - async def _read_chunk_from_length(self, size: int) -> bytes: - # Reads body part content chunk of the specified size. - # The body part must has Content-Length header with proper value. - assert self._length is not None, "Content-Length required for chunked read" - chunk_size = min(size, self._length - self._read_bytes) - chunk = await self._content.read(chunk_size) - return chunk - - async def _read_chunk_from_stream(self, size: int) -> bytes: - # Reads content chunk of body part with unknown length. - # The Content-Length header for body part is not necessary. - assert ( - size >= len(self._boundary) + 2 - ), "Chunk size must be greater or equal than boundary length + 2" - first_chunk = self._prev_chunk is None - if first_chunk: - self._prev_chunk = await self._content.read(size) - - chunk = await self._content.read(size) - self._content_eof += int(self._content.at_eof()) - assert self._content_eof < 3, "Reading after EOF" - assert self._prev_chunk is not None - window = self._prev_chunk + chunk - sub = b"\r\n" + self._boundary - if first_chunk: - idx = window.find(sub) - else: - idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub))) - if idx >= 0: - # pushing boundary back to content - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - self._content.unread_data(window[idx:]) - if size > idx: - self._prev_chunk = self._prev_chunk[:idx] - chunk = window[len(self._prev_chunk) : idx] - if not chunk: - self._at_eof = True - result = self._prev_chunk - self._prev_chunk = chunk - return result - - async def readline(self) -> bytes: - """Reads body part by line by line.""" - if self._at_eof: - return b"" - - if self._unread: - line = self._unread.popleft() - else: - line = await self._content.readline() - - if line.startswith(self._boundary): - # the very last boundary may not come with \r\n, - # so set single rules for everyone - sline = line.rstrip(b"\r\n") - boundary = self._boundary - last_boundary = self._boundary + b"--" - # ensure that we read exactly the boundary, not something alike - if sline == boundary or sline == last_boundary: - self._at_eof = True - self._unread.append(line) - return b"" - else: - next_line = await self._content.readline() - if next_line.startswith(self._boundary): - line = line[:-2] # strip CRLF but only once - self._unread.append(next_line) - - return line - - async def release(self) -> None: - """Like read(), but reads all the data to the void.""" - if self._at_eof: - return - while not self._at_eof: - await self.read_chunk(self.chunk_size) - - async def text(self, *, encoding: Optional[str] = None) -> str: - """Like read(), but assumes that body part contains text data.""" - data = await self.read(decode=True) - # see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA - # and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA - encoding = encoding or self.get_charset(default="utf-8") - return data.decode(encoding) - - async def json(self, *, encoding: Optional[str] = None) -> Optional[Dict[str, Any]]: - """Like read(), but assumes that body parts contains JSON data.""" - data = await self.read(decode=True) - if not data: - return None - encoding = encoding or self.get_charset(default="utf-8") - return cast(Dict[str, Any], json.loads(data.decode(encoding))) - - async def form(self, *, encoding: Optional[str] = None) -> List[Tuple[str, str]]: - """Like read(), but assumes that body parts contain form urlencoded data.""" - data = await self.read(decode=True) - if not data: - return [] - if encoding is not None: - real_encoding = encoding - else: - real_encoding = self.get_charset(default="utf-8") - return parse_qsl( - data.rstrip().decode(real_encoding), - keep_blank_values=True, - encoding=real_encoding, - ) - - def at_eof(self) -> bool: - """Returns True if the boundary was reached or False otherwise.""" - return self._at_eof - - def decode(self, data: bytes) -> bytes: - """Decodes data. - - Decoding is done according the specified Content-Encoding - or Content-Transfer-Encoding headers value. - """ - if CONTENT_TRANSFER_ENCODING in self.headers: - data = self._decode_content_transfer(data) - if CONTENT_ENCODING in self.headers: - return self._decode_content(data) - return data - - def _decode_content(self, data: bytes) -> bytes: - encoding = self.headers.get(CONTENT_ENCODING, "").lower() - - if encoding == "deflate": - return zlib.decompress(data, -zlib.MAX_WBITS) - elif encoding == "gzip": - return zlib.decompress(data, 16 + zlib.MAX_WBITS) - elif encoding == "identity": - return data - else: - raise RuntimeError(f"unknown content encoding: {encoding}") - - def _decode_content_transfer(self, data: bytes) -> bytes: - encoding = self.headers.get(CONTENT_TRANSFER_ENCODING, "").lower() - - if encoding == "base64": - return base64.b64decode(data) - elif encoding == "quoted-printable": - return binascii.a2b_qp(data) - elif encoding in ("binary", "8bit", "7bit"): - return data - else: - raise RuntimeError( - "unknown content transfer encoding: {}" "".format(encoding) - ) - - def get_charset(self, default: str) -> str: - """Returns charset parameter from Content-Type header or default.""" - ctype = self.headers.get(CONTENT_TYPE, "") - mimetype = parse_mimetype(ctype) - return mimetype.parameters.get("charset", default) - - @reify - def name(self) -> Optional[str]: - """Returns name specified in Content-Disposition header. - - If the header is missing or malformed, returns None. - """ - _, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION)) - return content_disposition_filename(params, "name") - - @reify - def filename(self) -> Optional[str]: - """Returns filename specified in Content-Disposition header. - - Returns None if the header is missing or malformed. - """ - _, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION)) - return content_disposition_filename(params, "filename") - - -@payload_type(BodyPartReader, order=Order.try_first) -class BodyPartReaderPayload(Payload): - def __init__(self, value: BodyPartReader, *args: Any, **kwargs: Any) -> None: - super().__init__(value, *args, **kwargs) - - params: Dict[str, str] = {} - if value.name is not None: - params["name"] = value.name - if value.filename is not None: - params["filename"] = value.filename - - if params: - self.set_content_disposition("attachment", True, **params) - - async def write(self, writer: Any) -> None: - field = self._value - chunk = await field.read_chunk(size=2**16) - while chunk: - await writer.write(field.decode(chunk)) - chunk = await field.read_chunk(size=2**16) - - -class MultipartReader: - """Multipart body reader.""" - - #: Response wrapper, used when multipart readers constructs from response. - response_wrapper_cls = MultipartResponseWrapper - #: Multipart reader class, used to handle multipart/* body parts. - #: None points to type(self) - multipart_reader_cls = None - #: Body part reader class for non multipart/* content types. - part_reader_cls = BodyPartReader - - def __init__(self, headers: Mapping[str, str], content: StreamReader) -> None: - self.headers = headers - self._boundary = ("--" + self._get_boundary()).encode() - self._content = content - self._last_part: Optional[Union["MultipartReader", BodyPartReader]] = None - self._at_eof = False - self._at_bof = True - self._unread: List[bytes] = [] - - def __aiter__( - self, - ) -> AsyncIterator["BodyPartReader"]: - return self # type: ignore[return-value] - - async def __anext__( - self, - ) -> Optional[Union["MultipartReader", BodyPartReader]]: - part = await self.next() - if part is None: - raise StopAsyncIteration - return part - - @classmethod - def from_response( - cls, - response: "ClientResponse", - ) -> MultipartResponseWrapper: - """Constructs reader instance from HTTP response. - - :param response: :class:`~aiohttp.client.ClientResponse` instance - """ - obj = cls.response_wrapper_cls( - response, cls(response.headers, response.content) - ) - return obj - - def at_eof(self) -> bool: - """Returns True if the final boundary was reached, false otherwise.""" - return self._at_eof - - async def next( - self, - ) -> Optional[Union["MultipartReader", BodyPartReader]]: - """Emits the next multipart body part.""" - # So, if we're at BOF, we need to skip till the boundary. - if self._at_eof: - return None - await self._maybe_release_last_part() - if self._at_bof: - await self._read_until_first_boundary() - self._at_bof = False - else: - await self._read_boundary() - if self._at_eof: # we just read the last boundary, nothing to do there - return None - self._last_part = await self.fetch_next_part() - return self._last_part - - async def release(self) -> None: - """Reads all the body parts to the void till the final boundary.""" - while not self._at_eof: - item = await self.next() - if item is None: - break - await item.release() - - async def fetch_next_part( - self, - ) -> Union["MultipartReader", BodyPartReader]: - """Returns the next body part reader.""" - headers = await self._read_headers() - return self._get_part_reader(headers) - - def _get_part_reader( - self, - headers: "CIMultiDictProxy[str]", - ) -> Union["MultipartReader", BodyPartReader]: - """Dispatches the response by the `Content-Type` header. - - Returns a suitable reader instance. - - :param dict headers: Response headers - """ - ctype = headers.get(CONTENT_TYPE, "") - mimetype = parse_mimetype(ctype) - - if mimetype.type == "multipart": - if self.multipart_reader_cls is None: - return type(self)(headers, self._content) - return self.multipart_reader_cls(headers, self._content) - else: - return self.part_reader_cls(self._boundary, headers, self._content) - - def _get_boundary(self) -> str: - mimetype = parse_mimetype(self.headers[CONTENT_TYPE]) - - assert mimetype.type == "multipart", "multipart/* content type expected" - - if "boundary" not in mimetype.parameters: - raise ValueError( - "boundary missed for Content-Type: %s" % self.headers[CONTENT_TYPE] - ) - - boundary = mimetype.parameters["boundary"] - if len(boundary) > 70: - raise ValueError("boundary %r is too long (70 chars max)" % boundary) - - return boundary - - async def _readline(self) -> bytes: - if self._unread: - return self._unread.pop() - return await self._content.readline() - - async def _read_until_first_boundary(self) -> None: - while True: - chunk = await self._readline() - if chunk == b"": - raise ValueError( - "Could not find starting boundary %r" % (self._boundary) - ) - chunk = chunk.rstrip() - if chunk == self._boundary: - return - elif chunk == self._boundary + b"--": - self._at_eof = True - return - - async def _read_boundary(self) -> None: - chunk = (await self._readline()).rstrip() - if chunk == self._boundary: - pass - elif chunk == self._boundary + b"--": - self._at_eof = True - epilogue = await self._readline() - next_line = await self._readline() - - # the epilogue is expected and then either the end of input or the - # parent multipart boundary, if the parent boundary is found then - # it should be marked as unread and handed to the parent for - # processing - if next_line[:2] == b"--": - self._unread.append(next_line) - # otherwise the request is likely missing an epilogue and both - # lines should be passed to the parent for processing - # (this handles the old behavior gracefully) - else: - self._unread.extend([next_line, epilogue]) - else: - raise ValueError(f"Invalid boundary {chunk!r}, expected {self._boundary!r}") - - async def _read_headers(self) -> "CIMultiDictProxy[str]": - lines = [b""] - while True: - chunk = await self._content.readline() - chunk = chunk.strip() - lines.append(chunk) - if not chunk: - break - parser = HeadersParser() - headers, raw_headers = parser.parse_headers(lines) - return headers - - async def _maybe_release_last_part(self) -> None: - """Ensures that the last read body part is read completely.""" - if self._last_part is not None: - if not self._last_part.at_eof(): - await self._last_part.release() - self._unread.extend(self._last_part._unread) - self._last_part = None - - -_Part = Tuple[Payload, str, str] - - -class MultipartWriter(Payload): - """Multipart body writer.""" - - def __init__(self, subtype: str = "mixed", boundary: Optional[str] = None) -> None: - boundary = boundary if boundary is not None else uuid.uuid4().hex - # The underlying Payload API demands a str (utf-8), not bytes, - # so we need to ensure we don't lose anything during conversion. - # As a result, require the boundary to be ASCII only. - # In both situations. - - try: - self._boundary = boundary.encode("ascii") - except UnicodeEncodeError: - raise ValueError("boundary should contain ASCII only chars") from None - ctype = f"multipart/{subtype}; boundary={self._boundary_value}" - - super().__init__(None, content_type=ctype) - - self._parts: List[_Part] = [] - - def __enter__(self) -> "MultipartWriter": - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - pass - - def __iter__(self) -> Iterator[_Part]: - return iter(self._parts) - - def __len__(self) -> int: - return len(self._parts) - - def __bool__(self) -> bool: - return True - - _valid_tchar_regex = re.compile(rb"\A[!#$%&'*+\-.^_`|~\w]+\Z") - _invalid_qdtext_char_regex = re.compile(rb"[\x00-\x08\x0A-\x1F\x7F]") - - @property - def _boundary_value(self) -> str: - """Wrap boundary parameter value in quotes, if necessary. - - Reads self.boundary and returns a unicode sting. - """ - # Refer to RFCs 7231, 7230, 5234. - # - # parameter = token "=" ( token / quoted-string ) - # token = 1*tchar - # quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE - # qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text - # obs-text = %x80-FF - # quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) - # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" - # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" - # / DIGIT / ALPHA - # ; any VCHAR, except delimiters - # VCHAR = %x21-7E - value = self._boundary - if re.match(self._valid_tchar_regex, value): - return value.decode("ascii") # cannot fail - - if re.search(self._invalid_qdtext_char_regex, value): - raise ValueError("boundary value contains invalid characters") - - # escape %x5C and %x22 - quoted_value_content = value.replace(b"\\", b"\\\\") - quoted_value_content = quoted_value_content.replace(b'"', b'\\"') - - return '"' + quoted_value_content.decode("ascii") + '"' - - @property - def boundary(self) -> str: - return self._boundary.decode("ascii") - - def append(self, obj: Any, headers: Optional[MultiMapping[str]] = None) -> Payload: - if headers is None: - headers = CIMultiDict() - - if isinstance(obj, Payload): - obj.headers.update(headers) - return self.append_payload(obj) - else: - try: - payload = get_payload(obj, headers=headers) - except LookupError: - raise TypeError("Cannot create payload from %r" % obj) - else: - return self.append_payload(payload) - - def append_payload(self, payload: Payload) -> Payload: - """Adds a new body part to multipart writer.""" - # compression - encoding: Optional[str] = payload.headers.get( - CONTENT_ENCODING, - "", - ).lower() - if encoding and encoding not in ("deflate", "gzip", "identity"): - raise RuntimeError(f"unknown content encoding: {encoding}") - if encoding == "identity": - encoding = None - - # te encoding - te_encoding: Optional[str] = payload.headers.get( - CONTENT_TRANSFER_ENCODING, - "", - ).lower() - if te_encoding not in ("", "base64", "quoted-printable", "binary"): - raise RuntimeError( - "unknown content transfer encoding: {}" "".format(te_encoding) - ) - if te_encoding == "binary": - te_encoding = None - - # size - size = payload.size - if size is not None and not (encoding or te_encoding): - payload.headers[CONTENT_LENGTH] = str(size) - - self._parts.append((payload, encoding, te_encoding)) # type: ignore[arg-type] - return payload - - def append_json( - self, obj: Any, headers: Optional[MultiMapping[str]] = None - ) -> Payload: - """Helper to append JSON part.""" - if headers is None: - headers = CIMultiDict() - - return self.append_payload(JsonPayload(obj, headers=headers)) - - def append_form( - self, - obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]], - headers: Optional[MultiMapping[str]] = None, - ) -> Payload: - """Helper to append form urlencoded part.""" - assert isinstance(obj, (Sequence, Mapping)) - - if headers is None: - headers = CIMultiDict() - - if isinstance(obj, Mapping): - obj = list(obj.items()) - data = urlencode(obj, doseq=True) - - return self.append_payload( - StringPayload( - data, headers=headers, content_type="application/x-www-form-urlencoded" - ) - ) - - @property - def size(self) -> Optional[int]: - """Size of the payload.""" - total = 0 - for part, encoding, te_encoding in self._parts: - if encoding or te_encoding or part.size is None: - return None - - total += int( - 2 - + len(self._boundary) - + 2 - + part.size # b'--'+self._boundary+b'\r\n' - + len(part._binary_headers) - + 2 # b'\r\n' - ) - - total += 2 + len(self._boundary) + 4 # b'--'+self._boundary+b'--\r\n' - return total - - async def write(self, writer: Any, close_boundary: bool = True) -> None: - """Write body.""" - for part, encoding, te_encoding in self._parts: - await writer.write(b"--" + self._boundary + b"\r\n") - await writer.write(part._binary_headers) - - if encoding or te_encoding: - w = MultipartPayloadWriter(writer) - if encoding: - w.enable_compression(encoding) - if te_encoding: - w.enable_encoding(te_encoding) - await part.write(w) # type: ignore[arg-type] - await w.write_eof() - else: - await part.write(writer) - - await writer.write(b"\r\n") - - if close_boundary: - await writer.write(b"--" + self._boundary + b"--\r\n") - - -class MultipartPayloadWriter: - def __init__(self, writer: Any) -> None: - self._writer = writer - self._encoding: Optional[str] = None - self._compress: Any = None - self._encoding_buffer: Optional[bytearray] = None - - def enable_encoding(self, encoding: str) -> None: - if encoding == "base64": - self._encoding = encoding - self._encoding_buffer = bytearray() - elif encoding == "quoted-printable": - self._encoding = "quoted-printable" - - def enable_compression( - self, encoding: str = "deflate", strategy: int = zlib.Z_DEFAULT_STRATEGY - ) -> None: - zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else -zlib.MAX_WBITS - self._compress = zlib.compressobj(wbits=zlib_mode, strategy=strategy) - - async def write_eof(self) -> None: - if self._compress is not None: - chunk = self._compress.flush() - if chunk: - self._compress = None - await self.write(chunk) - - if self._encoding == "base64": - if self._encoding_buffer: - await self._writer.write(base64.b64encode(self._encoding_buffer)) - - async def write(self, chunk: bytes) -> None: - if self._compress is not None: - if chunk: - chunk = self._compress.compress(chunk) - if not chunk: - return - - if self._encoding == "base64": - buf = self._encoding_buffer - assert buf is not None - buf.extend(chunk) - - if buf: - div, mod = divmod(len(buf), 3) - enc_chunk, self._encoding_buffer = (buf[: div * 3], buf[div * 3 :]) - if enc_chunk: - b64chunk = base64.b64encode(enc_chunk) - await self._writer.write(b64chunk) - elif self._encoding == "quoted-printable": - await self._writer.write(binascii.b2a_qp(chunk)) - else: - await self._writer.write(chunk) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/testing.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/testing.py deleted file mode 100644 index e0df0d2a657fe19523957b85964b9956e5c78a30..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/testing.py +++ /dev/null @@ -1,479 +0,0 @@ -import contextlib -import io -import os -import shlex -import shutil -import sys -import tempfile -import typing as t -from types import TracebackType - -from . import formatting -from . import termui -from . import utils -from ._compat import _find_binary_reader - -if t.TYPE_CHECKING: - from .core import BaseCommand - - -class EchoingStdin: - def __init__(self, input: t.BinaryIO, output: t.BinaryIO) -> None: - self._input = input - self._output = output - self._paused = False - - def __getattr__(self, x: str) -> t.Any: - return getattr(self._input, x) - - def _echo(self, rv: bytes) -> bytes: - if not self._paused: - self._output.write(rv) - - return rv - - def read(self, n: int = -1) -> bytes: - return self._echo(self._input.read(n)) - - def read1(self, n: int = -1) -> bytes: - return self._echo(self._input.read1(n)) # type: ignore - - def readline(self, n: int = -1) -> bytes: - return self._echo(self._input.readline(n)) - - def readlines(self) -> t.List[bytes]: - return [self._echo(x) for x in self._input.readlines()] - - def __iter__(self) -> t.Iterator[bytes]: - return iter(self._echo(x) for x in self._input) - - def __repr__(self) -> str: - return repr(self._input) - - -@contextlib.contextmanager -def _pause_echo(stream: t.Optional[EchoingStdin]) -> t.Iterator[None]: - if stream is None: - yield - else: - stream._paused = True - yield - stream._paused = False - - -class _NamedTextIOWrapper(io.TextIOWrapper): - def __init__( - self, buffer: t.BinaryIO, name: str, mode: str, **kwargs: t.Any - ) -> None: - super().__init__(buffer, **kwargs) - self._name = name - self._mode = mode - - @property - def name(self) -> str: - return self._name - - @property - def mode(self) -> str: - return self._mode - - -def make_input_stream( - input: t.Optional[t.Union[str, bytes, t.IO[t.Any]]], charset: str -) -> t.BinaryIO: - # Is already an input stream. - if hasattr(input, "read"): - rv = _find_binary_reader(t.cast(t.IO[t.Any], input)) - - if rv is not None: - return rv - - raise TypeError("Could not find binary reader for input stream.") - - if input is None: - input = b"" - elif isinstance(input, str): - input = input.encode(charset) - - return io.BytesIO(input) - - -class Result: - """Holds the captured result of an invoked CLI script.""" - - def __init__( - self, - runner: "CliRunner", - stdout_bytes: bytes, - stderr_bytes: t.Optional[bytes], - return_value: t.Any, - exit_code: int, - exception: t.Optional[BaseException], - exc_info: t.Optional[ - t.Tuple[t.Type[BaseException], BaseException, TracebackType] - ] = None, - ): - #: The runner that created the result - self.runner = runner - #: The standard output as bytes. - self.stdout_bytes = stdout_bytes - #: The standard error as bytes, or None if not available - self.stderr_bytes = stderr_bytes - #: The value returned from the invoked command. - #: - #: .. versionadded:: 8.0 - self.return_value = return_value - #: The exit code as integer. - self.exit_code = exit_code - #: The exception that happened if one did. - self.exception = exception - #: The traceback - self.exc_info = exc_info - - @property - def output(self) -> str: - """The (standard) output as unicode string.""" - return self.stdout - - @property - def stdout(self) -> str: - """The standard output as unicode string.""" - return self.stdout_bytes.decode(self.runner.charset, "replace").replace( - "\r\n", "\n" - ) - - @property - def stderr(self) -> str: - """The standard error as unicode string.""" - if self.stderr_bytes is None: - raise ValueError("stderr not separately captured") - return self.stderr_bytes.decode(self.runner.charset, "replace").replace( - "\r\n", "\n" - ) - - def __repr__(self) -> str: - exc_str = repr(self.exception) if self.exception else "okay" - return f"<{type(self).__name__} {exc_str}>" - - -class CliRunner: - """The CLI runner provides functionality to invoke a Click command line - script for unittesting purposes in a isolated environment. This only - works in single-threaded systems without any concurrency as it changes the - global interpreter state. - - :param charset: the character set for the input and output data. - :param env: a dictionary with environment variables for overriding. - :param echo_stdin: if this is set to `True`, then reading from stdin writes - to stdout. This is useful for showing examples in - some circumstances. Note that regular prompts - will automatically echo the input. - :param mix_stderr: if this is set to `False`, then stdout and stderr are - preserved as independent streams. This is useful for - Unix-philosophy apps that have predictable stdout and - noisy stderr, such that each may be measured - independently - """ - - def __init__( - self, - charset: str = "utf-8", - env: t.Optional[t.Mapping[str, t.Optional[str]]] = None, - echo_stdin: bool = False, - mix_stderr: bool = True, - ) -> None: - self.charset = charset - self.env: t.Mapping[str, t.Optional[str]] = env or {} - self.echo_stdin = echo_stdin - self.mix_stderr = mix_stderr - - def get_default_prog_name(self, cli: "BaseCommand") -> str: - """Given a command object it will return the default program name - for it. The default is the `name` attribute or ``"root"`` if not - set. - """ - return cli.name or "root" - - def make_env( - self, overrides: t.Optional[t.Mapping[str, t.Optional[str]]] = None - ) -> t.Mapping[str, t.Optional[str]]: - """Returns the environment overrides for invoking a script.""" - rv = dict(self.env) - if overrides: - rv.update(overrides) - return rv - - @contextlib.contextmanager - def isolation( - self, - input: t.Optional[t.Union[str, bytes, t.IO[t.Any]]] = None, - env: t.Optional[t.Mapping[str, t.Optional[str]]] = None, - color: bool = False, - ) -> t.Iterator[t.Tuple[io.BytesIO, t.Optional[io.BytesIO]]]: - """A context manager that sets up the isolation for invoking of a - command line tool. This sets up stdin with the given input data - and `os.environ` with the overrides from the given dictionary. - This also rebinds some internals in Click to be mocked (like the - prompt functionality). - - This is automatically done in the :meth:`invoke` method. - - :param input: the input stream to put into sys.stdin. - :param env: the environment overrides as dictionary. - :param color: whether the output should contain color codes. The - application can still override this explicitly. - - .. versionchanged:: 8.0 - ``stderr`` is opened with ``errors="backslashreplace"`` - instead of the default ``"strict"``. - - .. versionchanged:: 4.0 - Added the ``color`` parameter. - """ - bytes_input = make_input_stream(input, self.charset) - echo_input = None - - old_stdin = sys.stdin - old_stdout = sys.stdout - old_stderr = sys.stderr - old_forced_width = formatting.FORCED_WIDTH - formatting.FORCED_WIDTH = 80 - - env = self.make_env(env) - - bytes_output = io.BytesIO() - - if self.echo_stdin: - bytes_input = echo_input = t.cast( - t.BinaryIO, EchoingStdin(bytes_input, bytes_output) - ) - - sys.stdin = text_input = _NamedTextIOWrapper( - bytes_input, encoding=self.charset, name="", mode="r" - ) - - if self.echo_stdin: - # Force unbuffered reads, otherwise TextIOWrapper reads a - # large chunk which is echoed early. - text_input._CHUNK_SIZE = 1 # type: ignore - - sys.stdout = _NamedTextIOWrapper( - bytes_output, encoding=self.charset, name="", mode="w" - ) - - bytes_error = None - if self.mix_stderr: - sys.stderr = sys.stdout - else: - bytes_error = io.BytesIO() - sys.stderr = _NamedTextIOWrapper( - bytes_error, - encoding=self.charset, - name="", - mode="w", - errors="backslashreplace", - ) - - @_pause_echo(echo_input) # type: ignore - def visible_input(prompt: t.Optional[str] = None) -> str: - sys.stdout.write(prompt or "") - val = text_input.readline().rstrip("\r\n") - sys.stdout.write(f"{val}\n") - sys.stdout.flush() - return val - - @_pause_echo(echo_input) # type: ignore - def hidden_input(prompt: t.Optional[str] = None) -> str: - sys.stdout.write(f"{prompt or ''}\n") - sys.stdout.flush() - return text_input.readline().rstrip("\r\n") - - @_pause_echo(echo_input) # type: ignore - def _getchar(echo: bool) -> str: - char = sys.stdin.read(1) - - if echo: - sys.stdout.write(char) - - sys.stdout.flush() - return char - - default_color = color - - def should_strip_ansi( - stream: t.Optional[t.IO[t.Any]] = None, color: t.Optional[bool] = None - ) -> bool: - if color is None: - return not default_color - return not color - - old_visible_prompt_func = termui.visible_prompt_func - old_hidden_prompt_func = termui.hidden_prompt_func - old__getchar_func = termui._getchar - old_should_strip_ansi = utils.should_strip_ansi # type: ignore - termui.visible_prompt_func = visible_input - termui.hidden_prompt_func = hidden_input - termui._getchar = _getchar - utils.should_strip_ansi = should_strip_ansi # type: ignore - - old_env = {} - try: - for key, value in env.items(): - old_env[key] = os.environ.get(key) - if value is None: - try: - del os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - yield (bytes_output, bytes_error) - finally: - for key, value in old_env.items(): - if value is None: - try: - del os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - sys.stdout = old_stdout - sys.stderr = old_stderr - sys.stdin = old_stdin - termui.visible_prompt_func = old_visible_prompt_func - termui.hidden_prompt_func = old_hidden_prompt_func - termui._getchar = old__getchar_func - utils.should_strip_ansi = old_should_strip_ansi # type: ignore - formatting.FORCED_WIDTH = old_forced_width - - def invoke( - self, - cli: "BaseCommand", - args: t.Optional[t.Union[str, t.Sequence[str]]] = None, - input: t.Optional[t.Union[str, bytes, t.IO[t.Any]]] = None, - env: t.Optional[t.Mapping[str, t.Optional[str]]] = None, - catch_exceptions: bool = True, - color: bool = False, - **extra: t.Any, - ) -> Result: - """Invokes a command in an isolated environment. The arguments are - forwarded directly to the command line script, the `extra` keyword - arguments are passed to the :meth:`~clickpkg.Command.main` function of - the command. - - This returns a :class:`Result` object. - - :param cli: the command to invoke - :param args: the arguments to invoke. It may be given as an iterable - or a string. When given as string it will be interpreted - as a Unix shell command. More details at - :func:`shlex.split`. - :param input: the input data for `sys.stdin`. - :param env: the environment overrides. - :param catch_exceptions: Whether to catch any other exceptions than - ``SystemExit``. - :param extra: the keyword arguments to pass to :meth:`main`. - :param color: whether the output should contain color codes. The - application can still override this explicitly. - - .. versionchanged:: 8.0 - The result object has the ``return_value`` attribute with - the value returned from the invoked command. - - .. versionchanged:: 4.0 - Added the ``color`` parameter. - - .. versionchanged:: 3.0 - Added the ``catch_exceptions`` parameter. - - .. versionchanged:: 3.0 - The result object has the ``exc_info`` attribute with the - traceback if available. - """ - exc_info = None - with self.isolation(input=input, env=env, color=color) as outstreams: - return_value = None - exception: t.Optional[BaseException] = None - exit_code = 0 - - if isinstance(args, str): - args = shlex.split(args) - - try: - prog_name = extra.pop("prog_name") - except KeyError: - prog_name = self.get_default_prog_name(cli) - - try: - return_value = cli.main(args=args or (), prog_name=prog_name, **extra) - except SystemExit as e: - exc_info = sys.exc_info() - e_code = t.cast(t.Optional[t.Union[int, t.Any]], e.code) - - if e_code is None: - e_code = 0 - - if e_code != 0: - exception = e - - if not isinstance(e_code, int): - sys.stdout.write(str(e_code)) - sys.stdout.write("\n") - e_code = 1 - - exit_code = e_code - - except Exception as e: - if not catch_exceptions: - raise - exception = e - exit_code = 1 - exc_info = sys.exc_info() - finally: - sys.stdout.flush() - stdout = outstreams[0].getvalue() - if self.mix_stderr: - stderr = None - else: - stderr = outstreams[1].getvalue() # type: ignore - - return Result( - runner=self, - stdout_bytes=stdout, - stderr_bytes=stderr, - return_value=return_value, - exit_code=exit_code, - exception=exception, - exc_info=exc_info, # type: ignore - ) - - @contextlib.contextmanager - def isolated_filesystem( - self, temp_dir: t.Optional[t.Union[str, "os.PathLike[str]"]] = None - ) -> t.Iterator[str]: - """A context manager that creates a temporary directory and - changes the current working directory to it. This isolates tests - that affect the contents of the CWD to prevent them from - interfering with each other. - - :param temp_dir: Create the temporary directory under this - directory. If given, the created directory is not removed - when exiting. - - .. versionchanged:: 8.0 - Added the ``temp_dir`` parameter. - """ - cwd = os.getcwd() - dt = tempfile.mkdtemp(dir=temp_dir) - os.chdir(dt) - - try: - yield dt - finally: - os.chdir(cwd) - - if temp_dir is None: - try: - shutil.rmtree(dt) - except OSError: # noqa: B014 - pass diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_sync/http_proxy.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_sync/http_proxy.py deleted file mode 100644 index bb368dd42d559a6de6961c95b0cdef855b868c97..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_sync/http_proxy.py +++ /dev/null @@ -1,350 +0,0 @@ -import logging -import ssl -from base64 import b64encode -from typing import Iterable, List, Mapping, Optional, Sequence, Tuple, Union - -from .._backends.base import SOCKET_OPTION, NetworkBackend -from .._exceptions import ProxyError -from .._models import ( - URL, - Origin, - Request, - Response, - enforce_bytes, - enforce_headers, - enforce_url, -) -from .._ssl import default_ssl_context -from .._synchronization import Lock -from .._trace import Trace -from .connection import HTTPConnection -from .connection_pool import ConnectionPool -from .http11 import HTTP11Connection -from .interfaces import ConnectionInterface - -HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]] -HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]] - - -logger = logging.getLogger("httpcore.proxy") - - -def merge_headers( - default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, - override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, -) -> List[Tuple[bytes, bytes]]: - """ - Append default_headers and override_headers, de-duplicating if a key exists - in both cases. - """ - default_headers = [] if default_headers is None else list(default_headers) - override_headers = [] if override_headers is None else list(override_headers) - has_override = set(key.lower() for key, value in override_headers) - default_headers = [ - (key, value) - for key, value in default_headers - if key.lower() not in has_override - ] - return default_headers + override_headers - - -def build_auth_header(username: bytes, password: bytes) -> bytes: - userpass = username + b":" + password - return b"Basic " + b64encode(userpass) - - -class HTTPProxy(ConnectionPool): - """ - A connection pool that sends requests via an HTTP proxy. - """ - - def __init__( - self, - proxy_url: Union[URL, bytes, str], - proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None, - proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, - ssl_context: Optional[ssl.SSLContext] = None, - max_connections: Optional[int] = 10, - max_keepalive_connections: Optional[int] = None, - keepalive_expiry: Optional[float] = None, - http1: bool = True, - http2: bool = False, - retries: int = 0, - local_address: Optional[str] = None, - uds: Optional[str] = None, - network_backend: Optional[NetworkBackend] = None, - socket_options: Optional[Iterable[SOCKET_OPTION]] = None, - ) -> None: - """ - A connection pool for making HTTP requests. - - Parameters: - proxy_url: The URL to use when connecting to the proxy server. - For example `"http://127.0.0.1:8080/"`. - proxy_auth: Any proxy authentication as a two-tuple of - (username, password). May be either bytes or ascii-only str. - proxy_headers: Any HTTP headers to use for the proxy requests. - For example `{"Proxy-Authorization": "Basic :"}`. - ssl_context: An SSL context to use for verifying connections. - If not specified, the default `httpcore.default_ssl_context()` - will be used. - max_connections: The maximum number of concurrent HTTP connections that - the pool should allow. Any attempt to send a request on a pool that - would exceed this amount will block until a connection is available. - max_keepalive_connections: The maximum number of idle HTTP connections - that will be maintained in the pool. - keepalive_expiry: The duration in seconds that an idle HTTP connection - may be maintained for before being expired from the pool. - http1: A boolean indicating if HTTP/1.1 requests should be supported - by the connection pool. Defaults to True. - http2: A boolean indicating if HTTP/2 requests should be supported by - the connection pool. Defaults to False. - retries: The maximum number of retries when trying to establish - a connection. - local_address: Local address to connect from. Can also be used to - connect using a particular address family. Using - `local_address="0.0.0.0"` will connect using an `AF_INET` address - (IPv4), while using `local_address="::"` will connect using an - `AF_INET6` address (IPv6). - uds: Path to a Unix Domain Socket to use instead of TCP sockets. - network_backend: A backend instance to use for handling network I/O. - """ - super().__init__( - ssl_context=ssl_context, - max_connections=max_connections, - max_keepalive_connections=max_keepalive_connections, - keepalive_expiry=keepalive_expiry, - http1=http1, - http2=http2, - network_backend=network_backend, - retries=retries, - local_address=local_address, - uds=uds, - socket_options=socket_options, - ) - self._ssl_context = ssl_context - self._proxy_url = enforce_url(proxy_url, name="proxy_url") - self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") - if proxy_auth is not None: - username = enforce_bytes(proxy_auth[0], name="proxy_auth") - password = enforce_bytes(proxy_auth[1], name="proxy_auth") - authorization = build_auth_header(username, password) - self._proxy_headers = [ - (b"Proxy-Authorization", authorization) - ] + self._proxy_headers - - def create_connection(self, origin: Origin) -> ConnectionInterface: - if origin.scheme == b"http": - return ForwardHTTPConnection( - proxy_origin=self._proxy_url.origin, - proxy_headers=self._proxy_headers, - remote_origin=origin, - keepalive_expiry=self._keepalive_expiry, - network_backend=self._network_backend, - ) - return TunnelHTTPConnection( - proxy_origin=self._proxy_url.origin, - proxy_headers=self._proxy_headers, - remote_origin=origin, - ssl_context=self._ssl_context, - keepalive_expiry=self._keepalive_expiry, - http1=self._http1, - http2=self._http2, - network_backend=self._network_backend, - ) - - -class ForwardHTTPConnection(ConnectionInterface): - def __init__( - self, - proxy_origin: Origin, - remote_origin: Origin, - proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, - keepalive_expiry: Optional[float] = None, - network_backend: Optional[NetworkBackend] = None, - socket_options: Optional[Iterable[SOCKET_OPTION]] = None, - ) -> None: - self._connection = HTTPConnection( - origin=proxy_origin, - keepalive_expiry=keepalive_expiry, - network_backend=network_backend, - socket_options=socket_options, - ) - self._proxy_origin = proxy_origin - self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") - self._remote_origin = remote_origin - - def handle_request(self, request: Request) -> Response: - headers = merge_headers(self._proxy_headers, request.headers) - url = URL( - scheme=self._proxy_origin.scheme, - host=self._proxy_origin.host, - port=self._proxy_origin.port, - target=bytes(request.url), - ) - proxy_request = Request( - method=request.method, - url=url, - headers=headers, - content=request.stream, - extensions=request.extensions, - ) - return self._connection.handle_request(proxy_request) - - def can_handle_request(self, origin: Origin) -> bool: - return origin == self._remote_origin - - def close(self) -> None: - self._connection.close() - - def info(self) -> str: - return self._connection.info() - - def is_available(self) -> bool: - return self._connection.is_available() - - def has_expired(self) -> bool: - return self._connection.has_expired() - - def is_idle(self) -> bool: - return self._connection.is_idle() - - def is_closed(self) -> bool: - return self._connection.is_closed() - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} [{self.info()}]>" - - -class TunnelHTTPConnection(ConnectionInterface): - def __init__( - self, - proxy_origin: Origin, - remote_origin: Origin, - ssl_context: Optional[ssl.SSLContext] = None, - proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, - keepalive_expiry: Optional[float] = None, - http1: bool = True, - http2: bool = False, - network_backend: Optional[NetworkBackend] = None, - socket_options: Optional[Iterable[SOCKET_OPTION]] = None, - ) -> None: - self._connection: ConnectionInterface = HTTPConnection( - origin=proxy_origin, - keepalive_expiry=keepalive_expiry, - network_backend=network_backend, - socket_options=socket_options, - ) - self._proxy_origin = proxy_origin - self._remote_origin = remote_origin - self._ssl_context = ssl_context - self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") - self._keepalive_expiry = keepalive_expiry - self._http1 = http1 - self._http2 = http2 - self._connect_lock = Lock() - self._connected = False - - def handle_request(self, request: Request) -> Response: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("connect", None) - - with self._connect_lock: - if not self._connected: - target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port) - - connect_url = URL( - scheme=self._proxy_origin.scheme, - host=self._proxy_origin.host, - port=self._proxy_origin.port, - target=target, - ) - connect_headers = merge_headers( - [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers - ) - connect_request = Request( - method=b"CONNECT", - url=connect_url, - headers=connect_headers, - extensions=request.extensions, - ) - connect_response = self._connection.handle_request( - connect_request - ) - - if connect_response.status < 200 or connect_response.status > 299: - reason_bytes = connect_response.extensions.get("reason_phrase", b"") - reason_str = reason_bytes.decode("ascii", errors="ignore") - msg = "%d %s" % (connect_response.status, reason_str) - self._connection.close() - raise ProxyError(msg) - - stream = connect_response.extensions["network_stream"] - - # Upgrade the stream to SSL - ssl_context = ( - default_ssl_context() - if self._ssl_context is None - else self._ssl_context - ) - alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] - ssl_context.set_alpn_protocols(alpn_protocols) - - kwargs = { - "ssl_context": ssl_context, - "server_hostname": self._remote_origin.host.decode("ascii"), - "timeout": timeout, - } - with Trace("start_tls", logger, request, kwargs) as trace: - stream = stream.start_tls(**kwargs) - trace.return_value = stream - - # Determine if we should be using HTTP/1.1 or HTTP/2 - ssl_object = stream.get_extra_info("ssl_object") - http2_negotiated = ( - ssl_object is not None - and ssl_object.selected_alpn_protocol() == "h2" - ) - - # Create the HTTP/1.1 or HTTP/2 connection - if http2_negotiated or (self._http2 and not self._http1): - from .http2 import HTTP2Connection - - self._connection = HTTP2Connection( - origin=self._remote_origin, - stream=stream, - keepalive_expiry=self._keepalive_expiry, - ) - else: - self._connection = HTTP11Connection( - origin=self._remote_origin, - stream=stream, - keepalive_expiry=self._keepalive_expiry, - ) - - self._connected = True - return self._connection.handle_request(request) - - def can_handle_request(self, origin: Origin) -> bool: - return origin == self._remote_origin - - def close(self) -> None: - self._connection.close() - - def info(self) -> str: - return self._connection.info() - - def is_available(self) -> bool: - return self._connection.is_available() - - def has_expired(self) -> bool: - return self._connection.has_expired() - - def is_idle(self) -> bool: - return self._connection.is_idle() - - def is_closed(self) -> bool: - return self._connection.is_closed() - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/spaces/DaleChen/AutoGPT/tests/test_token_counter.py b/spaces/DaleChen/AutoGPT/tests/test_token_counter.py deleted file mode 100644 index 6d7ae016b2f823123b0b69b2eeb3eab50d94f00f..0000000000000000000000000000000000000000 --- a/spaces/DaleChen/AutoGPT/tests/test_token_counter.py +++ /dev/null @@ -1,63 +0,0 @@ -import unittest - -import tests.context -from autogpt.token_counter import count_message_tokens, count_string_tokens - - -class TestTokenCounter(unittest.TestCase): - def test_count_message_tokens(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - self.assertEqual(count_message_tokens(messages), 17) - - def test_count_message_tokens_with_name(self): - messages = [ - {"role": "user", "content": "Hello", "name": "John"}, - {"role": "assistant", "content": "Hi there!"}, - ] - self.assertEqual(count_message_tokens(messages), 17) - - def test_count_message_tokens_empty_input(self): - self.assertEqual(count_message_tokens([]), 3) - - def test_count_message_tokens_invalid_model(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - with self.assertRaises(KeyError): - count_message_tokens(messages, model="invalid_model") - - def test_count_message_tokens_gpt_4(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - self.assertEqual(count_message_tokens(messages, model="gpt-4-0314"), 15) - - def test_count_string_tokens(self): - string = "Hello, world!" - self.assertEqual( - count_string_tokens(string, model_name="gpt-3.5-turbo-0301"), 4 - ) - - def test_count_string_tokens_empty_input(self): - self.assertEqual(count_string_tokens("", model_name="gpt-3.5-turbo-0301"), 0) - - def test_count_message_tokens_invalid_model(self): - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - ] - with self.assertRaises(NotImplementedError): - count_message_tokens(messages, model="invalid_model") - - def test_count_string_tokens_gpt_4(self): - string = "Hello, world!" - self.assertEqual(count_string_tokens(string, model_name="gpt-4-0314"), 4) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Davidsamuel101/PPTGenerator/src/summarizer.py b/spaces/Davidsamuel101/PPTGenerator/src/summarizer.py deleted file mode 100644 index 7fdc9a28f10cbe522b35f35653cb3f55ba95fe26..0000000000000000000000000000000000000000 --- a/spaces/Davidsamuel101/PPTGenerator/src/summarizer.py +++ /dev/null @@ -1,82 +0,0 @@ -from typing import Dict, List, Tuple, Optional -from tqdm import tqdm -from transformers import PegasusForConditionalGeneration, PegasusTokenizer -from src.text_extractor import TextExtractor -from mdutils.mdutils import MdUtils - -import torch -import fitz -import copy - -class Summarizer(): - def __init__(self, model_name: str): - self.device = "cuda" if torch.cuda.is_available() else "cpu" - self.tokenizer = PegasusTokenizer.from_pretrained(model_name) - self.model = PegasusForConditionalGeneration.from_pretrained(model_name).to(self.device) - self.preprocess = TextExtractor() - - def extract_text(self, document: object) -> Dict[str, List[Tuple[str, str]]]: - doc = fitz.open(document) - self.filename = doc.name.split('/')[-1].split('.')[0] - font_counts, styles = self.preprocess.get_font_info(doc, granularity=False) - size_tag = self.preprocess.get_font_tags(font_counts, styles) - texts = self.preprocess.assign_tags(doc, size_tag) - slide_content = self.preprocess.get_slides(texts) - return slide_content - - def __call__(self, slides: Dict[str, List[Tuple[str, str]]]) -> Dict[str, List[Tuple[str, str]]]: - summarized_slides = copy.deepcopy(slides) - for page, contents in tqdm(summarized_slides.items()): - for idx, (tag, content) in enumerate(contents): - if tag.startswith('p'): - try: - input = self.tokenizer(content, truncation=True, padding="longest", return_tensors="pt").to(self.device) - tensor = self.model.generate(**input) - summary = self.tokenizer.batch_decode(tensor, skip_special_tokens=True)[0] - contents[idx] = (tag, summary) - except Exception as e: - print(f"Summarization Fails, Error: {e}") - - return summarized_slides - - def convert2markdown(self, summarized_slides: Dict[str, List[Tuple[str, str]]], target_path: Optional[str]=None) -> str: - filename = self.filename - if target_path: - filename = target_path - mdFile = MdUtils(file_name=filename) - for k, v in summarized_slides.items(): - mdFile.new_line('---\n') - for section in v: - tag = section[0] - content = section[1] - if tag.startswith('h'): - try: - mdFile.new_header(level=int(tag[1]), title=content) - except: - continue - if tag == 'p': - contents = content.split('') - for content in contents: - mdFile.new_line(f"{content}\n") - markdown = mdFile.create_md_file() - return markdown - - def remove_leading_empty_lines(self, file_path) -> None: - with open(file_path, 'r') as file: - lines = file.readlines() - - non_empty_lines = [] - found_first_word = False - - for line in lines: - stripped_line = line.strip() - if stripped_line and not found_first_word: - found_first_word = True - if found_first_word or stripped_line: - non_empty_lines.append(line) - - with open(file_path, 'w') as file: - file.writelines(non_empty_lines) - return - - \ No newline at end of file diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/__init__.py b/spaces/Dinoking/Guccio-AI-Designer/models/__init__.py deleted file mode 100644 index 9941a7bb29d1b9a0a00f9cf90ddf2c48f1e38ed9..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright 2020 Erik Härkönen. All rights reserved. -# This file is licensed to you under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS -# OF ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. - -from .wrappers import * \ No newline at end of file diff --git a/spaces/Djacon/emotion_detection/main.py b/spaces/Djacon/emotion_detection/main.py deleted file mode 100644 index f6045903f0b028d8133ba30cae97a12e5afbf5e9..0000000000000000000000000000000000000000 --- a/spaces/Djacon/emotion_detection/main.py +++ /dev/null @@ -1,84 +0,0 @@ -from fastapi import FastAPI, Request -from pydantic import BaseModel -from jinja2 import TemplateNotFound -from fastapi.responses import HTMLResponse -from fastapi.staticfiles import StaticFiles -from fastapi.templating import Jinja2Templates - -from youtube import get_youtube_caption -from inference import predict_emotions, predict_summarization - -MAX_ITER_SIZE = 3000 - -app = FastAPI() -app.mount("/files", StaticFiles(directory="files"), name="files") -templates = Jinja2Templates(directory="static") - - -@app.exception_handler(TemplateNotFound) -async def not_found_exception_handler(request: Request, exc: TemplateNotFound): - return templates.TemplateResponse("404.html", {"request": request}, - status_code=404) - - -@app.get("/", response_class=HTMLResponse) -async def read_homepage(request: Request): - return templates.TemplateResponse(f"index.html", {"request": request, - "page": "index"}) - - -@app.get("/{page}", response_class=HTMLResponse) -async def read_html(request: Request, page: str = 'index'): - if page.endswith(".html"): - page = page[:-5] - return templates.TemplateResponse(f"{page}.html", {"request": request, - "page": page}) - - -class EmotionRequest(BaseModel): - sum_type: str - text: str - - -@app.post('/predict_emotion') -async def predict_emo(request: EmotionRequest): - if request.sum_type == 'sum-video': - text = get_youtube_caption(request.text) - if not text: - return 'Invalid Link' - elif text == 'err': - return 'Something goes wrong...' - elif text == 'no-cap': - return "Unfortunately, this youtube video doesn't contain captions" - else: - text = request.text - return predict_emotions(text) - - -@app.post('/predict_summarization') -async def predict_sum(request: EmotionRequest): - if request.sum_type == 'sum-video': - text = get_youtube_caption(request.text) - if not text: - return 'Invalid Link' - elif text == 'err': - return 'Something goes wrong...' - elif text == 'no-cap': - return "Unfortunately, this youtube video doesn't contain captions" - else: - text = request.text - - try: - if len(text) < MAX_ITER_SIZE: - return predict_summarization(text) - - arr = [] - for i in range(min(len(text), 20_000) // MAX_ITER_SIZE): - res = predict_summarization( - text[MAX_ITER_SIZE*i:MAX_ITER_SIZE*(i+1)]).replace('\n', ' ') - res = f'{res[0].upper()}{res[1:]}' - arr.append(res) - return '\n\n'.join(arr) - except Exception as e: - print('ERR:', e) - return 'Something goes wrong...' diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/upfirdn2d.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/upfirdn2d.py deleted file mode 100644 index 6727f2bf0857c1f4e0d50de363de75e7b8d4de50..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/upfirdn2d.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -import torch -from torch.nn import functional as F - - -module_path = os.path.dirname(__file__) - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = upfirdn2d_native( - input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1] - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), - max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) diff --git a/spaces/ElainaFanBoy/MusicGen/audiocraft/data/__init__.py b/spaces/ElainaFanBoy/MusicGen/audiocraft/data/__init__.py deleted file mode 100644 index 708a3dcead8dda89374a021177481dacae9f7fe9..0000000000000000000000000000000000000000 --- a/spaces/ElainaFanBoy/MusicGen/audiocraft/data/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from . import audio, audio_dataset diff --git a/spaces/EtheE/SecurityAgent/README.md b/spaces/EtheE/SecurityAgent/README.md deleted file mode 100644 index 661387339449a4f49e55375b6e2f9a39a2d70887..0000000000000000000000000000000000000000 --- a/spaces/EtheE/SecurityAgent/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Hack Challenge - SecurityAgent | PromptDefender -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hwchase17/langchain-demo ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/EuroPython2022/banking_intent_classification/README.md b/spaces/EuroPython2022/banking_intent_classification/README.md deleted file mode 100644 index 39c37a0f8668f3bd0e09c3acb99e7f2773617a27..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/banking_intent_classification/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Banking Intent Classification -emoji: 💳 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.0.26 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/FauziNL/Voice_anime2/infer_pack/modules.py b/spaces/FauziNL/Voice_anime2/infer_pack/modules.py deleted file mode 100644 index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000 --- a/spaces/FauziNL/Voice_anime2/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/Fcou/ChatGPT3.5/app.py b/spaces/Fcou/ChatGPT3.5/app.py deleted file mode 100644 index edb3fd3ef68afcae19891d82fe8e4e8edd3860a2..0000000000000000000000000000000000000000 --- a/spaces/Fcou/ChatGPT3.5/app.py +++ /dev/null @@ -1,64 +0,0 @@ -import openai -import os -import gradio as gr - -openai.api_key = os.environ.get("OPENAI_API_KEY") - -class Conversation: - def __init__(self, prompt, num_of_round): - self.prompt = prompt - self.num_of_round = num_of_round - self.messages = [] - self.messages.append({"role": "system", "content": self.prompt}) - - def ask(self, question): - try: - self.messages.append( {"role": "user", "content": question}) - response = openai.Completion.create( - engine="davinci", - prompt=self.prompt + "\n" + question, - max_tokens=2048, - temperature=0.5, - top_p=1, - frequency_penalty=0, - presence_penalty=0 - ) - except Exception as e: - print(e) - return e - - message = response["choices"][0]["text"] - self.messages.append({"role": "assistant", "content": message}) - - if len(self.messages) > self.num_of_round*2 + 1: - del self.messages[1:3] - return message - - -prompt = """你用中文回答编程相关的问题。你的回答需要满足以下要求: -1. 你的回答应该首先用中文,需要英文的时候才使用英文。 -2. 用markdown语法来格式化你的回答。 -""" - -conv = Conversation(prompt, 5) - -def predict(input, history=[]): - history.append(input) - response = conv.ask(input) - history.append(response) - responses = [(u,b) for u,b in zip(history[::2], history[1::2])] - return responses, history - - - - -with gr.Blocks(css="#chatbot{height:350px} .overflow-y-auto{height:500px}") as demo: - chatbot = gr.Chatbot(elem_id="chatbot") - state = gr.State([]) - - with gr.Row(): - txt = gr.Textbox(show_label=False, placeholder="").style(container=False) - - txt.submit(predict, [txt, state], [chatbot, state]) - -demo.launch() diff --git a/spaces/Ferion/image-matting-app/ppmatting/models/layers/gca_module.py b/spaces/Ferion/image-matting-app/ppmatting/models/layers/gca_module.py deleted file mode 100644 index ba8654efc9bd24de2e127393ad8338d21964e4a5..0000000000000000000000000000000000000000 --- a/spaces/Ferion/image-matting-app/ppmatting/models/layers/gca_module.py +++ /dev/null @@ -1,211 +0,0 @@ -# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# The gca code was heavily based on https://github.com/Yaoyi-Li/GCA-Matting -# and https://github.com/open-mmlab/mmediting - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F - -from paddleseg.cvlibs import param_init - - -class GuidedCxtAtten(nn.Layer): - def __init__(self, - out_channels, - guidance_channels, - kernel_size=3, - stride=1, - rate=2): - super().__init__() - - self.kernel_size = kernel_size - self.rate = rate - self.stride = stride - self.guidance_conv = nn.Conv2D( - in_channels=guidance_channels, - out_channels=guidance_channels // 2, - kernel_size=1) - - self.out_conv = nn.Sequential( - nn.Conv2D( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=1, - bias_attr=False), - nn.BatchNorm(out_channels)) - - self.init_weight() - - def init_weight(self): - param_init.xavier_uniform(self.guidance_conv.weight) - param_init.constant_init(self.guidance_conv.bias, value=0.0) - param_init.xavier_uniform(self.out_conv[0].weight) - param_init.constant_init(self.out_conv[1].weight, value=1e-3) - param_init.constant_init(self.out_conv[1].bias, value=0.0) - - def forward(self, img_feat, alpha_feat, unknown=None, softmax_scale=1.): - - img_feat = self.guidance_conv(img_feat) - img_feat = F.interpolate( - img_feat, scale_factor=1 / self.rate, mode='nearest') - - # process unknown mask - unknown, softmax_scale = self.process_unknown_mask(unknown, img_feat, - softmax_scale) - - img_ps, alpha_ps, unknown_ps = self.extract_feature_maps_patches( - img_feat, alpha_feat, unknown) - - self_mask = self.get_self_correlation_mask(img_feat) - - # split tensors by batch dimension; tuple is returned - img_groups = paddle.split(img_feat, 1, axis=0) - img_ps_groups = paddle.split(img_ps, 1, axis=0) - alpha_ps_groups = paddle.split(alpha_ps, 1, axis=0) - unknown_ps_groups = paddle.split(unknown_ps, 1, axis=0) - scale_groups = paddle.split(softmax_scale, 1, axis=0) - groups = (img_groups, img_ps_groups, alpha_ps_groups, unknown_ps_groups, - scale_groups) - - y = [] - - for img_i, img_ps_i, alpha_ps_i, unknown_ps_i, scale_i in zip(*groups): - # conv for compare - similarity_map = self.compute_similarity_map(img_i, img_ps_i) - - gca_score = self.compute_guided_attention_score( - similarity_map, unknown_ps_i, scale_i, self_mask) - - yi = self.propagate_alpha_feature(gca_score, alpha_ps_i) - - y.append(yi) - - y = paddle.concat(y, axis=0) # back to the mini-batch - y = paddle.reshape(y, alpha_feat.shape) - - y = self.out_conv(y) + alpha_feat - - return y - - def extract_feature_maps_patches(self, img_feat, alpha_feat, unknown): - - # extract image feature patches with shape: - # (N, img_h*img_w, img_c, img_ks, img_ks) - img_ks = self.kernel_size - img_ps = self.extract_patches(img_feat, img_ks, self.stride) - - # extract alpha feature patches with shape: - # (N, img_h*img_w, alpha_c, alpha_ks, alpha_ks) - alpha_ps = self.extract_patches(alpha_feat, self.rate * 2, self.rate) - - # extract unknown mask patches with shape: (N, img_h*img_w, 1, 1) - unknown_ps = self.extract_patches(unknown, img_ks, self.stride) - unknown_ps = unknown_ps.squeeze(axis=2) # squeeze channel dimension - unknown_ps = unknown_ps.mean(axis=[2, 3], keepdim=True) - - return img_ps, alpha_ps, unknown_ps - - def extract_patches(self, x, kernel_size, stride): - n, c, _, _ = x.shape - x = self.pad(x, kernel_size, stride) - x = F.unfold(x, [kernel_size, kernel_size], strides=[stride, stride]) - x = paddle.transpose(x, (0, 2, 1)) - x = paddle.reshape(x, (n, -1, c, kernel_size, kernel_size)) - - return x - - def pad(self, x, kernel_size, stride): - left = (kernel_size - stride + 1) // 2 - right = (kernel_size - stride) // 2 - pad = (left, right, left, right) - return F.pad(x, pad, mode='reflect') - - def compute_guided_attention_score(self, similarity_map, unknown_ps, scale, - self_mask): - # scale the correlation with predicted scale factor for known and - # unknown area - unknown_scale, known_scale = scale[0] - out = similarity_map * ( - unknown_scale * paddle.greater_than(unknown_ps, - paddle.to_tensor([0.])) + - known_scale * paddle.less_equal(unknown_ps, paddle.to_tensor([0.]))) - # mask itself, self-mask only applied to unknown area - out = out + self_mask * unknown_ps - gca_score = F.softmax(out, axis=1) - - return gca_score - - def propagate_alpha_feature(self, gca_score, alpha_ps): - - alpha_ps = alpha_ps[0] # squeeze dim 0 - if self.rate == 1: - gca_score = self.pad(gca_score, kernel_size=2, stride=1) - alpha_ps = paddle.transpose(alpha_ps, (1, 0, 2, 3)) - out = F.conv2d(gca_score, alpha_ps) / 4. - else: - out = F.conv2d_transpose( - gca_score, alpha_ps, stride=self.rate, padding=1) / 4. - - return out - - def compute_similarity_map(self, img_feat, img_ps): - img_ps = img_ps[0] # squeeze dim 0 - # convolve the feature to get correlation (similarity) map - img_ps_normed = img_ps / paddle.clip(self.l2_norm(img_ps), 1e-4) - img_feat = F.pad(img_feat, (1, 1, 1, 1), mode='reflect') - similarity_map = F.conv2d(img_feat, img_ps_normed) - - return similarity_map - - def get_self_correlation_mask(self, img_feat): - _, _, h, w = img_feat.shape - self_mask = F.one_hot( - paddle.reshape(paddle.arange(h * w), (h, w)), - num_classes=int(h * w)) - - self_mask = paddle.transpose(self_mask, (2, 0, 1)) - self_mask = paddle.reshape(self_mask, (1, h * w, h, w)) - - return self_mask * (-1e4) - - def process_unknown_mask(self, unknown, img_feat, softmax_scale): - - n, _, h, w = img_feat.shape - - if unknown is not None: - unknown = unknown.clone() - unknown = F.interpolate( - unknown, scale_factor=1 / self.rate, mode='nearest') - unknown_mean = unknown.mean(axis=[2, 3]) - known_mean = 1 - unknown_mean - unknown_scale = paddle.clip( - paddle.sqrt(unknown_mean / known_mean), 0.1, 10) - known_scale = paddle.clip( - paddle.sqrt(known_mean / unknown_mean), 0.1, 10) - softmax_scale = paddle.concat([unknown_scale, known_scale], axis=1) - else: - unknown = paddle.ones([n, 1, h, w]) - softmax_scale = paddle.reshape( - paddle.to_tensor([softmax_scale, softmax_scale]), (1, 2)) - softmax_scale = paddle.expand(softmax_scale, (n, 2)) - - return unknown, softmax_scale - - @staticmethod - def l2_norm(x): - x = x**2 - x = x.sum(axis=[1, 2, 3], keepdim=True) - return paddle.sqrt(x) diff --git a/spaces/Fu-chiang/Bit-50-Glaucoma/app.py b/spaces/Fu-chiang/Bit-50-Glaucoma/app.py deleted file mode 100644 index 4d3b2412215242cc866b7ddb42cbea7e7e256de9..0000000000000000000000000000000000000000 --- a/spaces/Fu-chiang/Bit-50-Glaucoma/app.py +++ /dev/null @@ -1,15 +0,0 @@ -import gradio as gr -from transformers import pipeline - -pipeline = pipeline(task="image-classification", model="Fu-chiang/bit-50-Glaucoma") - -def predict(image): - predictions = pipeline(image) - return {p["label"]: p["score"] for p in predictions} - -gr.Interface( - predict, - inputs=gr.inputs.Image(label="Upload the fundus image", type="filepath"), - outputs=gr.outputs.Label(num_top_classes=2), - title="Glaucoma or Normal?", -).launch() diff --git a/spaces/GXSA/bingo/src/components/ui/select.tsx b/spaces/GXSA/bingo/src/components/ui/select.tsx deleted file mode 100644 index 77f12c2996f541b97663de4c9e20ab34d4ec2fac..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/components/ui/select.tsx +++ /dev/null @@ -1,123 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SelectPrimitive from '@radix-ui/react-select' - -import { cn } from '@/lib/utils' -import { - IconArrowDown, - IconCheck, - IconChevronUpDown -} from '@/components/ui/icons' - -const Select = SelectPrimitive.Root - -const SelectGroup = SelectPrimitive.Group - -const SelectValue = SelectPrimitive.Value - -const SelectTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - {children} - - - - -)) -SelectTrigger.displayName = SelectPrimitive.Trigger.displayName - -const SelectContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, position = 'popper', ...props }, ref) => ( - - - - {children} - - - -)) -SelectContent.displayName = SelectPrimitive.Content.displayName - -const SelectLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectLabel.displayName = SelectPrimitive.Label.displayName - -const SelectItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - {children} - -)) -SelectItem.displayName = SelectPrimitive.Item.displayName - -const SelectSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectSeparator.displayName = SelectPrimitive.Separator.displayName - -export { - Select, - SelectGroup, - SelectValue, - SelectTrigger, - SelectContent, - SelectLabel, - SelectItem, - SelectSeparator -} diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/nas_fcos/README.md b/spaces/Gradio-Blocks/uniformer_image_detection/configs/nas_fcos/README.md deleted file mode 100644 index 05ac996a40cfa2f600f239f21adb0878a284292b..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/nas_fcos/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# NAS-FCOS: Fast Neural Architecture Search for Object Detection - -## Introduction - -[ALGORITHM] - -```latex -@article{wang2019fcos, - title={Nas-fcos: Fast neural architecture search for object detection}, - author={Wang, Ning and Gao, Yang and Chen, Hao and Wang, Peng and Tian, Zhi and Shen, Chunhua}, - journal={arXiv preprint arXiv:1906.04423}, - year={2019} -} -``` - -## Results and Models - -| Head | Backbone | Style | GN-head | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -|:---------:|:---------:|:-------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:| -| NAS-FCOSHead | R-50 | caffe | Y | 1x | | | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520.log.json) | -| FCOSHead | R-50 | caffe | Y | 1x | | | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521.log.json) | - -**Notes:** - -- To be consistent with the author's implementation, we use 4 GPUs with 4 images/GPU. diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py deleted file mode 100644 index 85004e02c31edeb487f765835815c6f80c18fb6f..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py' -model = dict( - pretrained='open-mmlab://res2net101_v1d_26w_4s', - backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/dense_heads/transformer_head.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/dense_heads/transformer_head.py deleted file mode 100644 index 820fd069fcca295f6102f0d27366158a8c640249..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/dense_heads/transformer_head.py +++ /dev/null @@ -1,654 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Conv2d, Linear, build_activation_layer -from mmcv.runner import force_fp32 - -from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh, - build_assigner, build_sampler, multi_apply, - reduce_mean) -from mmdet.models.utils import (FFN, build_positional_encoding, - build_transformer) -from ..builder import HEADS, build_loss -from .anchor_free_head import AnchorFreeHead - - -@HEADS.register_module() -class TransformerHead(AnchorFreeHead): - """Implements the DETR transformer head. - - See `paper: End-to-End Object Detection with Transformers - `_ for details. - - Args: - num_classes (int): Number of categories excluding the background. - in_channels (int): Number of channels in the input feature map. - num_fcs (int, optional): Number of fully-connected layers used in - `FFN`, which is then used for the regression head. Default 2. - transformer (dict, optional): Config for transformer. - positional_encoding (dict, optional): Config for position encoding. - loss_cls (dict, optional): Config of the classification loss. - Default `CrossEntropyLoss`. - loss_bbox (dict, optional): Config of the regression loss. - Default `L1Loss`. - loss_iou (dict, optional): Config of the regression iou loss. - Default `GIoULoss`. - tran_cfg (dict, optional): Training config of transformer head. - test_cfg (dict, optional): Testing config of transformer head. - - Example: - >>> import torch - >>> self = TransformerHead(80, 2048) - >>> x = torch.rand(1, 2048, 32, 32) - >>> mask = torch.ones(1, 32, 32).to(x.dtype) - >>> mask[:, :16, :15] = 0 - >>> all_cls_scores, all_bbox_preds = self(x, mask) - """ - - def __init__(self, - num_classes, - in_channels, - num_fcs=2, - transformer=dict( - type='Transformer', - embed_dims=256, - num_heads=8, - num_encoder_layers=6, - num_decoder_layers=6, - feedforward_channels=2048, - dropout=0.1, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN'), - num_fcs=2, - pre_norm=False, - return_intermediate_dec=True), - positional_encoding=dict( - type='SinePositionalEncoding', - num_feats=128, - normalize=True), - loss_cls=dict( - type='CrossEntropyLoss', - bg_cls_weight=0.1, - use_sigmoid=False, - loss_weight=1.0, - class_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0), - train_cfg=dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='ClassificationCost', weight=1.), - reg_cost=dict(type='BBoxL1Cost', weight=5.0), - iou_cost=dict( - type='IoUCost', iou_mode='giou', weight=2.0))), - test_cfg=dict(max_per_img=100), - **kwargs): - # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, - # since it brings inconvenience when the initialization of - # `AnchorFreeHead` is called. - super(AnchorFreeHead, self).__init__() - use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - assert not use_sigmoid_cls, 'setting use_sigmoid_cls as True is ' \ - 'not supported in DETR, since background is needed for the ' \ - 'matching process.' - assert 'embed_dims' in transformer \ - and 'num_feats' in positional_encoding - num_feats = positional_encoding['num_feats'] - embed_dims = transformer['embed_dims'] - assert num_feats * 2 == embed_dims, 'embed_dims should' \ - f' be exactly 2 times of num_feats. Found {embed_dims}' \ - f' and {num_feats}.' - assert test_cfg is not None and 'max_per_img' in test_cfg - - class_weight = loss_cls.get('class_weight', None) - if class_weight is not None: - assert isinstance(class_weight, float), 'Expected ' \ - 'class_weight to have type float. Found ' \ - f'{type(class_weight)}.' - # NOTE following the official DETR rep0, bg_cls_weight means - # relative classification weight of the no-object class. - bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) - assert isinstance(bg_cls_weight, float), 'Expected ' \ - 'bg_cls_weight to have type float. Found ' \ - f'{type(bg_cls_weight)}.' - class_weight = torch.ones(num_classes + 1) * class_weight - # set background class as the last indice - class_weight[num_classes] = bg_cls_weight - loss_cls.update({'class_weight': class_weight}) - if 'bg_cls_weight' in loss_cls: - loss_cls.pop('bg_cls_weight') - self.bg_cls_weight = bg_cls_weight - - if train_cfg: - assert 'assigner' in train_cfg, 'assigner should be provided '\ - 'when train_cfg is set.' - assigner = train_cfg['assigner'] - assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \ - 'The classification weight for loss and matcher should be' \ - 'exactly the same.' - assert loss_bbox['loss_weight'] == assigner['reg_cost'][ - 'weight'], 'The regression L1 weight for loss and matcher ' \ - 'should be exactly the same.' - assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \ - 'The regression iou weight for loss and matcher should be' \ - 'exactly the same.' - self.assigner = build_assigner(assigner) - # DETR sampling=False, so use PseudoSampler - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.num_classes = num_classes - self.cls_out_channels = num_classes + 1 - self.in_channels = in_channels - self.num_fcs = num_fcs - self.train_cfg = train_cfg - self.test_cfg = test_cfg - self.use_sigmoid_cls = use_sigmoid_cls - self.embed_dims = embed_dims - self.num_query = test_cfg['max_per_img'] - self.fp16_enabled = False - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - self.loss_iou = build_loss(loss_iou) - self.act_cfg = transformer.get('act_cfg', - dict(type='ReLU', inplace=True)) - self.activate = build_activation_layer(self.act_cfg) - self.positional_encoding = build_positional_encoding( - positional_encoding) - self.transformer = build_transformer(transformer) - self._init_layers() - - def _init_layers(self): - """Initialize layers of the transformer head.""" - self.input_proj = Conv2d( - self.in_channels, self.embed_dims, kernel_size=1) - self.fc_cls = Linear(self.embed_dims, self.cls_out_channels) - self.reg_ffn = FFN( - self.embed_dims, - self.embed_dims, - self.num_fcs, - self.act_cfg, - dropout=0.0, - add_residual=False) - self.fc_reg = Linear(self.embed_dims, 4) - self.query_embedding = nn.Embedding(self.num_query, self.embed_dims) - - def init_weights(self, distribution='uniform'): - """Initialize weights of the transformer head.""" - # The initialization for transformer is important - self.transformer.init_weights() - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - """load checkpoints.""" - # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, - # since `AnchorFreeHead._load_from_state_dict` should not be - # called here. Invoking the default `Module._load_from_state_dict` - # is enough. - super(AnchorFreeHead, - self)._load_from_state_dict(state_dict, prefix, local_metadata, - strict, missing_keys, - unexpected_keys, error_msgs) - - def forward(self, feats, img_metas): - """Forward function. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - img_metas (list[dict]): List of image information. - - Returns: - tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. - - - all_cls_scores_list (list[Tensor]): Classification scores \ - for each scale level. Each is a 4D-tensor with shape \ - [nb_dec, bs, num_query, cls_out_channels]. Note \ - `cls_out_channels` should includes background. - - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ - outputs for each scale level. Each is a 4D-tensor with \ - normalized coordinate format (cx, cy, w, h) and shape \ - [nb_dec, bs, num_query, 4]. - """ - num_levels = len(feats) - img_metas_list = [img_metas for _ in range(num_levels)] - return multi_apply(self.forward_single, feats, img_metas_list) - - def forward_single(self, x, img_metas): - """"Forward function for a single feature level. - - Args: - x (Tensor): Input feature from backbone's single stage, shape - [bs, c, h, w]. - img_metas (list[dict]): List of image information. - - Returns: - all_cls_scores (Tensor): Outputs from the classification head, - shape [nb_dec, bs, num_query, cls_out_channels]. Note - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression - head with normalized coordinate format (cx, cy, w, h). - Shape [nb_dec, bs, num_query, 4]. - """ - # construct binary masks which used for the transformer. - # NOTE following the official DETR repo, non-zero values representing - # ignored positions, while zero values means valid positions. - batch_size = x.size(0) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - masks = x.new_ones((batch_size, input_img_h, input_img_w)) - for img_id in range(batch_size): - img_h, img_w, _ = img_metas[img_id]['img_shape'] - masks[img_id, :img_h, :img_w] = 0 - - x = self.input_proj(x) - # interpolate masks to have the same spatial shape with x - masks = F.interpolate( - masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) - # position encoding - pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w] - # outs_dec: [nb_dec, bs, num_query, embed_dim] - outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, - pos_embed) - - all_cls_scores = self.fc_cls(outs_dec) - all_bbox_preds = self.fc_reg(self.activate( - self.reg_ffn(outs_dec))).sigmoid() - return all_cls_scores, all_bbox_preds - - @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) - def loss(self, - all_cls_scores_list, - all_bbox_preds_list, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore=None): - """"Loss function. - - Only outputs from the last feature level are used for computing - losses by default. - - Args: - all_cls_scores_list (list[Tensor]): Classification outputs - for each feature level. Each is a 4D-tensor with shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds_list (list[Tensor]): Sigmoid regression - outputs for each feature level. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore (list[Tensor], optional): Bounding boxes - which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - # NOTE defaultly only the outputs from the last feature scale is used. - all_cls_scores = all_cls_scores_list[-1] - all_bbox_preds = all_bbox_preds_list[-1] - assert gt_bboxes_ignore is None, \ - 'Only supports for gt_bboxes_ignore setting to None.' - - num_dec_layers = len(all_cls_scores) - all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] - all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] - all_gt_bboxes_ignore_list = [ - gt_bboxes_ignore for _ in range(num_dec_layers) - ] - img_metas_list = [img_metas for _ in range(num_dec_layers)] - - losses_cls, losses_bbox, losses_iou = multi_apply( - self.loss_single, all_cls_scores, all_bbox_preds, - all_gt_bboxes_list, all_gt_labels_list, img_metas_list, - all_gt_bboxes_ignore_list) - - loss_dict = dict() - # loss from the last decoder layer - loss_dict['loss_cls'] = losses_cls[-1] - loss_dict['loss_bbox'] = losses_bbox[-1] - loss_dict['loss_iou'] = losses_iou[-1] - # loss from other decoder layers - num_dec_layer = 0 - for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], - losses_bbox[:-1], - losses_iou[:-1]): - loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i - loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i - num_dec_layer += 1 - return loss_dict - - def loss_single(self, - cls_scores, - bbox_preds, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore_list=None): - """"Loss function for outputs from a single decoder layer of a single - feature level. - - Args: - cls_scores (Tensor): Box score logits from a single decoder layer - for all images. Shape [bs, num_query, cls_out_channels]. - bbox_preds (Tensor): Sigmoid outputs from a single decoder layer - for all images, with normalized coordinate (cx, cy, w, h) and - shape [bs, num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components for outputs from - a single decoder layer. - """ - num_imgs = cls_scores.size(0) - cls_scores_list = [cls_scores[i] for i in range(num_imgs)] - bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] - cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, - gt_bboxes_list, gt_labels_list, - img_metas, gt_bboxes_ignore_list) - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - labels = torch.cat(labels_list, 0) - label_weights = torch.cat(label_weights_list, 0) - bbox_targets = torch.cat(bbox_targets_list, 0) - bbox_weights = torch.cat(bbox_weights_list, 0) - - # classification loss - cls_scores = cls_scores.reshape(-1, self.cls_out_channels) - # construct weighted avg_factor to match with the official DETR repo - cls_avg_factor = num_total_pos * 1.0 + \ - num_total_neg * self.bg_cls_weight - loss_cls = self.loss_cls( - cls_scores, labels, label_weights, avg_factor=cls_avg_factor) - - # Compute the average number of gt boxes accross all gpus, for - # normalization purposes - num_total_pos = loss_cls.new_tensor([num_total_pos]) - num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() - - # construct factors used for rescale bboxes - factors = [] - for img_meta, bbox_pred in zip(img_metas, bbox_preds): - img_h, img_w, _ = img_meta['img_shape'] - factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0).repeat( - bbox_pred.size(0), 1) - factors.append(factor) - factors = torch.cat(factors, 0) - - # DETR regress the relative position of boxes (cxcywh) in the image, - # thus the learning target is normalized by the image size. So here - # we need to re-scale them for calculating IoU loss - bbox_preds = bbox_preds.reshape(-1, 4) - bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors - bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors - - # regression IoU loss, defaultly GIoU loss - loss_iou = self.loss_iou( - bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) - - # regression L1 loss - loss_bbox = self.loss_bbox( - bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) - return loss_cls, loss_bbox, loss_iou - - def get_targets(self, - cls_scores_list, - bbox_preds_list, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore_list=None): - """"Compute regression and classification targets for a batch image. - - Outputs from a single decoder layer of a single feature level are used. - - Args: - cls_scores_list (list[Tensor]): Box score logits from a single - decoder layer for each image with shape [num_query, - cls_out_channels]. - bbox_preds_list (list[Tensor]): Sigmoid outputs from a single - decoder layer for each image, with normalized coordinate - (cx, cy, w, h) and shape [num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - - Returns: - tuple: a tuple containing the following targets. - - - labels_list (list[Tensor]): Labels for all images. - - label_weights_list (list[Tensor]): Label weights for all \ - images. - - bbox_targets_list (list[Tensor]): BBox targets for all \ - images. - - bbox_weights_list (list[Tensor]): BBox weights for all \ - images. - - num_total_pos (int): Number of positive samples in all \ - images. - - num_total_neg (int): Number of negative samples in all \ - images. - """ - assert gt_bboxes_ignore_list is None, \ - 'Only supports for gt_bboxes_ignore setting to None.' - num_imgs = len(cls_scores_list) - gt_bboxes_ignore_list = [ - gt_bboxes_ignore_list for _ in range(num_imgs) - ] - - (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, cls_scores_list, bbox_preds_list, - gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) - num_total_pos = sum((inds.numel() for inds in pos_inds_list)) - num_total_neg = sum((inds.numel() for inds in neg_inds_list)) - return (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) - - def _get_target_single(self, - cls_score, - bbox_pred, - gt_bboxes, - gt_labels, - img_meta, - gt_bboxes_ignore=None): - """"Compute regression and classification targets for one image. - - Outputs from a single decoder layer of a single feature level are used. - - Args: - cls_score (Tensor): Box score logits from a single decoder layer - for one image. Shape [num_query, cls_out_channels]. - bbox_pred (Tensor): Sigmoid outputs from a single decoder layer - for one image, with normalized coordinate (cx, cy, w, h) and - shape [num_query, 4]. - gt_bboxes (Tensor): Ground truth bboxes for one image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (Tensor): Ground truth class indices for one image - with shape (num_gts, ). - img_meta (dict): Meta information for one image. - gt_bboxes_ignore (Tensor, optional): Bounding boxes - which can be ignored. Default None. - - Returns: - tuple[Tensor]: a tuple containing the following for one image. - - - labels (Tensor): Labels of each image. - - label_weights (Tensor]): Label weights of each image. - - bbox_targets (Tensor): BBox targets of each image. - - bbox_weights (Tensor): BBox weights of each image. - - pos_inds (Tensor): Sampled positive indices for each image. - - neg_inds (Tensor): Sampled negative indices for each image. - """ - - num_bboxes = bbox_pred.size(0) - # assigner and sampler - assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, - gt_labels, img_meta, - gt_bboxes_ignore) - sampling_result = self.sampler.sample(assign_result, bbox_pred, - gt_bboxes) - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - - # label targets - labels = gt_bboxes.new_full((num_bboxes, ), - self.num_classes, - dtype=torch.long) - labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] - label_weights = gt_bboxes.new_ones(num_bboxes) - - # bbox targets - bbox_targets = torch.zeros_like(bbox_pred) - bbox_weights = torch.zeros_like(bbox_pred) - bbox_weights[pos_inds] = 1.0 - img_h, img_w, _ = img_meta['img_shape'] - - # DETR regress the relative position of boxes (cxcywh) in the image. - # Thus the learning target should be normalized by the image size, also - # the box format should be converted from defaultly x1y1x2y2 to cxcywh. - factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0) - pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor - pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized) - bbox_targets[pos_inds] = pos_gt_bboxes_targets - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - neg_inds) - - # over-write because img_metas are needed as inputs for bbox_head. - def forward_train(self, - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - proposal_cfg=None, - **kwargs): - """Forward function for training mode. - - Args: - x (list[Tensor]): Features from backbone. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - proposal_cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert proposal_cfg is None, '"proposal_cfg" must be None' - outs = self(x, img_metas) - if gt_labels is None: - loss_inputs = outs + (gt_bboxes, img_metas) - else: - loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) - losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - return losses - - @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) - def get_bboxes(self, - all_cls_scores_list, - all_bbox_preds_list, - img_metas, - rescale=False): - """Transform network outputs for a batch into bbox predictions. - - Args: - all_cls_scores_list (list[Tensor]): Classification outputs - for each feature level. Each is a 4D-tensor with shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds_list (list[Tensor]): Sigmoid regression - outputs for each feature level. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - img_metas (list[dict]): Meta information of each image. - rescale (bool, optional): If True, return boxes in original - image space. Default False. - - Returns: - list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ - The first item is an (n, 5) tensor, where the first 4 columns \ - are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ - 5-th column is a score between 0 and 1. The second item is a \ - (n,) tensor where each item is the predicted class label of \ - the corresponding box. - """ - # NOTE defaultly only using outputs from the last feature level, - # and only the outputs from the last decoder layer is used. - cls_scores = all_cls_scores_list[-1][-1] - bbox_preds = all_bbox_preds_list[-1][-1] - - result_list = [] - for img_id in range(len(img_metas)): - cls_score = cls_scores[img_id] - bbox_pred = bbox_preds[img_id] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - proposals = self._get_bboxes_single(cls_score, bbox_pred, - img_shape, scale_factor, - rescale) - result_list.append(proposals) - return result_list - - def _get_bboxes_single(self, - cls_score, - bbox_pred, - img_shape, - scale_factor, - rescale=False): - """Transform outputs from the last decoder layer into bbox predictions - for each image. - - Args: - cls_score (Tensor): Box score logits from the last decoder layer - for each image. Shape [num_query, cls_out_channels]. - bbox_pred (Tensor): Sigmoid outputs from the last decoder layer - for each image, with coordinate format (cx, cy, w, h) and - shape [num_query, 4]. - img_shape (tuple[int]): Shape of input image, (height, width, 3). - scale_factor (ndarray, optional): Scale factor of the image arange - as (w_scale, h_scale, w_scale, h_scale). - rescale (bool, optional): If True, return boxes in original image - space. Default False. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. - - - det_bboxes: Predicted bboxes with shape [num_query, 5], \ - where the first 4 columns are bounding box positions \ - (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \ - between 0 and 1. - - det_labels: Predicted labels of the corresponding box with \ - shape [num_query]. - """ - assert len(cls_score) == len(bbox_pred) - # exclude background - scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) - det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) - det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] - det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] - det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) - det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) - if rescale: - det_bboxes /= det_bboxes.new_tensor(scale_factor) - det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1) - return det_bboxes, det_labels diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/tools/convert_datasets/hrf.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/tools/convert_datasets/hrf.py deleted file mode 100644 index bdeb6e7e5668e097f30bc019c88f9eab6c7fcf07..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/tools/convert_datasets/hrf.py +++ /dev/null @@ -1,110 +0,0 @@ -import argparse -import os -import os.path as osp -import tempfile -import zipfile - -import mmcv - -HRF_LEN = 15 -TRAINING_LEN = 5 - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Convert HRF dataset to mmsegmentation format') - parser.add_argument('healthy_path', help='the path of healthy.zip') - parser.add_argument( - 'healthy_manualsegm_path', help='the path of healthy_manualsegm.zip') - parser.add_argument('glaucoma_path', help='the path of glaucoma.zip') - parser.add_argument( - 'glaucoma_manualsegm_path', help='the path of glaucoma_manualsegm.zip') - parser.add_argument( - 'diabetic_retinopathy_path', - help='the path of diabetic_retinopathy.zip') - parser.add_argument( - 'diabetic_retinopathy_manualsegm_path', - help='the path of diabetic_retinopathy_manualsegm.zip') - parser.add_argument('--tmp_dir', help='path of the temporary directory') - parser.add_argument('-o', '--out_dir', help='output path') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - images_path = [ - args.healthy_path, args.glaucoma_path, args.diabetic_retinopathy_path - ] - annotations_path = [ - args.healthy_manualsegm_path, args.glaucoma_manualsegm_path, - args.diabetic_retinopathy_manualsegm_path - ] - if args.out_dir is None: - out_dir = osp.join('data', 'HRF') - else: - out_dir = args.out_dir - - print('Making directories...') - mmcv.mkdir_or_exist(out_dir) - mmcv.mkdir_or_exist(osp.join(out_dir, 'images')) - mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training')) - mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation')) - mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations')) - mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training')) - mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation')) - - print('Generating images...') - for now_path in images_path: - with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: - zip_file = zipfile.ZipFile(now_path) - zip_file.extractall(tmp_dir) - - assert len(os.listdir(tmp_dir)) == HRF_LEN, \ - 'len(os.listdir(tmp_dir)) != {}'.format(HRF_LEN) - - for filename in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]: - img = mmcv.imread(osp.join(tmp_dir, filename)) - mmcv.imwrite( - img, - osp.join(out_dir, 'images', 'training', - osp.splitext(filename)[0] + '.png')) - for filename in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]: - img = mmcv.imread(osp.join(tmp_dir, filename)) - mmcv.imwrite( - img, - osp.join(out_dir, 'images', 'validation', - osp.splitext(filename)[0] + '.png')) - - print('Generating annotations...') - for now_path in annotations_path: - with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: - zip_file = zipfile.ZipFile(now_path) - zip_file.extractall(tmp_dir) - - assert len(os.listdir(tmp_dir)) == HRF_LEN, \ - 'len(os.listdir(tmp_dir)) != {}'.format(HRF_LEN) - - for filename in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]: - img = mmcv.imread(osp.join(tmp_dir, filename)) - # The annotation img should be divided by 128, because some of - # the annotation imgs are not standard. We should set a - # threshold to convert the nonstandard annotation imgs. The - # value divided by 128 is equivalent to '1 if value >= 128 - # else 0' - mmcv.imwrite( - img[:, :, 0] // 128, - osp.join(out_dir, 'annotations', 'training', - osp.splitext(filename)[0] + '.png')) - for filename in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]: - img = mmcv.imread(osp.join(tmp_dir, filename)) - mmcv.imwrite( - img[:, :, 0] // 128, - osp.join(out_dir, 'annotations', 'validation', - osp.splitext(filename)[0] + '.png')) - - print('Done!') - - -if __name__ == '__main__': - main() diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/__init__.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/GuruVineeth/GenAIGPT/app.py b/spaces/GuruVineeth/GenAIGPT/app.py deleted file mode 100644 index 7611842e94b6fba45e4bb07be4645b05ff3db6db..0000000000000000000000000000000000000000 --- a/spaces/GuruVineeth/GenAIGPT/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are a tech-savvy computer science student who spends countless hours coding, building apps, and keeping up with the latest tech trends. You enjoy discussing programming languages, AI, and gadgets and are always ready to troubleshoot tech-related problems. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch(debug=True) #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/segdata.py b/spaces/HaHaBill/LandShapes-Antarctica/netdissect/segdata.py deleted file mode 100644 index f3cb6dfac8985d9c55344abbc26cc26c4862aa85..0000000000000000000000000000000000000000 --- a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/segdata.py +++ /dev/null @@ -1,74 +0,0 @@ -import os, numpy, torch, json -from .parallelfolder import ParallelImageFolders -from torchvision import transforms -from torchvision.transforms.functional import to_tensor, normalize - -class FieldDef(object): - def __init__(self, field, index, bitshift, bitmask, labels): - self.field = field - self.index = index - self.bitshift = bitshift - self.bitmask = bitmask - self.labels = labels - -class MultiSegmentDataset(object): - ''' - Just like ClevrMulticlassDataset, but the second stream is a one-hot - segmentation tensor rather than a flat one-hot presence vector. - - MultiSegmentDataset('dataset/clevrseg', - imgdir='images/train/positive', - segdir='images/train/segmentation') - ''' - def __init__(self, directory, transform=None, - imgdir='img', segdir='seg', val=False, size=None): - self.segdataset = ParallelImageFolders( - [os.path.join(directory, imgdir), - os.path.join(directory, segdir)], - transform=transform) - self.fields = [] - with open(os.path.join(directory, 'labelnames.json'), 'r') as f: - for defn in json.load(f): - self.fields.append(FieldDef( - defn['field'], defn['index'], defn['bitshift'], - defn['bitmask'], defn['label'])) - self.labels = ['-'] # Reserve label 0 to mean "no label" - self.categories = [] - self.label_category = [0] - for fieldnum, f in enumerate(self.fields): - self.categories.append(f.field) - f.firstchannel = len(self.labels) - f.channels = len(f.labels) - 1 - for lab in f.labels[1:]: - self.labels.append(lab) - self.label_category.append(fieldnum) - # Reserve 25% of the dataset for validation. - first_val = int(len(self.segdataset) * 0.75) - self.val = val - self.first = first_val if val else 0 - self.length = len(self.segdataset) - first_val if val else first_val - # Truncate the dataset if requested. - if size: - self.length = min(size, self.length) - - def __len__(self): - return self.length - - def __getitem__(self, index): - img, segimg = self.segdataset[index + self.first] - segin = numpy.array(segimg, numpy.uint8, copy=False) - segout = torch.zeros(len(self.categories), - segin.shape[0], segin.shape[1], dtype=torch.int64) - for i, field in enumerate(self.fields): - fielddata = ((torch.from_numpy(segin[:, :, field.index]) - >> field.bitshift) & field.bitmask) - segout[i] = field.firstchannel + fielddata - 1 - bincount = numpy.bincount(segout.flatten(), - minlength=len(self.labels)) - return img, segout, bincount - -if __name__ == '__main__': - ds = MultiSegmentDataset('dataset/clevrseg') - print(ds[0]) - import pdb; pdb.set_trace() - diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/pegasus/pretrain_pegasus.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/pegasus/pretrain_pegasus.sh deleted file mode 100644 index 3a371ac45463317fa01fa84a72f5df6bb9ca6bd5..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/pegasus/pretrain_pegasus.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=pegasus-base_last # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks-per-node=8 # number of tasks to run per node -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:8 # number of gpus per node -#SBATCH -o %x-%j.log # output and error log file names (%x for job id) - - -set -x -e - -echo "START TIME: $(date)" -MODEL_NAME=pegasus-base_test - -config_json="./$MODEL_NAME.ds_config.json" -export MASTER_PORT=$[RANDOM%10000+40000] - -MICRO_BATCH_SIZE=4 - -# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() -cat < $config_json -{ - "zero_optimization": { - "stage": 1 - }, - "fp16": { - "enabled": true, - "loss_scale": 0, - "loss_scale_window": 1000, - "initial_scale_power": 16, - "hysteresis": 2, - "min_loss_scale": 1 - }, - "optimizer": { - "params": { - "betas": [ - 0.9, - 0.999 - ], - "eps": 1e-08, - "lr": 1e-04, - "weight_decay": 0.01 - }, - "type": "Adam" - }, - "scheduler": { - "params": { - "warmup_max_lr": 1e-04, - "warmup_min_lr": 1e-05, - "total_num_steps": 80000000, - "warmup_num_steps" : 50000 - }, - "type": "WarmupDecayLR" - }, - "steps_per_print": 100, - "gradient_clipping": 1, - "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, - "zero_allow_untested_optimizer": false -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json -export TORCH_EXTENSIONS_DIR=/cognitive_comp/dongxiaoqun/torch_extendsions - -DATA_ARGS="\ - --datasets_name wudao_180g_512 \ - --num_workers 20 \ - --train_batchsize $MICRO_BATCH_SIZE \ - --val_batchsize 8 \ - --test_batchsize 8 \ - --max_seq_length 512 \ - --val_datasets_field valid \ - " - -MODEL_ARGS="\ - --model_path /cognitive_comp/dongxiaoqun/pretrained_model/pegasus-base/ \ - --learning_rate 1e-5 \ - --weight_decay 0.1 \ - --warmup_ratio 0.001 \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor train_loss \ - --save_top_k 3 \ - --mode min \ - --every_n_train_steps 200 \ - --dirpath /cognitive_comp/dongxiaoqun/train_model/fengshen-$MODEL_NAME_debug/ckpt \ - --filename model-{step:02d}-{train_loss:.4f} \ - --save_last \ - " - -TRAINER_ARGS="\ - --gradient_clip_val 1.0 \ - --max_epochs 1 \ - --gpus 2 \ - --num_nodes 1 \ - --strategy ddp \ - --log_every_n_steps 100 \ - --val_check_interval 0.1 \ - --accumulate_grad_batches 8 \ - --default_root_dir /cognitive_comp/dongxiaoqun/train_model/fengshen-$MODEL_NAME_debug \ - --stopword_path /cognitive_comp/dongxiaoqun/pretrained_model/pegasus-large/stopwords \ - " - - -export options=" \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ - " - -SINGULARITY_PATH=/cognitive_comp/dongxiaoqun/software/docker/pytorch21_06_py3_docker_image_v2.sif -export SCRIPT_PATH=/cognitive_comp/dongxiaoqun/project/idea-ccnl/bug_fix/Fengshenbang-LM/fengshen/examples/pegasus/pretrain_pegasus.py - -# python $SCRIPT_PATH $options -source activate -conda activate torchnew -srun --nodes=1 --ntasks-per-node=1 --gres=gpu:2 --cpus-per-task=30 -o ${MODEL_NAME}-%J.log --jobid=226191 bash -c 'python3 $SCRIPT_PATH $options' diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/mean_pool.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/mean_pool.py deleted file mode 100644 index 4eea048ef3455cb3c897e74c18778c78fdc9fcbf..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/mean_pool.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import os -import os.path as osp -import math -import numpy as np -import tqdm -import torch -import torch.nn.functional as F -from shutil import copyfile - -from npy_append_array import NpyAppendArray - - -def get_parser(): - parser = argparse.ArgumentParser( - description="mean pools representations by compressing uniform splits of the data" - ) - # fmt: off - parser.add_argument('source', help='directory with features') - parser.add_argument('--split', help='which split to read', required=True) - parser.add_argument('--save-dir', help='where to save the output', required=True) - parser.add_argument('--subsample-rate', type=float, default=0.5, help='size to subsample data to') - - parser.add_argument('--remove-extra', action='store_true', help='if true, removes extra states that cant be pooled, otherwise pads with 0s') - # fmt: on - - return parser - - -def main(): - parser = get_parser() - args = parser.parse_args() - - source_path = osp.join(args.source, args.split) - - print(f"data path: {source_path}") - - features = np.load(source_path + ".npy", mmap_mode="r") - - os.makedirs(args.save_dir, exist_ok=True) - save_path = osp.join(args.save_dir, args.split) - - copyfile(source_path + ".tsv", save_path + ".tsv") - - if os.path.exists(source_path + ".phn"): - copyfile(source_path + ".phn", save_path + ".phn") - if os.path.exists(source_path + ".wrd"): - copyfile(source_path + ".wrd", save_path + ".wrd") - - if os.path.exists(osp.join(args.source, "dict.phn.txt")): - copyfile( - osp.join(args.source, "dict.phn.txt"), - osp.join(args.save_dir, "dict.phn.txt"), - ) - - if osp.exists(save_path + ".npy"): - os.remove(save_path + ".npy") - npaa = NpyAppendArray(save_path + ".npy") - - with open(source_path + ".lengths", "r") as lf: - lengths = lf.readlines() - - fsz = features.shape[-1] - start = 0 - with torch.no_grad(): - with open(save_path + ".lengths", "w") as lengths_out: - for length in tqdm.tqdm(lengths): - length = int(length) - end = start + length - feats = features[start:end] - start += length - x = torch.from_numpy(feats).cuda() - target_num = math.ceil(length * args.subsample_rate) - rem = length % target_num - - if rem > 0: - if args.remove_extra: - to_rem = target_num - rem - target_num -= 1 - x = x[:-to_rem] - else: - to_add = target_num - rem - x = F.pad(x, [0, 0, 0, to_add]) - x[-to_add:] = x[-to_add - 1] - - x = x.view(target_num, -1, fsz) - x = x.mean(dim=-2) - print(target_num, file=lengths_out) - npaa.append(x.cpu().numpy()) - - -if __name__ == "__main__": - main() diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/distributed/fully_sharded_data_parallel.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/distributed/fully_sharded_data_parallel.py deleted file mode 100644 index 8a96bfc76516682ac8e2b7e2c3bc2e6aa3d8ef0c..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/distributed/fully_sharded_data_parallel.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import contextlib -from typing import Optional - -import torch -from fairseq.dataclass.configs import DistributedTrainingConfig -from fairseq.distributed import utils as dist_utils - - -try: - from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP - - has_FSDP = True -except ImportError: - FSDP = torch.nn.Module - has_FSDP = False - - -class FullyShardedDataParallel(FSDP): - """ - A small wrapper around fairscale's FullyShardedDataParallel (FSDP) with some - fairseq-specific checkpoint saving/loading logic. - - Args: - use_sharded_state (bool): if True, then ``state_dict`` will return - ``FSDP.local_state_dict`` and ``load_state_dict`` will call - ``FSDP.load_local_state_dict``. Otherwise, ``state_dict`` will - return the full model weights on data parallel rank 0 (empty on - other ranks) and ``load_state_dict`` will broadcast model weights - from rank 0 to other ranks. - """ - - def __init__(self, *args, use_sharded_state: bool = False, **kwargs): - if not has_FSDP: - raise ImportError( - "Cannot find FullyShardedDataParallel. " - "Please install fairscale with: pip install fairscale" - ) - super().__init__(*args, **kwargs) - self.use_sharded_state = use_sharded_state - - @property - def unwrapped_module(self) -> torch.nn.Module: - if self.flatten_parameters: - return self.module.module - else: - return self.module - - def state_dict(self, destination=None, prefix="", keep_vars=False): - if self.use_sharded_state: - return super().local_state_dict( - destination=destination, prefix=prefix, keep_vars=keep_vars - ) - else: - if self.rank == 0: - return super().state_dict( - destination=destination, prefix=prefix, keep_vars=keep_vars - ) - else: - # We must call state_dict() due to use of communication - # primitives. But we don't use the result. - super().state_dict() - return destination or {} - - def load_state_dict(self, state_dict, strict=True, model_cfg=None): - if self.use_sharded_state: - return super().load_local_state_dict(state_dict, strict=strict) - else: - state_dict = dist_utils.broadcast_object( - state_dict, src_rank=0, group=self.process_group - ) - return super().load_state_dict(state_dict, strict=strict) - - -@contextlib.contextmanager -def fsdp_enable_wrap(cfg: DistributedTrainingConfig): - try: - from fairscale.nn import enable_wrap - except ImportError: - raise ImportError( - "Cannot find FullyShardedDataParallel. " - "Please install fairscale with: pip install fairscale" - ) - if cfg.memory_efficient_fp16: - assert cfg.fp16 # memory_efficient_fp16 should imply fp16 - group = dist_utils.get_data_parallel_group() - if group is None and cfg.distributed_world_size == 1: - from fairscale.utils.testing import DummyProcessGroup - - group = DummyProcessGroup(rank=0, size=1) - fsdp_config = { - "process_group": group, - "reshard_after_forward": not cfg.no_reshard_after_forward, - "mixed_precision": cfg.fp16 and not cfg.memory_efficient_fp16, - "fp32_reduce_scatter": cfg.fp32_reduce_scatter, - "flatten_parameters": True, - "cpu_offload": cfg.cpu_offload, - "compute_dtype": torch.float16 if cfg.fp16 else torch.float32, - "bucket_cap_mb": cfg.bucket_cap_mb, - "state_dict_device": torch.device("cpu"), # reduce GPU mem usage - } - with enable_wrap( - wrapper_cls=FullyShardedDataParallel, - use_sharded_state=cfg.use_sharded_state, - **fsdp_config, - ): - yield - - -def fsdp_wrap(module, min_num_params: Optional[int] = None, **kwargs): - """ - Helper to wrap layers/modules in FSDP. This falls back to a no-op if - fairscale is not available. - - Args: - module (nn.Module): module to (maybe) wrap - min_num_params (int, Optional): minimum number of layer params to wrap - """ - try: - from fairscale.nn import wrap - - if min_num_params is not None: - num_params = sum(p.numel() for p in module.parameters()) - if num_params >= min_num_params: - return wrap(module, **kwargs) - else: - return module - else: - return wrap(module, **kwargs) - except ImportError: - return module diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp deleted file mode 100644 index d7e57c859085f98ec10960330ca763ae2764585a..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp +++ /dev/null @@ -1,29 +0,0 @@ -#include -#include - -std::vector -dynamicconv_cpu_forward(float* input, float* filters, int padding_l); - -std::vector dynamicconv_cpu_backward( - float* gradOutput, - int padding_l, - float* input, - float* filters); - -std::vector -dynamicconv_forward(float* input, float* filters, int padding_l) { - return dynamicconv_cpu_forward(input, filters, padding_l); -} - -std::vector dynamicconv_backward( - float* gradOutput, - int padding_l, - float* input, - float* filters) { - return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)"); - m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)"); -} diff --git a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/hifi_gan/models.py b/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/hifi_gan/models.py deleted file mode 100644 index be51fa51407e6ce1daaee5e8d090f6acdbee0db9..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/hifi_gan/models.py +++ /dev/null @@ -1,403 +0,0 @@ -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - self.num_kernels = len(h.resblock_kernel_sizes) - self.num_upsamples = len(h.upsample_rates) - self.conv_pre = weight_norm( - Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3) - ) - resblock = ResBlock1 if h.resblock == "1" else ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - h.upsample_initial_channel // (2 ** i), - h.upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h.upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes) - ): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - - def forward(self, x): - x = self.conv_pre(x) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print("Removing weight norm...") - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(5, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(5, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(5, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(5, 1), 0), - ) - ), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiPeriodDiscriminator, self).__init__() - self.discriminators = nn.ModuleList( - [ - DiscriminatorP(2), - DiscriminatorP(3), - DiscriminatorP(5), - DiscriminatorP(7), - DiscriminatorP(11), - ] - ) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList( - [ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ] - ) - self.meanpools = nn.ModuleList( - [AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)] - ) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += r_loss + g_loss - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/HighCWu/GPEN/face_model/op/upfirdn2d.py b/spaces/HighCWu/GPEN/face_model/op/upfirdn2d.py deleted file mode 100644 index 29d8c17fcc0c02d346d756520b81e58ba618a38d..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/GPEN/face_model/op/upfirdn2d.py +++ /dev/null @@ -1,193 +0,0 @@ -import os -import platform - -import torch -import torch.nn.functional as F -from torch.autograd import Function -from torch.utils.cpp_extension import load, _import_module_from_library - -# if running GPEN without cuda, please comment line 10-18 -if platform.system() == 'Linux' and torch.cuda.is_available(): - module_path = os.path.dirname(__file__) - upfirdn2d_op = load( - 'upfirdn2d', - sources=[ - os.path.join(module_path, 'upfirdn2d.cpp'), - os.path.join(module_path, 'upfirdn2d_kernel.cu'), - ], - ) - - -#upfirdn2d_op = _import_module_from_library('upfirdn2d', '/tmp/torch_extensions/upfirdn2d', True) - -class UpFirDn2dBackward(Function): - @staticmethod - def forward( - ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, size, out_size - ): - - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_op.upfirdn2d( - grad_output, - grad_kernel, - down_x, - down_y, - up_x, - up_y, - g_pad_x0, - g_pad_x1, - g_pad_y0, - g_pad_y1, - ) - grad_input = grad_input.view(size[0], size[1], size[2], size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.size = size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.size[2], ctx.size[3], 1) - - gradgrad_out = upfirdn2d_op.upfirdn2d( - gradgrad_input, - kernel, - ctx.up_x, - ctx.up_y, - ctx.down_x, - ctx.down_y, - ctx.pad_x0, - ctx.pad_x1, - ctx.pad_y0, - ctx.pad_y1, - ) - # gradgrad_out = gradgrad_out.view(ctx.size[0], ctx.out_size[0], ctx.out_size[1], ctx.size[3]) - gradgrad_out = gradgrad_out.view( - ctx.size[0], ctx.size[1], ctx.out_size[0], ctx.out_size[1] - ) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - batch, channel, in_h, in_w = input.shape - ctx.size = input.shape - - input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_op.upfirdn2d( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 - ) - # out = out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0), device='cpu'): - if platform.system() == 'Linux' and torch.cuda.is_available() and device != 'cpu': - out = UpFirDn2d.apply( - input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1]) - ) - else: - out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - input = input.permute(0, 2, 3, 1) - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - # out = out.permute(0, 2, 3, 1) - return out[:, :, ::down_y, ::down_x] diff --git a/spaces/HighCWu/Style2Paints-4.5-Gradio/ui/web-mobile/style-mobile.72851.css b/spaces/HighCWu/Style2Paints-4.5-Gradio/ui/web-mobile/style-mobile.72851.css deleted file mode 100644 index 122426fc1799e06885e092dfd1aa3fbedc739cf6..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/Style2Paints-4.5-Gradio/ui/web-mobile/style-mobile.72851.css +++ /dev/null @@ -1,122 +0,0 @@ -html { - -ms-touch-action: none; -} - -body, canvas, div { - display: block; - outline: none; - -webkit-tap-highlight-color: rgba(0, 0, 0, 0); - - -moz-user-select: none; - -webkit-user-select: none; - -ms-user-select: none; - -khtml-user-select: none; - -webkit-tap-highlight-color: rgba(0, 0, 0, 0); -} - -/* Remove spin of input type number */ -input::-webkit-outer-spin-button, -input::-webkit-inner-spin-button { - /* display: none; <- Crashes Chrome on hover */ - -webkit-appearance: none; - margin: 0; /* <-- Apparently some margin are still there even though it's hidden */ -} - -body { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - padding: 0; - border: 0; - margin: 0; - - cursor: default; - color: #888; - background-color: #333; - - text-align: center; - font-family: Helvetica, Verdana, Arial, sans-serif; - - display: flex; - flex-direction: column; -} - -#Cocos2dGameContainer { - position: absolute; - margin: 0; - overflow: hidden; - left: 0px; - top: 0px; - - display: -webkit-box; - -webkit-box-orient: horizontal; - -webkit-box-align: center; - -webkit-box-pack: center; -} - -canvas { - background-color: rgba(0, 0, 0, 0); -} - -a:link, a:visited { - color: #666; -} - -a:active, a:hover { - color: #666; -} - -p.header { - font-size: small; -} - -p.footer { - font-size: x-small; -} - -#splash { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - background: #171717 url(./splash.03ce1.png) no-repeat center; - background-size: 40%; -} - -.progress-bar { - background-color: #1a1a1a; - position: absolute; - left: 25%; - top: 80%; - height: 15px; - padding: 5px; - width: 50%; - /*margin: 0 -175px; */ - border-radius: 5px; - box-shadow: 0 1px 5px #000 inset, 0 1px 0 #444; -} - -.progress-bar span { - display: block; - height: 100%; - border-radius: 3px; - box-shadow: 0 1px 0 rgba(255, 255, 255, .5) inset; - transition: width .4s ease-in-out; - background-color: #34c2e3; -} - -.stripes span { - background-size: 30px 30px; - background-image: linear-gradient(135deg, rgba(255, 255, 255, .15) 25%, transparent 25%, - transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, - transparent 75%, transparent); - - animation: animate-stripes 1s linear infinite; -} - -@keyframes animate-stripes { - 0% {background-position: 0 0;} 100% {background-position: 60px 0;} -} diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/tunneling.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/tunneling.py deleted file mode 100644 index afb1a01d2c4e5196018e5235c358494b0a18af87..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/tunneling.py +++ /dev/null @@ -1,105 +0,0 @@ -import atexit -import os -import platform -import re -import subprocess -from pathlib import Path -from typing import List - -VERSION = "0.1" -CURRENT_TUNNELS: List["Tunnel"] = [] - - -class Tunnel: - def __init__(self, remote_host, remote_port, local_host, local_port): - self.proc = None - self.url = None - self.remote_host = remote_host - self.remote_port = remote_port - self.local_host = local_host - self.local_port = local_port - - @staticmethod - def download_binary(): - machine = platform.machine() - if machine == "x86_64": - machine = "amd64" - - # Check if the file exist - binary_name = f"frpc_{platform.system().lower()}_{machine.lower()}" - binary_path = str(Path(__file__).parent / binary_name) - - extension = ".exe" if os.name == "nt" else "" - - if not Path(binary_path).exists(): - import stat - - import requests - - binary_url = f"https://cdn-media.huggingface.co/frpc-gradio-{VERSION}/{binary_name}{extension}" - resp = requests.get(binary_url) - - if resp.status_code == 403: - raise OSError( - f"Cannot set up a share link as this platform is incompatible. Please " - f"create a GitHub issue with information about your platform: {platform.uname()}" - ) - - resp.raise_for_status() - - # Save file data to local copy - with open(binary_path, "wb") as file: - file.write(resp.content) - st = os.stat(binary_path) - os.chmod(binary_path, st.st_mode | stat.S_IEXEC) - - return binary_path - - def start_tunnel(self) -> str: - binary_path = self.download_binary() - self.url = self._start_tunnel(binary_path) - return self.url - - def kill(self): - if self.proc is not None: - print(f"Killing tunnel {self.local_host}:{self.local_port} <> {self.url}") - self.proc.terminate() - self.proc = None - - def _start_tunnel(self, binary: str) -> str: - CURRENT_TUNNELS.append(self) - command = [ - binary, - "http", - "-n", - "random", - "-l", - str(self.local_port), - "-i", - self.local_host, - "--uc", - "--sd", - "random", - "--ue", - "--server_addr", - f"{self.remote_host}:{self.remote_port}", - "--disable_log_color", - ] - - self.proc = subprocess.Popen( - command, stdout=subprocess.PIPE, stderr=subprocess.PIPE - ) - atexit.register(self.kill) - url = "" - while url == "": - if self.proc.stdout is None: - continue - line = self.proc.stdout.readline() - line = line.decode("utf-8") - if "start proxy success" in line: - result = re.search("start proxy success: (.+)\n", line) - if result is None: - raise ValueError("Could not create share URL") - else: - url = result.group(1) - return url diff --git a/spaces/HuggingAlgorithms/Object-Detection-with-YOLO/README.md b/spaces/HuggingAlgorithms/Object-Detection-with-YOLO/README.md deleted file mode 100644 index 89ce3d948a7eaf13684fd096f00c6dca9e2ebfca..0000000000000000000000000000000000000000 --- a/spaces/HuggingAlgorithms/Object-Detection-with-YOLO/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Object Detection With YOLO -emoji: ❤️‍🔥 -colorFrom: purple -colorTo: white -sdk: gradio -sdk_version: 3.37.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Huu-Mon12/test01/README.md b/spaces/Huu-Mon12/test01/README.md deleted file mode 100644 index 96fd34435b89204b4408b70002a7dc3b1f96898a..0000000000000000000000000000000000000000 --- a/spaces/Huu-Mon12/test01/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Test01 -emoji: 📈 -colorFrom: pink -colorTo: yellow -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ICML2022/resefa/third_party/stylegan2_official_ops/bias_act.h b/spaces/ICML2022/resefa/third_party/stylegan2_official_ops/bias_act.h deleted file mode 100644 index a32187e1fb7e3bae509d4eceaf900866866875a4..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/resefa/third_party/stylegan2_official_ops/bias_act.h +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -//------------------------------------------------------------------------ -// CUDA kernel parameters. - -struct bias_act_kernel_params -{ - const void* x; // [sizeX] - const void* b; // [sizeB] or NULL - const void* xref; // [sizeX] or NULL - const void* yref; // [sizeX] or NULL - const void* dy; // [sizeX] or NULL - void* y; // [sizeX] - - int grad; - int act; - float alpha; - float gain; - float clamp; - - int sizeX; - int sizeB; - int stepB; - int loopX; -}; - -//------------------------------------------------------------------------ -// CUDA kernel selection. - -template void* choose_bias_act_kernel(const bias_act_kernel_params& p); - -//------------------------------------------------------------------------ diff --git a/spaces/Illumotion/Koboldcpp/make_old_pyinstaller_cuda.bat b/spaces/Illumotion/Koboldcpp/make_old_pyinstaller_cuda.bat deleted file mode 100644 index 518fb165c192db09517750335c6b2191b4e910f3..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/make_old_pyinstaller_cuda.bat +++ /dev/null @@ -1,4 +0,0 @@ -echo This file is only for my own usage, please do not use it. I am lazy. - -set PATH=d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python;d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python\\Scripts;%PATH% -PyInstaller --noconfirm --onefile --clean --console --collect-all customtkinter --icon "./nikogreen.ico" --add-data "./klite.embd;." --add-data "./koboldcpp_default.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./koboldcpp_cublas.dll;." --add-data "./cublas64_11.dll;." --add-data "./cublasLt64_11.dll;." --add-data "./cudart64_110.dll;." --add-data "./msvcp140.dll;." --add-data "./vcruntime140.dll;." --add-data "./vcruntime140_1.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe" \ No newline at end of file diff --git a/spaces/Intoval/privateChatGPT/chatgpt - windows.bat b/spaces/Intoval/privateChatGPT/chatgpt - windows.bat deleted file mode 100644 index 0b78fdc3a559abd692e3a9e9af5e482124d13a99..0000000000000000000000000000000000000000 --- a/spaces/Intoval/privateChatGPT/chatgpt - windows.bat +++ /dev/null @@ -1,14 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" - -REM The web page can be accessed with delayed start http://127.0.0.1:7860/ -ping -n 5 127.0.0.1>nul - -REM access chargpt via your default browser -start "" "http://127.0.0.1:7860/" - - -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). \ No newline at end of file diff --git a/spaces/JeffJing/ZookChatBot/steamship/client/__init__.py b/spaces/JeffJing/ZookChatBot/steamship/client/__init__.py deleted file mode 100644 index d5128bcd6258855d8720a391c43d32a4dce01af2..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/client/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .steamship import Steamship - -__all__ = ["Steamship"] diff --git a/spaces/JunchuanYu/SegRS/README.md b/spaces/JunchuanYu/SegRS/README.md deleted file mode 100644 index 959b484054b6a584f1c43901adcc8775cf852d67..0000000000000000000000000000000000000000 --- a/spaces/JunchuanYu/SegRS/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Segment-RS -emoji: 🛰️ -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.27.0 -app_file: run.py -pinned: True ---- diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/hifigan/meldataset.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/hifigan/meldataset.py deleted file mode 100644 index eb0682b0f6c03319a4fd5d16f67d8aa843a0216e..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/hifigan/meldataset.py +++ /dev/null @@ -1,178 +0,0 @@ -import math -import os -import random -import torch -import torch.utils.data -import numpy as np -from librosa.util import normalize -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def load_wav(full_path): - sampling_rate, data = read(full_path) - return data, sampling_rate - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - if fmax not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - - spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec) - spec = spectral_normalize_torch(spec) - - return spec - - -def get_dataset_filelist(a): - # with open(a.input_training_file, 'r', encoding='utf-8') as fi: - # training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav') - # for x in fi.read().split('\n') if len(x) > 0] - - # with open(a.input_validation_file, 'r', encoding='utf-8') as fi: - # validation_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav') - # for x in fi.read().split('\n') if len(x) > 0] - - files = os.listdir(a.input_wavs_dir) - random.shuffle(files) - files = [os.path.join(a.input_wavs_dir, f) for f in files] - training_files = files[: -int(len(files)*0.05)] - validation_files = files[-int(len(files)*0.05): ] - - return training_files, validation_files - - -class MelDataset(torch.utils.data.Dataset): - def __init__(self, training_files, segment_size, n_fft, num_mels, - hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1, - device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None): - self.audio_files = training_files - random.seed(1234) - if shuffle: - random.shuffle(self.audio_files) - self.segment_size = segment_size - self.sampling_rate = sampling_rate - self.split = split - self.n_fft = n_fft - self.num_mels = num_mels - self.hop_size = hop_size - self.win_size = win_size - self.fmin = fmin - self.fmax = fmax - self.fmax_loss = fmax_loss - self.cached_wav = None - self.n_cache_reuse = n_cache_reuse - self._cache_ref_count = 0 - self.device = device - self.fine_tuning = fine_tuning - self.base_mels_path = base_mels_path - - def __getitem__(self, index): - filename = self.audio_files[index] - if self._cache_ref_count == 0: - # audio, sampling_rate = load_wav(filename) - # audio = audio / MAX_WAV_VALUE - audio = np.load(filename) - if not self.fine_tuning: - audio = normalize(audio) * 0.95 - self.cached_wav = audio - # if sampling_rate != self.sampling_rate: - # raise ValueError("{} SR doesn't match target {} SR".format( - # sampling_rate, self.sampling_rate)) - self._cache_ref_count = self.n_cache_reuse - else: - audio = self.cached_wav - self._cache_ref_count -= 1 - - audio = torch.FloatTensor(audio) - audio = audio.unsqueeze(0) - - if not self.fine_tuning: - if self.split: - if audio.size(1) >= self.segment_size: - max_audio_start = audio.size(1) - self.segment_size - audio_start = random.randint(0, max_audio_start) - audio = audio[:, audio_start:audio_start+self.segment_size] - else: - audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant') - - mel = mel_spectrogram(audio, self.n_fft, self.num_mels, - self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax, - center=False) - else: - mel_path = os.path.join(self.base_mels_path, "mel" + "-" + filename.split("/")[-1].split("-")[-1]) - mel = np.load(mel_path).T - # mel = np.load( - # os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy')) - mel = torch.from_numpy(mel) - - if len(mel.shape) < 3: - mel = mel.unsqueeze(0) - - if self.split: - frames_per_seg = math.ceil(self.segment_size / self.hop_size) - - if audio.size(1) >= self.segment_size: - mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) - mel = mel[:, :, mel_start:mel_start + frames_per_seg] - audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size] - else: - mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant') - audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant') - - mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels, - self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss, - center=False) - - return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) - - def __len__(self): - return len(self.audio_files) diff --git a/spaces/KevinQHLin/UniVTG/model/base_qfvs.py b/spaces/KevinQHLin/UniVTG/model/base_qfvs.py deleted file mode 100644 index 19e9553ef35763eebd97125964c7c4864b9dd7db..0000000000000000000000000000000000000000 --- a/spaces/KevinQHLin/UniVTG/model/base_qfvs.py +++ /dev/null @@ -1,476 +0,0 @@ -import pdb -import torch -import torch.nn.functional as F -from torch import nn -import numpy as np - -from model.transformer_encoder_droppath import build_transformer -from model.matcher import build_matcher -from model.position_encoding import build_position_encoding -from utils.span_utils import generalized_temporal_iou, span_cxw_to_xx - -def init_weights(module): - if isinstance(module, (nn.Linear, nn.Embedding)): - module.weight.data.normal_(mean=0.0, std=0.02) - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - -def mask_logits(inputs, mask, mask_value=-1e30): - mask = mask.type(torch.float32) - return inputs + (1.0 - mask) * mask_value - -def sim_matrix(a, b, eps=1e-8): - """ - added eps for numerical stability - """ - a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None] - a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n)) - b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n)) - sim_mt = torch.mm(a_norm, b_norm.transpose(0, 1)) - return sim_mt - -class WeightedPool(nn.Module): - def __init__(self, dim): - super(WeightedPool, self).__init__() - weight = torch.empty(dim, 1) - nn.init.xavier_uniform_(weight) - self.weight = nn.Parameter(weight, requires_grad=True) - - def forward(self, x, mask): - alpha = torch.tensordot(x, self.weight, dims=1) # shape = (batch_size, seq_length, 1) - alpha = mask_logits(alpha, mask=mask.unsqueeze(2)) - alphas = nn.Softmax(dim=1)(alpha) - pooled_x = torch.matmul(x.transpose(1, 2), alphas) # (batch_size, dim, 1) - pooled_x = pooled_x.squeeze(2) - return pooled_x - -class Model(nn.Module): - """ This is the UniVTG module that performs moment localization. """ - - def __init__(self, transformer, position_embed, txt_position_embed, txt_dim, vid_dim, - input_dropout, aux_loss=False, - max_v_l=75, span_loss_type="l1", use_txt_pos=False, n_input_proj=2): - """ Initializes the model. - Parameters: - transformer: torch module of the transformer architecture. See transformer.py - position_embed: torch module of the position_embedding, See position_encoding.py - txt_position_embed: position_embedding for text - txt_dim: int, text query input dimension - vid_dim: int, video feature input dimension - max_v_l: int, maximum #clips in videos - span_loss_type: str, one of [l1, ce] - l1: (center-x, width) regression. - ce: (st_idx, ed_idx) classification. - # foreground_thd: float, intersection over prediction >= foreground_thd: labeled as foreground - # background_thd: float, intersection over prediction <= background_thd: labeled background - """ - super().__init__() - self.transformer = transformer - self.position_embed = position_embed - self.txt_position_embed = txt_position_embed - hidden_dim = transformer.d_model - self.span_loss_type = span_loss_type - self.max_v_l = max_v_l - span_pred_dim = 2 if span_loss_type == "l1" else max_v_l * 2 - - self.token_type_embeddings = nn.Embedding(2, hidden_dim) - self.token_type_embeddings.apply(init_weights) - - # Conv projector - self.span_embed = Conv(hidden_dim, hidden_dim, span_pred_dim, 3, kernel_size=3) - self.class_embed = Conv(hidden_dim, hidden_dim, 1, 3, kernel_size=3) # 0: background, 1: foreground - - self.use_txt_pos = use_txt_pos - self.n_input_proj = n_input_proj - relu_args = [True] * 3 - relu_args[n_input_proj-1] = False - self.input_txt_proj = nn.Sequential(*[ - LinearLayer(txt_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[0]), - LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[1]), - LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[2]) - ][:n_input_proj]) - self.input_vid_proj = nn.Sequential(*[ - LinearLayer(vid_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[0]), - LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[1]), - LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[2]) - ][:n_input_proj]) - - # MLP Projector - self.weightedpool = WeightedPool(hidden_dim) - - def forward(self, src_txt, src_txt_mask, src_vid, src_vid_mask, src_cls=None, src_cls_mask=None): - bs = src_vid.shape[0] - src_vid = self.input_vid_proj(src_vid) - src_txt = self.input_txt_proj(src_txt) - if src_cls is not None: - src_cls = self.input_txt_proj(src_cls) - - # type token. - src_vid = src_vid + self.token_type_embeddings(torch.full_like(src_vid_mask.long(), 1)) - src_txt = src_txt + self.token_type_embeddings(torch.zeros_like(src_txt_mask.long())) - if src_cls is not None: - src_cls = src_cls + self.token_type_embeddings(torch.zeros_like(src_cls_mask.long())) - - src = torch.cat([src_vid, src_txt], dim=1) # (bsz, L_vid+L_txt, d) - mask = torch.cat([src_vid_mask, src_txt_mask], dim=1).bool() # (bsz, L_vid+L_txt) - - pos_vid = self.position_embed(src_vid, src_vid_mask) # (bsz, L_vid, d) - pos_txt = self.txt_position_embed(src_txt) if self.use_txt_pos else torch.zeros_like(src_txt) # (bsz, L_txt, d) - pos = torch.cat([pos_vid, pos_txt], dim=1) - - memory = self.transformer(src, ~mask, pos) - vid_mem = memory[:, :src_vid.shape[1], :] # (bsz, L_vid, d) - - outputs_class = self.class_embed(vid_mem).sigmoid() # (#layers, batch_size, #queries, #classes) - outputs_coord = self.span_embed(vid_mem) # (#layers, bsz, #queries, 2 or max_v_l * 2) - - if self.span_loss_type == "l1": - outputs_coord = outputs_coord.sigmoid() - idx_mask = torch.tensor((-1, 1)).unsqueeze(0).unsqueeze(0).cuda() - idx_mask = idx_mask.repeat(outputs_coord.shape[0], outputs_coord.shape[1], 1) - outputs_coord = outputs_coord * idx_mask - else: - raise NotImplementedError - - out = {'pred_logits': outputs_class, 'pred_spans': outputs_coord, - 'src_vid_mask': src_vid_mask} - - vid_mem_proj = src_vid - - # word-level -> sentence-level - txt_mem_proj = self.weightedpool(src_txt, src_txt_mask).unsqueeze(1) - sim = F.cosine_similarity(vid_mem_proj, txt_mem_proj, dim=-1) + (src_vid_mask + 1e-45).log() - - out["vid_mem_proj"] = vid_mem_proj - out["txt_mem_proj"] = txt_mem_proj - if src_cls is not None: - cls_mem_proj = self.weightedpool(src_cls, src_cls_mask) - out["cls_mem_proj"] = cls_mem_proj - out["saliency_scores"] = sim - return out - -class SetCriterion(nn.Module): - """ This class computes the loss for DETR. - The process happens in two steps: - 1) we compute hungarian assignment between ground truth boxes and the outputs of the model - 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) - """ - - def __init__(self, matcher, weight_dict, eos_coef, losses, temperature, span_loss_type, max_v_l, - saliency_margin=1): - """ Create the criterion. - Parameters: - matcher: module able to compute a matching between targets and proposals - weight_dict: dict containing as key the names of the losses and as values their relative weight. - eos_coef: relative classification weight applied to the no-object category - losses: list of all the losses to be applied. See get_loss for list of available losses. - temperature: float, temperature for NCE loss - span_loss_type: str, [l1, ce] - max_v_l: int, - saliency_margin: float - """ - super().__init__() - self.matcher = matcher - self.weight_dict = weight_dict - self.losses = losses - self.temperature = temperature - self.span_loss_type = span_loss_type - self.max_v_l = max_v_l - self.saliency_margin = saliency_margin - self.temperature = 0.07 - - # foreground and background classification - self.foreground_label = 0 - self.background_label = 1 - self.eos_coef = eos_coef - empty_weight = torch.ones(2) - empty_weight[-1] = self.eos_coef # lower weight for background (index 1, foreground index 0) - self.register_buffer('empty_weight', empty_weight) - - def loss_spans(self, outputs, targets, indices): - assert 'pred_spans' in outputs - - start_spans = targets['timestamp'] - pred_spans = outputs['pred_spans'] - src_spans = start_spans + pred_spans - gt_spans = targets['span_labels_nn'] - - mask = targets['timestamp_mask'].bool() - mask_full = targets['timestamp_mask'].unsqueeze(2).repeat(1, 1, 2) - mask_valid = targets['timestamp_window'].bool() - mask_valid_full = targets['timestamp_window'].unsqueeze(2).repeat(1, 1, 2) - - loss_span = F.smooth_l1_loss(src_spans, gt_spans, reduction='none') * mask_valid_full - loss_giou = 1 - torch.diag(generalized_temporal_iou(src_spans[mask_valid], gt_spans[mask_valid])) - - losses = {} - losses['loss_b'] = loss_span.sum() / mask_valid.sum() - losses['loss_g'] = loss_giou.mean() - return losses - - def loss_labels(self, outputs, targets, indices, log=True): - saliency_scores = targets["saliency_scores"] - if saliency_scores.sum() == 0: - return {"loss_f": 0.} - - src_logits = outputs['pred_logits'].squeeze(-1) # (batch_size, #queries, #classes=2) - target_classes = targets["saliency_scores"].squeeze() - - weights = torch.ones_like(target_classes).float() * self.empty_weight[1] - weights[target_classes.bool()] = self.empty_weight[0] - - loss_ce = F.binary_cross_entropy(src_logits, target_classes.float(), reduction="none") - # pdb.set_trace() - return {"loss_f": loss_ce.sum() / target_classes.sum()} - # return {"loss_f": loss_ce.sum() / len(target_classes)} - - # mask = targets['timestamp_mask'].bool() - # mask_valid = targets['timestamp_window'].bool() - # target_classes = torch.full(src_logits.shape[:2], 0, dtype=torch.int64, device=src_logits.device) # (batch_size, #queries) - # target_classes[mask_valid] = 1 - # # target_classes = targets['timestamp_window'] # soft cls. - # target_classes.float() - # # pdb.set_trace() - - # weights = torch.zeros_like(target_classes).float() - # weights[mask] = self.empty_weight[1] - # weights[mask_valid] = self.empty_weight[0] - - # loss_ce = F.binary_cross_entropy(src_logits, target_classes.float(), weight=weights, reduction="none") * mask - # # return {"loss_f": loss_ce.sum() / mask.sum()} - # return {"loss_f": loss_ce.sum() / mask_valid.sum()} - - def loss_saliency(self, outputs, targets, indices, log=True): - """higher scores for positive clips""" - if "saliency_pos_labels" not in targets: - return {"loss_s_inter": 0., "loss_s_intra": 0.} - saliency_scores = targets["saliency_scores"] - if saliency_scores.sum() == 0: - return {"loss_s_inter": 0., "loss_s_intra": 0.} - - # * qfvs mil-nce mode - pos_indices = saliency_scores.squeeze() > 0 - - sim = outputs['saliency_scores'] - sim_soft = F.softmax(sim / self.temperature, dim=0) - sim_log = torch.log(sim_soft[pos_indices]) - loss_saliency_intra = -sim_log.sum() / len(sim_log) - return {"loss_s_inter": 0., "loss_s_intra": loss_saliency_intra} - - # * inter-vid mode - # vid_mem_proj = outputs["vid_mem_proj"] - # pos_indices = targets["saliency_pos_labels"][:,0].long() # (N, #pairs) - # batch_indices = torch.arange(len(vid_mem_proj)).to(vid_mem_proj.device) - - # vid_feats = vid_mem_proj[batch_indices, pos_indices] - # txt_feats = outputs["txt_mem_proj"].squeeze(1) - # sim = sim_matrix(vid_feats, txt_feats) - - # i_logsm = F.log_softmax(sim / self.temperature, dim=1) - # j_logsm = F.log_softmax(sim.t() /self.temperature, dim=1) - - # # sum over positives - # idiag = torch.diag(i_logsm) - # jdiag = torch.diag(j_logsm) - # loss_i = idiag.sum() / len(idiag) - # loss_j = jdiag.sum() / len(jdiag) - - # loss_saliency_inter = - loss_i - loss_j - - # # * intra-vid mode - # mask = targets['timestamp_mask'] - # selected_scores = saliency_scores[batch_indices, pos_indices].unsqueeze(-1) - # neg_indices_in = (saliency_scores < selected_scores) - # neg_indices_in[batch_indices, pos_indices] = True - # mask_invalid = neg_indices_in * mask.bool() - - # sim_in = F.cosine_similarity(vid_mem_proj, txt_feats.unsqueeze(1), dim=-1) - # sim_in = sim_in + (mask_invalid + 1e-45).log() - # logsm_in_i = F.log_softmax(sim_in / self.temperature, dim=1) - # logsm_in_j = F.log_softmax(sim_in.t() / self.temperature, dim=1) - - # pos_logsm_in_i = logsm_in_i[batch_indices, pos_indices] - # pos_logsm_in_j = logsm_in_j[pos_indices, batch_indices] - # loss_in_i = pos_logsm_in_i.sum() / len(pos_logsm_in_i) - # loss_in_j = pos_logsm_in_j.sum() / len(pos_logsm_in_j) - - # loss_saliency_intra = - loss_in_i - loss_in_j - - # return {"loss_s_inter": loss_saliency_inter, "loss_s_intra": loss_saliency_intra} - - def loss_saliency_cls(self, outputs, targets, indices, log=True): - """higher scores for positive clips""" - if "saliency_pos_labels" not in targets: - return {"loss_s_inter": 0., "loss_s_intra": 0.} - saliency_scores = targets["saliency_scores"] - if saliency_scores.sum() == 0: - return {"loss_s_inter": 0., "loss_s_intra": 0.} - - # * inter-vid mode - vid_mem_proj = outputs["vid_mem_proj"] - pos_indices = targets["saliency_pos_labels"][:,0].long() # (N, #pairs) - batch_indices = torch.arange(len(vid_mem_proj)).to(vid_mem_proj.device) - - vid_feats = vid_mem_proj[batch_indices, pos_indices] - txt_feats = outputs["txt_mem_proj"].squeeze(1) - sim = sim_matrix(vid_feats, txt_feats) - - i_logsm = F.log_softmax(sim / self.temperature, dim=1) - j_logsm = F.log_softmax(sim.t() /self.temperature, dim=1) - - # sum over positives - idiag = torch.diag(i_logsm) - jdiag = torch.diag(j_logsm) - loss_i = idiag.sum() / len(idiag) - loss_j = jdiag.sum() / len(jdiag) - - loss_saliency_inter = - loss_i - loss_j - - # * intra-vid mode - if 'cls_idx' not in targets.keys(): # eval - return {"loss_s_inter": loss_saliency_inter} - - cls_indices = targets['cls_idx'].bool() - cls_feats = outputs["cls_mem_proj"].squeeze(1) - sim_cls = sim_matrix(vid_feats, cls_feats) - - i_logsm_cls = F.log_softmax(sim_cls / self.temperature, dim=1) - idiag_cls = i_logsm_cls[cls_indices] - loss_cls_i = idiag_cls.sum() / len(idiag_cls) - - loss_saliency_intra = - loss_cls_i - - return {"loss_s_inter": loss_saliency_inter, "loss_s_intra": loss_saliency_intra} - - def get_loss(self, loss, outputs, targets, indices, **kwargs): - loss_map = { - "spans": self.loss_spans, - "labels": self.loss_labels, - "saliency": self.loss_saliency, - "saliency_cls": self.loss_saliency_cls, - } - assert loss in loss_map, f'do you really want to compute {loss} loss?' - return loss_map[loss](outputs, targets, indices, **kwargs) - - def forward(self, outputs, targets, mask_GT=None): - """ This performs the loss computation. - Parameters: - outputs: dict of tensors, see the output specification of the model for the format - targets: list of dicts, such that len(targets) == batch_size. - The expected keys in each dict depends on the losses applied, see each loss' doc - """ - indices = None - # Compute all the requested losses - losses = {} - # pdb.set_trace() - outputs['pred_logits'] = outputs['pred_logits'].reshape(1, -1).masked_select(mask_GT[0]) - outputs['saliency_scores'] = outputs['saliency_scores'].reshape(1, -1).masked_select(mask_GT[0]) - targets['saliency_scores'] = targets['saliency_scores'].masked_select(mask_GT[0]) - - for loss in self.losses: - losses.update(self.get_loss(loss, outputs, targets, indices)) - - return losses - -class MLP(nn.Module): - """ Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x - -class Conv(nn.Module): - """ Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers, kernel_size): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - # self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) - self.layers = nn.ModuleList( - nn.Conv1d(n, k, kernel_size=kernel_size, stride=1, padding=kernel_size//2, dilation=1, groups=1, bias=True, padding_mode='zeros') - for n, k in zip([input_dim] + h, h + [output_dim])) - def forward(self, x): - x = x.permute(0,2,1) - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x.permute(0, 2, 1) - -class LinearLayer(nn.Module): - """linear layer configurable with layer normalization, dropout, ReLU.""" - - def __init__(self, in_hsz, out_hsz, layer_norm=True, dropout=0.1, relu=True): - super(LinearLayer, self).__init__() - self.relu = relu - self.layer_norm = layer_norm - if layer_norm: - self.LayerNorm = nn.LayerNorm(in_hsz) - layers = [ - nn.Dropout(dropout), - nn.Linear(in_hsz, out_hsz) - ] - self.net = nn.Sequential(*layers) - - def forward(self, x): - """(N, L, D)""" - if self.layer_norm: - x = self.LayerNorm(x) - x = self.net(x) - if self.relu: - x = F.relu(x, inplace=True) - return x # (N, L, D) - - -def build_model(args): - device = torch.device(args.device) - - transformer = build_transformer(args) - position_embedding, txt_position_embedding = build_position_encoding(args) - - model = Model( - transformer, - position_embedding, - txt_position_embedding, - txt_dim=args.t_feat_dim, - vid_dim=args.v_feat_dim, - input_dropout=args.input_dropout, - span_loss_type=args.span_loss_type, - use_txt_pos=args.use_txt_pos, - n_input_proj=args.n_input_proj, - ) - - matcher = build_matcher(args) - weight_dict = {"loss_b": args.b_loss_coef, - "loss_g": args.g_loss_coef, - "loss_f": args.f_loss_coef, - "loss_s_intra": args.s_loss_intra_coef, - "loss_s_inter": args.s_loss_inter_coef} - - if args.dset_type in ['mr', 'vlp']: - if 'tal' not in args.train_path: - losses = ['spans', 'labels', 'saliency'] - else: - losses = ['spans', 'labels', 'saliency_cls'] - elif args.dset_type in ['hl', 'vs']: - losses = ['labels', 'saliency'] - - criterion = SetCriterion( - matcher=matcher, - weight_dict=weight_dict, losses=losses, - eos_coef=args.eos_coef, temperature=args.temperature, - span_loss_type=args.span_loss_type, max_v_l=args.max_v_l, - saliency_margin=args.saliency_margin, - ) - criterion.to(device) - return model, criterion \ No newline at end of file diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/tokenize/indic_tokenize.py b/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/tokenize/indic_tokenize.py deleted file mode 100644 index 4fa60735b2dd501382c399b22809d67ee2783b97..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/tokenize/indic_tokenize.py +++ /dev/null @@ -1,113 +0,0 @@ -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -# - -#Program for tokenizing Indian language input -# -# @author Anoop Kunchukuttan -# -""" -Tokenizer for Indian languages. Currently, simple punctuation-based tokenizers -are supported (see `trivial_tokenize`). Major Indian language punctuations are -handled. -""" -import string, re, sys - -from indicnlp.common import IndicNlpException - -### tokenizer patterns -triv_tokenizer_indic_pat=re.compile(r'(['+string.punctuation+r'\u0964\u0965\uAAF1\uAAF0\uABEB\uABEC\uABED\uABEE\uABEF\u1C7E\u1C7F'+r'])') -triv_tokenizer_urdu_pat=re.compile(r'(['+string.punctuation+r'\u0609\u060A\u060C\u061E\u066A\u066B\u066C\u066D\u06D4'+r'])') - -## date, numbers, section/article numbering -pat_num_seq=re.compile(r'([0-9]+ [,.:/] )+[0-9]+') - -def trivial_tokenize_indic(text): - """tokenize string for Indian language scripts using Brahmi-derived scripts - - A trivial tokenizer which just tokenizes on the punctuation boundaries. - This also includes punctuations for the Indian language scripts (the - purna virama and the deergha virama). This is a language independent - tokenizer - - Args: - text (str): text to tokenize - - Returns: - list: list of tokens - - """ - tok_str=triv_tokenizer_indic_pat.sub(r' \1 ',text.replace('\t',' ')) -# return re.sub(r'[ ]+',' ',tok_str).strip(' ').split(' ') - - s=re.sub(r'[ ]+',' ',tok_str).strip(' ') - - # do not tokenize numbers and dates - new_s='' - prev=0 - for m in pat_num_seq.finditer(s): - start=m.start() - end=m.end() - if start>prev: - new_s=new_s+s[prev:start] - new_s=new_s+s[start:end].replace(' ','') - prev=end - - new_s=new_s+s[prev:] - s=new_s - - return s.split(' ') - -def trivial_tokenize_urdu(text): - """tokenize Urdu string - - A trivial tokenizer which just tokenizes on the punctuation boundaries. - This also includes punctuations for the Urdu script. - These punctuations characters were identified from the Unicode database - for Arabic script by looking for punctuation symbols. - - Args: - text (str): text to tokenize - - Returns: - list: list of tokens - """ - tok_str=triv_tokenizer_urdu_pat.sub(r' \1 ',text.replace('\t',' ')) - return re.sub(r'[ ]+',' ',tok_str).strip(' ').split(' ') - # from urduhack.tokenization import word_tokenizer - # return word_tokenizer(text) - -def trivial_tokenize(text,lang='hi'): - """trivial tokenizer for Indian languages using Brahmi for Arabic scripts - - A trivial tokenizer which just tokenizes on the punctuation boundaries. - Major punctuations specific to Indian langauges are handled. - These punctuations characters were identified from the Unicode database. - - Args: - text (str): text to tokenize - lang (str): ISO 639-2 language code - - Returns: - list: list of tokens - """ - if lang=='ur': - return trivial_tokenize_urdu(text) - else: - return trivial_tokenize_indic(text) - -# if __name__ == '__main__': - -# if len(sys.argv)<4: -# print("Usage: python indic_tokenize.py ") -# sys.exit(1) - -# with open(sys.argv[1],'r', encoding='utf-8') as ifile: -# with open(sys.argv[2],'w', encoding='utf-8') as ofile: -# for line in ifile: -# tokenized_line=' '.join(trivial_tokenize(line,sys.argv[3])) -# ofile.write(tokenized_line) diff --git a/spaces/Lamai/LAMAIGPT/autogpt/json_utils/__init__.py b/spaces/Lamai/LAMAIGPT/autogpt/json_utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/LanguageBind/LanguageBind/v_cls/zero_shot.py b/spaces/LanguageBind/LanguageBind/v_cls/zero_shot.py deleted file mode 100644 index 18b9c1dca3ff3ec636a5486098c2e8fc6c5d669e..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/v_cls/zero_shot.py +++ /dev/null @@ -1,109 +0,0 @@ -import logging -import os - -import torch -import torch.nn.functional as F -from tqdm import tqdm - -from open_clip import get_input_dtype, get_tokenizer -from open_clip.factory import HF_HUB_PREFIX -from training.distributed import is_master -from v_cls.zero_shot_classifier import build_zero_shot_classifier -from v_cls.zero_shot_metadata import OPENAI_IMAGENET_TEMPLATES, IMAGENET_CLASSNAMES - -from training.precision import get_autocast - - - - -def accuracy(output, target, topk=(1,)): - pred = output.topk(max(topk), 1, True, True)[1].t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - return [float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk] - - -def run(model, classifier, dataloader, args): - autocast = get_autocast(args.precision) - input_dtype = get_input_dtype(args.precision) - file = os.path.join(args.output_dir, str(args.rank) + '.txt') - final_result = [] - with torch.no_grad(): - top1, top5, n = 0., 0., 0. - for batch in tqdm(dataloader, unit_scale=args.batch_size): - images = batch[0] - target = batch[1] - ids = batch[2] - chunk_nb = batch[3] - split_nb = batch[4] - images = images.to(device=args.device, dtype=input_dtype) - target = target.to(args.device) - - with autocast(): - # predict - output = model(image=images) - image_features = output['image_features'] if isinstance(output, dict) else output[0] - logits = 100. * image_features @ classifier - output = logits - # print(output.shape) - for i in range(output.size(0)): - string = "{} {} {} {} {}\n".format( - ids[i], str(output.data[i].cpu().numpy().tolist()), - str(int(target[i].cpu().numpy())), - str(int(chunk_nb[i].cpu().numpy())), - str(int(split_nb[i].cpu().numpy()))) - final_result.append(string) - - # measure accuracy - acc1, acc5 = accuracy(logits, target, topk=(1, 5)) - top1 += acc1 - top5 += acc5 - n += images.size(0) - - top1 = (top1 / n) - top5 = (top5 / n) - - if not os.path.exists(file): - os.mknod(file) - with open(file, 'w') as f: - f.write("{}, {}\n".format(top1, top5)) - for line in final_result: - f.write(line) - - return top1, top5 - - -def zero_shot_eval(model, dataloader, epoch, args): - if args.zeroshot_frequency == 0: - return {} - if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs: - return {} - if args.distributed and not args.horovod: - model = model.module - if is_master(args): - logging.info('Starting zero-shot kinetics400') - logging.info('Building zero-shot classifier') - autocast = get_autocast(args.precision) - with autocast(): - tokenizer = get_tokenizer(HF_HUB_PREFIX+args.model, cache_dir=args.cache_dir) - classifier = build_zero_shot_classifier( - model, - tokenizer=tokenizer, - classnames=IMAGENET_CLASSNAMES, - templates=OPENAI_IMAGENET_TEMPLATES, - num_classes_per_batch=10, - device=args.device, - use_tqdm=True, - ) - - - if is_master(args): - logging.info('Using classifier') - # results = {} - run(model, classifier, dataloader, args) - # results['kinetics400-zeroshot-val-top1'] = top1 - # results['kinetics400-zeroshot-val-top5'] = top5 - - if is_master(args): - logging.info('Finished zero-shot kinetics400') - - # return results diff --git a/spaces/Lianjd/stock_dashboard/backtrader/indicators/cci.py b/spaces/Lianjd/stock_dashboard/backtrader/indicators/cci.py deleted file mode 100644 index f47d6e204a425b2d961147391fc81452da8efe85..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/indicators/cci.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -from . import Indicator, Max, MovAv, MeanDev - - -class CommodityChannelIndex(Indicator): - ''' - Introduced by Donald Lambert in 1980 to measure variations of the - "typical price" (see below) from its mean to identify extremes and - reversals - - Formula: - - tp = typical_price = (high + low + close) / 3 - - tpmean = MovingAverage(tp, period) - - deviation = tp - tpmean - - meandev = MeanDeviation(tp) - - cci = deviation / (meandeviation * factor) - - See: - - https://en.wikipedia.org/wiki/Commodity_channel_index - ''' - alias = ('CCI',) - - lines = ('cci',) - - params = (('period', 20), - ('factor', 0.015), - ('movav', MovAv.Simple), - ('upperband', 100.0), - ('lowerband', -100.0),) - - def _plotlabel(self): - plabels = [self.p.period, self.p.factor] - plabels += [self.p.movav] * self.p.notdefault('movav') - return plabels - - def _plotinit(self): - self.plotinfo.plotyhlines = [0.0, self.p.upperband, self.p.lowerband] - - def __init__(self): - tp = (self.data.high + self.data.low + self.data.close) / 3.0 - tpmean = self.p.movav(tp, period=self.p.period) - - dev = tp - tpmean - meandev = MeanDev(tp, tpmean, period=self.p.period) - - self.lines.cci = dev / (self.p.factor * meandev) - - super(CommodityChannelIndex, self).__init__() diff --git a/spaces/LuxOAI/guanaco-playground-tgi/app.py b/spaces/LuxOAI/guanaco-playground-tgi/app.py deleted file mode 100644 index 071a157bf157a915100595498442576cf9a3cab8..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/guanaco-playground-tgi/app.py +++ /dev/null @@ -1,273 +0,0 @@ -import os - -import gradio as gr -from huggingface_hub import Repository -from text_generation import Client - -# from dialogues import DialogueTemplate -from share_btn import (community_icon_html, loading_icon_html, share_btn_css, - share_js) - -HF_TOKEN = os.environ.get("HF_TOKEN", None) -API_TOKEN = os.environ.get("API_TOKEN", None) -API_URL = os.environ.get("API_URL", None) -API_URL = "https://api-inference.huggingface.co/models/timdettmers/guanaco-33b-merged" - -client = Client( - API_URL, - headers={"Authorization": f"Bearer {API_TOKEN}"}, -) - -repo = None - - -def get_total_inputs(inputs, chatbot, preprompt, user_name, assistant_name, sep): - past = [] - for data in chatbot: - user_data, model_data = data - - if not user_data.startswith(user_name): - user_data = user_name + user_data - if not model_data.startswith(sep + assistant_name): - model_data = sep + assistant_name + model_data - - past.append(user_data + model_data.rstrip() + sep) - - if not inputs.startswith(user_name): - inputs = user_name + inputs - - total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip() - - return total_inputs - - -def has_no_history(chatbot, history): - return not chatbot and not history - - -header = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." -prompt_template = "### Human: {query}\n### Assistant:{response}" - -def generate( - user_message, - chatbot, - history, - temperature, - top_p, - max_new_tokens, - repetition_penalty, -): - # Don't return meaningless message when the input is empty - if not user_message: - print("Empty input") - - history.append(user_message) - - past_messages = [] - for data in chatbot: - user_data, model_data = data - - past_messages.extend( - [{"role": "user", "content": user_data}, {"role": "assistant", "content": model_data.rstrip()}] - ) - - if len(past_messages) < 1: - prompt = header + prompt_template.format(query=user_message, response="") - else: - prompt = header - for i in range(0, len(past_messages), 2): - intermediate_prompt = prompt_template.format(query=past_messages[i]["content"], response=past_messages[i+1]["content"]) - print("intermediate: ", intermediate_prompt) - prompt = prompt + '\n' + intermediate_prompt - - prompt = prompt + prompt_template.format(query=user_message, response="") - - - generate_kwargs = { - "temperature": temperature, - "top_p": top_p, - "max_new_tokens": max_new_tokens, - } - - temperature = float(temperature) - if temperature < 1e-2: - temperature = 1e-2 - top_p = float(top_p) - - generate_kwargs = dict( - temperature=temperature, - max_new_tokens=max_new_tokens, - top_p=top_p, - repetition_penalty=repetition_penalty, - do_sample=True, - truncate=999, - seed=42, - ) - - stream = client.generate_stream( - prompt, - **generate_kwargs, - ) - - output = "" - for idx, response in enumerate(stream): - if response.token.text == '': - break - - if response.token.special: - continue - output += response.token.text - if idx == 0: - history.append(" " + output) - else: - history[-1] = output - - chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)] - - yield chat, history, user_message, "" - - return chat, history, user_message, "" - - -examples = [ - "A Llama entered in my garden, what should I do?" -] - - -def clear_chat(): - return [], [] - - -def process_example(args): - for [x, y] in generate(args): - pass - return [x, y] - - -title = """

    Guanaco Playground 💬

    """ -custom_css = """ -#banner-image { - display: block; - margin-left: auto; - margin-right: auto; -} -#chat-message { - font-size: 14px; - min-height: 300px; -} -""" - -with gr.Blocks(analytics_enabled=False, css=custom_css) as demo: - gr.HTML(title) - - with gr.Row(): - with gr.Column(): - gr.Markdown( - """ - 💻 This demo showcases the Guanaco 33B model, released together with the paper [QLoRA](https://arxiv.org/abs/2305.14314) - """ - ) - - with gr.Row(): - with gr.Box(): - output = gr.Markdown() - chatbot = gr.Chatbot(elem_id="chat-message", label="Chat") - - with gr.Row(): - with gr.Column(scale=3): - user_message = gr.Textbox(placeholder="Enter your message here", show_label=False, elem_id="q-input") - with gr.Row(): - send_button = gr.Button("Send", elem_id="send-btn", visible=True) - - clear_chat_button = gr.Button("Clear chat", elem_id="clear-btn", visible=True) - - with gr.Accordion(label="Parameters", open=False, elem_id="parameters-accordion"): - temperature = gr.Slider( - label="Temperature", - value=0.7, - minimum=0.0, - maximum=1.0, - step=0.1, - interactive=True, - info="Higher values produce more diverse outputs", - ) - top_p = gr.Slider( - label="Top-p (nucleus sampling)", - value=0.9, - minimum=0.0, - maximum=1, - step=0.05, - interactive=True, - info="Higher values sample more low-probability tokens", - ) - max_new_tokens = gr.Slider( - label="Max new tokens", - value=1024, - minimum=0, - maximum=2048, - step=4, - interactive=True, - info="The maximum numbers of new tokens", - ) - repetition_penalty = gr.Slider( - label="Repetition Penalty", - value=1.2, - minimum=0.0, - maximum=10, - step=0.1, - interactive=True, - info="The parameter for repetition penalty. 1.0 means no penalty.", - ) - with gr.Row(): - gr.Examples( - examples=examples, - inputs=[user_message], - cache_examples=False, - fn=process_example, - outputs=[output], - ) - - with gr.Row(): - gr.Markdown( - "Disclaimer: The model can produce factually incorrect output, and should not be relied on to produce " - "factually accurate information. The model was trained on various public datasets; while great efforts " - "have been taken to clean the pretraining data, it is possible that this model could generate lewd, " - "biased, or otherwise offensive outputs.", - elem_classes=["disclaimer"], - ) - - - history = gr.State([]) - last_user_message = gr.State("") - - user_message.submit( - generate, - inputs=[ - user_message, - chatbot, - history, - temperature, - top_p, - max_new_tokens, - repetition_penalty, - ], - outputs=[chatbot, history, last_user_message, user_message], - ) - - send_button.click( - generate, - inputs=[ - user_message, - chatbot, - history, - temperature, - top_p, - max_new_tokens, - repetition_penalty, - ], - outputs=[chatbot, history, last_user_message, user_message], - ) - - clear_chat_button.click(clear_chat, outputs=[chatbot, history]) - -demo.queue(concurrency_count=16).launch(debug=True) diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Global/data/online_dataset_for_old_photos.py b/spaces/MCkernick/Image_Restoration_Colorization/Global/data/online_dataset_for_old_photos.py deleted file mode 100644 index 068410a93eb10d5f00e694fd890f8aaa069526a3..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Global/data/online_dataset_for_old_photos.py +++ /dev/null @@ -1,485 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import os.path -import io -import zipfile -from data.base_dataset import BaseDataset, get_params, get_transform, normalize -from data.image_folder import make_dataset -from PIL import Image -import torchvision.transforms as transforms -import numpy as np -from data.Load_Bigfile import BigFileMemoryLoader -import random -import cv2 -from io import BytesIO - -def pil_to_np(img_PIL): - '''Converts image in PIL format to np.array. - - From W x H x C [0...255] to C x W x H [0..1] - ''' - ar = np.array(img_PIL) - - if len(ar.shape) == 3: - ar = ar.transpose(2, 0, 1) - else: - ar = ar[None, ...] - - return ar.astype(np.float32) / 255. - - -def np_to_pil(img_np): - '''Converts image in np.array format to PIL image. - - From C x W x H [0..1] to W x H x C [0...255] - ''' - ar = np.clip(img_np * 255, 0, 255).astype(np.uint8) - - if img_np.shape[0] == 1: - ar = ar[0] - else: - ar = ar.transpose(1, 2, 0) - - return Image.fromarray(ar) - -def synthesize_salt_pepper(image,amount,salt_vs_pepper): - - ## Give PIL, return the noisy PIL - - img_pil=pil_to_np(image) - - out = img_pil.copy() - p = amount - q = salt_vs_pepper - flipped = np.random.choice([True, False], size=img_pil.shape, - p=[p, 1 - p]) - salted = np.random.choice([True, False], size=img_pil.shape, - p=[q, 1 - q]) - peppered = ~salted - out[flipped & salted] = 1 - out[flipped & peppered] = 0. - noisy = np.clip(out, 0, 1).astype(np.float32) - - - return np_to_pil(noisy) - -def synthesize_gaussian(image,std_l,std_r): - - ## Give PIL, return the noisy PIL - - img_pil=pil_to_np(image) - - mean=0 - std=random.uniform(std_l/255.,std_r/255.) - gauss=np.random.normal(loc=mean,scale=std,size=img_pil.shape) - noisy=img_pil+gauss - noisy=np.clip(noisy,0,1).astype(np.float32) - - return np_to_pil(noisy) - -def synthesize_speckle(image,std_l,std_r): - - ## Give PIL, return the noisy PIL - - img_pil=pil_to_np(image) - - mean=0 - std=random.uniform(std_l/255.,std_r/255.) - gauss=np.random.normal(loc=mean,scale=std,size=img_pil.shape) - noisy=img_pil+gauss*img_pil - noisy=np.clip(noisy,0,1).astype(np.float32) - - return np_to_pil(noisy) - - -def synthesize_low_resolution(img): - w,h=img.size - - new_w=random.randint(int(w/2),w) - new_h=random.randint(int(h/2),h) - - img=img.resize((new_w,new_h),Image.BICUBIC) - - if random.uniform(0,1)<0.5: - img=img.resize((w,h),Image.NEAREST) - else: - img = img.resize((w, h), Image.BILINEAR) - - return img - - -def convertToJpeg(im,quality): - with BytesIO() as f: - im.save(f, format='JPEG',quality=quality) - f.seek(0) - return Image.open(f).convert('RGB') - - -def blur_image_v2(img): - - - x=np.array(img) - kernel_size_candidate=[(3,3),(5,5),(7,7)] - kernel_size=random.sample(kernel_size_candidate,1)[0] - std=random.uniform(1.,5.) - - #print("The gaussian kernel size: (%d,%d) std: %.2f"%(kernel_size[0],kernel_size[1],std)) - blur=cv2.GaussianBlur(x,kernel_size,std) - - return Image.fromarray(blur.astype(np.uint8)) - -def online_add_degradation_v2(img): - - task_id=np.random.permutation(4) - - for x in task_id: - if x==0 and random.uniform(0,1)<0.7: - img = blur_image_v2(img) - if x==1 and random.uniform(0,1)<0.7: - flag = random.choice([1, 2, 3]) - if flag == 1: - img = synthesize_gaussian(img, 5, 50) - if flag == 2: - img = synthesize_speckle(img, 5, 50) - if flag == 3: - img = synthesize_salt_pepper(img, random.uniform(0, 0.01), random.uniform(0.3, 0.8)) - if x==2 and random.uniform(0,1)<0.7: - img=synthesize_low_resolution(img) - - if x==3 and random.uniform(0,1)<0.7: - img=convertToJpeg(img,random.randint(40,100)) - - return img - - -def irregular_hole_synthesize(img,mask): - - img_np=np.array(img).astype('uint8') - mask_np=np.array(mask).astype('uint8') - mask_np=mask_np/255 - img_new=img_np*(1-mask_np)+mask_np*255 - - - hole_img=Image.fromarray(img_new.astype('uint8')).convert("RGB") - - return hole_img,mask.convert("L") - -def zero_mask(size): - x=np.zeros((size,size,3)).astype('uint8') - mask=Image.fromarray(x).convert("RGB") - return mask - - - -class UnPairOldPhotos_SR(BaseDataset): ## Synthetic + Real Old - def initialize(self, opt): - self.opt = opt - self.isImage = 'domainA' in opt.name - self.task = 'old_photo_restoration_training_vae' - self.dir_AB = opt.dataroot - if self.isImage: - - self.load_img_dir_L_old=os.path.join(self.dir_AB,"Real_L_old.bigfile") - self.load_img_dir_RGB_old=os.path.join(self.dir_AB,"Real_RGB_old.bigfile") - self.load_img_dir_clean=os.path.join(self.dir_AB,"VOC_RGB_JPEGImages.bigfile") - - self.loaded_imgs_L_old=BigFileMemoryLoader(self.load_img_dir_L_old) - self.loaded_imgs_RGB_old=BigFileMemoryLoader(self.load_img_dir_RGB_old) - self.loaded_imgs_clean=BigFileMemoryLoader(self.load_img_dir_clean) - - else: - # self.load_img_dir_clean=os.path.join(self.dir_AB,self.opt.test_dataset) - self.load_img_dir_clean=os.path.join(self.dir_AB,"VOC_RGB_JPEGImages.bigfile") - self.loaded_imgs_clean=BigFileMemoryLoader(self.load_img_dir_clean) - - #### - print("-------------Filter the imgs whose size <256 in VOC-------------") - self.filtered_imgs_clean=[] - for i in range(len(self.loaded_imgs_clean)): - img_name,img=self.loaded_imgs_clean[i] - h,w=img.size - if h<256 or w<256: - continue - self.filtered_imgs_clean.append((img_name,img)) - - print("--------Origin image num is [%d], filtered result is [%d]--------" % ( - len(self.loaded_imgs_clean), len(self.filtered_imgs_clean))) - ## Filter these images whose size is less than 256 - - # self.img_list=os.listdir(load_img_dir) - self.pid = os.getpid() - - def __getitem__(self, index): - - - is_real_old=0 - - sampled_dataset=None - degradation=None - if self.isImage: ## domain A , contains 2 kinds of data: synthetic + real_old - P=random.uniform(0,2) - if P>=0 and P<1: - if random.uniform(0,1)<0.5: - sampled_dataset=self.loaded_imgs_L_old - self.load_img_dir=self.load_img_dir_L_old - else: - sampled_dataset=self.loaded_imgs_RGB_old - self.load_img_dir=self.load_img_dir_RGB_old - is_real_old=1 - if P>=1 and P<2: - sampled_dataset=self.filtered_imgs_clean - self.load_img_dir=self.load_img_dir_clean - degradation=1 - else: - - sampled_dataset=self.filtered_imgs_clean - self.load_img_dir=self.load_img_dir_clean - - sampled_dataset_len=len(sampled_dataset) - - index=random.randint(0,sampled_dataset_len-1) - - img_name,img = sampled_dataset[index] - - if degradation is not None: - img=online_add_degradation_v2(img) - - path=os.path.join(self.load_img_dir,img_name) - - # AB = Image.open(path).convert('RGB') - # split AB image into A and B - - # apply the same transform to both A and B - - if random.uniform(0,1) <0.1: - img=img.convert("L") - img=img.convert("RGB") - ## Give a probability P, we convert the RGB image into L - - - A=img - w,h=A.size - if w<256 or h<256: - A=transforms.Scale(256,Image.BICUBIC)(A) - ## Since we want to only crop the images (256*256), for those old photos whose size is smaller than 256, we first resize them. - - transform_params = get_params(self.opt, A.size) - A_transform = get_transform(self.opt, transform_params) - - B_tensor = inst_tensor = feat_tensor = 0 - A_tensor = A_transform(A) - - - input_dict = {'label': A_tensor, 'inst': is_real_old, 'image': A_tensor, - 'feat': feat_tensor, 'path': path} - return input_dict - - def __len__(self): - return len(self.loaded_imgs_clean) ## actually, this is useless, since the selected index is just a random number - - def name(self): - return 'UnPairOldPhotos_SR' - - -class PairOldPhotos(BaseDataset): - def initialize(self, opt): - self.opt = opt - self.isImage = 'imagegan' in opt.name - self.task = 'old_photo_restoration_training_mapping' - self.dir_AB = opt.dataroot - if opt.isTrain: - self.load_img_dir_clean= os.path.join(self.dir_AB, "VOC_RGB_JPEGImages.bigfile") - self.loaded_imgs_clean = BigFileMemoryLoader(self.load_img_dir_clean) - - print("-------------Filter the imgs whose size <256 in VOC-------------") - self.filtered_imgs_clean = [] - for i in range(len(self.loaded_imgs_clean)): - img_name, img = self.loaded_imgs_clean[i] - h, w = img.size - if h < 256 or w < 256: - continue - self.filtered_imgs_clean.append((img_name, img)) - - print("--------Origin image num is [%d], filtered result is [%d]--------" % ( - len(self.loaded_imgs_clean), len(self.filtered_imgs_clean))) - - else: - self.load_img_dir=os.path.join(self.dir_AB,opt.test_dataset) - self.loaded_imgs=BigFileMemoryLoader(self.load_img_dir) - - self.pid = os.getpid() - - def __getitem__(self, index): - - - - if self.opt.isTrain: - img_name_clean,B = self.filtered_imgs_clean[index] - path = os.path.join(self.load_img_dir_clean, img_name_clean) - if self.opt.use_v2_degradation: - A=online_add_degradation_v2(B) - ### Remind: A is the input and B is corresponding GT - else: - - if self.opt.test_on_synthetic: - - img_name_B,B=self.loaded_imgs[index] - A=online_add_degradation_v2(B) - img_name_A=img_name_B - path = os.path.join(self.load_img_dir, img_name_A) - else: - img_name_A,A=self.loaded_imgs[index] - img_name_B,B=self.loaded_imgs[index] - path = os.path.join(self.load_img_dir, img_name_A) - - - if random.uniform(0,1)<0.1 and self.opt.isTrain: - A=A.convert("L") - B=B.convert("L") - A=A.convert("RGB") - B=B.convert("RGB") - ## In P, we convert the RGB into L - - - ##test on L - - # split AB image into A and B - # w, h = img.size - # w2 = int(w / 2) - # A = img.crop((0, 0, w2, h)) - # B = img.crop((w2, 0, w, h)) - w,h=A.size - if w<256 or h<256: - A=transforms.Scale(256,Image.BICUBIC)(A) - B=transforms.Scale(256, Image.BICUBIC)(B) - - # apply the same transform to both A and B - transform_params = get_params(self.opt, A.size) - A_transform = get_transform(self.opt, transform_params) - B_transform = get_transform(self.opt, transform_params) - - B_tensor = inst_tensor = feat_tensor = 0 - A_tensor = A_transform(A) - B_tensor = B_transform(B) - - input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor, - 'feat': feat_tensor, 'path': path} - return input_dict - - def __len__(self): - - if self.opt.isTrain: - return len(self.filtered_imgs_clean) - else: - return len(self.loaded_imgs) - - def name(self): - return 'PairOldPhotos' - - -class PairOldPhotos_with_hole(BaseDataset): - def initialize(self, opt): - self.opt = opt - self.isImage = 'imagegan' in opt.name - self.task = 'old_photo_restoration_training_mapping' - self.dir_AB = opt.dataroot - if opt.isTrain: - self.load_img_dir_clean= os.path.join(self.dir_AB, "VOC_RGB_JPEGImages.bigfile") - self.loaded_imgs_clean = BigFileMemoryLoader(self.load_img_dir_clean) - - print("-------------Filter the imgs whose size <256 in VOC-------------") - self.filtered_imgs_clean = [] - for i in range(len(self.loaded_imgs_clean)): - img_name, img = self.loaded_imgs_clean[i] - h, w = img.size - if h < 256 or w < 256: - continue - self.filtered_imgs_clean.append((img_name, img)) - - print("--------Origin image num is [%d], filtered result is [%d]--------" % ( - len(self.loaded_imgs_clean), len(self.filtered_imgs_clean))) - - else: - self.load_img_dir=os.path.join(self.dir_AB,opt.test_dataset) - self.loaded_imgs=BigFileMemoryLoader(self.load_img_dir) - - self.loaded_masks = BigFileMemoryLoader(opt.irregular_mask) - - self.pid = os.getpid() - - def __getitem__(self, index): - - - - if self.opt.isTrain: - img_name_clean,B = self.filtered_imgs_clean[index] - path = os.path.join(self.load_img_dir_clean, img_name_clean) - - - B=transforms.RandomCrop(256)(B) - A=online_add_degradation_v2(B) - ### Remind: A is the input and B is corresponding GT - - else: - img_name_A,A=self.loaded_imgs[index] - img_name_B,B=self.loaded_imgs[index] - path = os.path.join(self.load_img_dir, img_name_A) - - #A=A.resize((256,256)) - A=transforms.CenterCrop(256)(A) - B=A - - if random.uniform(0,1)<0.1 and self.opt.isTrain: - A=A.convert("L") - B=B.convert("L") - A=A.convert("RGB") - B=B.convert("RGB") - ## In P, we convert the RGB into L - - if self.opt.isTrain: - mask_name,mask=self.loaded_masks[random.randint(0,len(self.loaded_masks)-1)] - else: - mask_name, mask = self.loaded_masks[index%100] - mask = mask.resize((self.opt.loadSize, self.opt.loadSize), Image.NEAREST) - - if self.opt.random_hole and random.uniform(0,1)>0.5 and self.opt.isTrain: - mask=zero_mask(256) - - if self.opt.no_hole: - mask=zero_mask(256) - - - A,_=irregular_hole_synthesize(A,mask) - - if not self.opt.isTrain and self.opt.hole_image_no_mask: - mask=zero_mask(256) - - transform_params = get_params(self.opt, A.size) - A_transform = get_transform(self.opt, transform_params) - B_transform = get_transform(self.opt, transform_params) - - if transform_params['flip'] and self.opt.isTrain: - mask=mask.transpose(Image.FLIP_LEFT_RIGHT) - - mask_tensor = transforms.ToTensor()(mask) - - - B_tensor = inst_tensor = feat_tensor = 0 - A_tensor = A_transform(A) - B_tensor = B_transform(B) - - input_dict = {'label': A_tensor, 'inst': mask_tensor[:1], 'image': B_tensor, - 'feat': feat_tensor, 'path': path} - return input_dict - - def __len__(self): - - if self.opt.isTrain: - return len(self.filtered_imgs_clean) - - else: - return len(self.loaded_imgs) - - def name(self): - return 'PairOldPhotos_with_hole' \ No newline at end of file diff --git a/spaces/MLVKU/Human_Object_Interaction/app.py b/spaces/MLVKU/Human_Object_Interaction/app.py deleted file mode 100644 index d6b95b007f700473eaebb2d50dfa112e6b022aa4..0000000000000000000000000000000000000000 --- a/spaces/MLVKU/Human_Object_Interaction/app.py +++ /dev/null @@ -1,17 +0,0 @@ -import gradio as gr -from visualization import visualization -# pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog") -# pipeline = pipeline(task="image-classification", model="jhp/hoi") - -def predict(image,threshold,topk,device=''): - vis_img = visualization(image,threshold,topk) - return vis_img - -gr.Interface( - predict, - inputs=[gr.Image(type='pil',label="input image"), - gr.Slider(0, 1, value=0.4, label="Threshold", info="Set detection score threshold between 0~1"), - gr.Number(value=5,label='Topk',info='Topk prediction')], - outputs= gr.Image(type="pil", label="hoi detection results"), - title="HOI detection", -).launch(debug=True,enable_queue=True) \ No newline at end of file diff --git a/spaces/Mahiruoshi/BangDream-Bert-VITS2/train_ms.py b/spaces/Mahiruoshi/BangDream-Bert-VITS2/train_ms.py deleted file mode 100644 index 1f1708d8ef1f4e820b608234a60744a200a644cd..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/BangDream-Bert-VITS2/train_ms.py +++ /dev/null @@ -1,594 +0,0 @@ -# flake8: noqa: E402 - -import os -import torch -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from tqdm import tqdm -import logging - -logging.getLogger("numba").setLevel(logging.WARNING) -import commons -import utils -from data_utils import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler, -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, - DurationDiscriminator, -) -from losses import generator_loss, discriminator_loss, feature_loss, kl_loss -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - -torch.backends.cuda.matmul.allow_tf32 = True -torch.backends.cudnn.allow_tf32 = ( - True # If encontered training problem,please try to disable TF32. -) -torch.set_float32_matmul_precision("medium") -torch.backends.cudnn.benchmark = True -torch.backends.cuda.sdp_kernel("flash") -torch.backends.cuda.enable_flash_sdp(True) -torch.backends.cuda.enable_mem_efficient_sdp( - True -) # Not available if torch version is lower than 2.0 -torch.backends.cuda.enable_math_sdp(True) -global_step = 0 - - -def run(): - dist.init_process_group( - backend="gloo", - init_method="env://", # Due to some training problem,we proposed to use gloo instead of nccl. - ) # Use torchrun instead of mp.spawn - rank = dist.get_rank() - n_gpus = dist.get_world_size() - hps = utils.get_hparams() - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32, 300, 400, 500, 600, 700, 800, 900, 1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True, - ) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader( - train_dataset, - num_workers=16, - shuffle=False, - pin_memory=True, - collate_fn=collate_fn, - batch_sampler=train_sampler, - persistent_workers=True, - prefetch_factor=4, - ) # DataLoader config could be adjusted. - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader( - eval_dataset, - num_workers=0, - shuffle=False, - batch_size=1, - pin_memory=True, - drop_last=False, - collate_fn=collate_fn, - ) - if ( - "use_noise_scaled_mas" in hps.model.keys() - and hps.model.use_noise_scaled_mas is True - ): - print("Using noise scaled MAS for VITS2") - mas_noise_scale_initial = 0.01 - noise_scale_delta = 2e-6 - else: - print("Using normal MAS for VITS1") - mas_noise_scale_initial = 0.0 - noise_scale_delta = 0.0 - if ( - "use_duration_discriminator" in hps.model.keys() - and hps.model.use_duration_discriminator is True - ): - print("Using duration discriminator for VITS2") - net_dur_disc = DurationDiscriminator( - hps.model.hidden_channels, - hps.model.hidden_channels, - 3, - 0.1, - gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, - ).cuda(rank) - if ( - "use_spk_conditioned_encoder" in hps.model.keys() - and hps.model.use_spk_conditioned_encoder is True - ): - if hps.data.n_speakers == 0: - raise ValueError( - "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" - ) - else: - print("Using normal encoder for VITS1") - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - mas_noise_scale_initial=mas_noise_scale_initial, - noise_scale_delta=noise_scale_delta, - **hps.model, - ).cuda(rank) - - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - if net_dur_disc is not None: - optim_dur_disc = torch.optim.AdamW( - net_dur_disc.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - else: - optim_dur_disc = None - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if net_dur_disc is not None: - net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) - try: - if net_dur_disc is not None: - _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), - net_dur_disc, - optim_dur_disc, - skip_optimizer=hps.train.skip_optimizer - if "skip_optimizer" in hps.train - else True, - ) - _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), - net_g, - optim_g, - skip_optimizer=hps.train.skip_optimizer - if "skip_optimizer" in hps.train - else True, - ) - _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), - net_d, - optim_d, - skip_optimizer=hps.train.skip_optimizer - if "skip_optimizer" in hps.train - else True, - ) - if not optim_g.param_groups[0].get("initial_lr"): - optim_g.param_groups[0]["initial_lr"] = g_resume_lr - if not optim_d.param_groups[0].get("initial_lr"): - optim_d.param_groups[0]["initial_lr"] = d_resume_lr - if not optim_dur_disc.param_groups[0].get("initial_lr"): - optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr - - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except Exception as e: - print(e) - epoch_str = 1 - global_step = 0 - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR( - optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 - ) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR( - optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 - ) - if net_dur_disc is not None: - if not optim_dur_disc.param_groups[0].get("initial_lr"): - optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr - scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( - optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 - ) - else: - scheduler_dur_disc = None - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d, net_dur_disc], - [optim_g, optim_d, optim_dur_disc], - [scheduler_g, scheduler_d, scheduler_dur_disc], - scaler, - [train_loader, eval_loader], - logger, - [writer, writer_eval], - ) - else: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d, net_dur_disc], - [optim_g, optim_d, optim_dur_disc], - [scheduler_g, scheduler_d, scheduler_dur_disc], - scaler, - [train_loader, None], - None, - None, - ) - scheduler_g.step() - scheduler_d.step() - if net_dur_disc is not None: - scheduler_dur_disc.step() - - -def train_and_evaluate( - rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers -): - net_g, net_d, net_dur_disc = nets - optim_g, optim_d, optim_dur_disc = optims - scheduler_g, scheduler_d, scheduler_dur_disc = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - if net_dur_disc is not None: - net_dur_disc.train() - for batch_idx, ( - x, - x_lengths, - spec, - spec_lengths, - y, - y_lengths, - speakers, - tone, - language, - bert, - ja_bert, - ) in tqdm(enumerate(train_loader)): - if net_g.module.use_noise_scaled_mas: - current_mas_noise_scale = ( - net_g.module.mas_noise_scale_initial - - net_g.module.noise_scale_delta * global_step - ) - net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( - rank, non_blocking=True - ) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( - rank, non_blocking=True - ) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( - rank, non_blocking=True - ) - speakers = speakers.cuda(rank, non_blocking=True) - tone = tone.cuda(rank, non_blocking=True) - language = language.cuda(rank, non_blocking=True) - bert = bert.cuda(rank, non_blocking=True) - ja_bert = ja_bert.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - ( - y_hat, - l_length, - attn, - ids_slice, - x_mask, - z_mask, - (z, z_p, m_p, logs_p, m_q, logs_q), - (hidden_x, logw, logw_), - ) = net_g( - x, - x_lengths, - spec, - spec_lengths, - speakers, - tone, - language, - bert, - ja_bert, - ) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - y_mel = commons.slice_segments( - mel, ids_slice, hps.train.segment_size // hps.data.hop_length - ) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - - y = commons.slice_segments( - y, ids_slice * hps.data.hop_length, hps.train.segment_size - ) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( - y_d_hat_r, y_d_hat_g - ) - loss_disc_all = loss_disc - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc( - hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach() - ) - with autocast(enabled=False): - # TODO: I think need to mean using the mask, but for now, just mean all - ( - loss_dur_disc, - losses_dur_disc_r, - losses_dur_disc_g, - ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) - loss_dur_disc_all = loss_dur_disc - optim_dur_disc.zero_grad() - scaler.scale(loss_dur_disc_all).backward() - scaler.unscale_(optim_dur_disc) - commons.clip_grad_value_(net_dur_disc.parameters(), None) - scaler.step(optim_dur_disc) - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - if net_dur_disc is not None: - loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g) - loss_gen_all += loss_dur_gen - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]["lr"] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info( - "Train Epoch: {} [{:.0f}%]".format( - epoch, 100.0 * batch_idx / len(train_loader) - ) - ) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = { - "loss/g/total": loss_gen_all, - "loss/d/total": loss_disc_all, - "learning_rate": lr, - "grad_norm_d": grad_norm_d, - "grad_norm_g": grad_norm_g, - } - scalar_dict.update( - { - "loss/g/fm": loss_fm, - "loss/g/mel": loss_mel, - "loss/g/dur": loss_dur, - "loss/g/kl": loss_kl, - } - ) - scalar_dict.update( - {"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)} - ) - scalar_dict.update( - {"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)} - ) - scalar_dict.update( - {"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)} - ) - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy( - y_mel[0].data.cpu().numpy() - ), - "slice/mel_gen": utils.plot_spectrogram_to_numpy( - y_hat_mel[0].data.cpu().numpy() - ), - "all/mel": utils.plot_spectrogram_to_numpy( - mel[0].data.cpu().numpy() - ), - "all/attn": utils.plot_alignment_to_numpy( - attn[0, 0].data.cpu().numpy() - ), - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict, - ) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step)), - ) - utils.save_checkpoint( - net_d, - optim_d, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step)), - ) - if net_dur_disc is not None: - utils.save_checkpoint( - net_dur_disc, - optim_dur_disc, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step)), - ) - keep_ckpts = getattr(hps.train, "keep_ckpts", 5) - if keep_ckpts > 0: - utils.clean_checkpoints( - path_to_models=hps.model_dir, - n_ckpts_to_keep=keep_ckpts, - sort_by_time=True, - ) - - global_step += 1 - - if rank == 0: - logger.info("====> Epoch: {}".format(epoch)) - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - print("Evaluating ...") - with torch.no_grad(): - for batch_idx, ( - x, - x_lengths, - spec, - spec_lengths, - y, - y_lengths, - speakers, - tone, - language, - bert, - ja_bert, - ) in enumerate(eval_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - speakers = speakers.cuda() - bert = bert.cuda() - ja_bert = ja_bert.cuda() - tone = tone.cuda() - language = language.cuda() - for use_sdp in [True, False]: - y_hat, attn, mask, *_ = generator.module.infer( - x, - x_lengths, - speakers, - tone, - language, - bert, - ja_bert, - y=spec, - max_len=1000, - sdp_ratio=0.0 if not use_sdp else 1.0, - ) - y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - image_dict.update( - { - f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy( - y_hat_mel[0].cpu().numpy() - ) - } - ) - audio_dict.update( - { - f"gen/audio_{batch_idx}_{use_sdp}": y_hat[ - 0, :, : y_hat_lengths[0] - ] - } - ) - image_dict.update( - { - f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy( - mel[0].cpu().numpy() - ) - } - ) - audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, : y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate, - ) - generator.train() - - -if __name__ == "__main__": - run() diff --git a/spaces/Mahiruoshi/vits-chatbot/attentions.py b/spaces/Mahiruoshi/vits-chatbot/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/vits-chatbot/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/March07/PromptBench/adv_prompts/t5_zeroshot.md b/spaces/March07/PromptBench/adv_prompts/t5_zeroshot.md deleted file mode 100644 index 6975c5b0bf9a163eb8fe67aabde47e90098bf490..0000000000000000000000000000000000000000 --- a/spaces/March07/PromptBench/adv_prompts/t5_zeroshot.md +++ /dev/null @@ -1,3190 +0,0 @@ -# t5_zeroshot - -# cola - -## 10 prompts - -Acc: 78.00%, prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable': -Acc: 77.90%, prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable': -Acc: 77.50%, prompt: Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable': -Acc: 77.20%, prompt: Examine the sentence and decide if its grammar is 'Acceptable' or 'Unacceptable': -Acc: 76.90%, prompt: Please evaluate the grammatical structure of the provided sentence and answer with 'Acceptable' or 'Unacceptable': -Acc: 76.60%, prompt: Analyze the provided sentence and classify its grammatical correctness as 'Acceptable' or 'Unacceptable': -Acc: 76.50%, prompt: Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable': -Acc: 76.50%, prompt: Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable': -Acc: 75.50%, prompt: Assess the grammatical structure of the given sentence and classify it as 'Acceptable' or 'Unacceptable': -Acc: 70.90%, prompt: Is the provided sentence grammatically correct? Respond with 'Acceptable' or 'Unacceptable': - -Acc: 76.30%, prompt: Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable': -Acc: 76.30%, prompt: As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Acc: 75.80%, prompt: Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Acc: 75.70%, prompt: In your role as a grammar check tool, assess the following sentence and classify it as 'acceptable' if it is grammatically correct or 'unacceptable' if it is incorrect: -Acc: 75.60%, prompt: Working as a grammar classifier, identify whether the given sentence has correct grammar, and respond with 'acceptable' for correct sentences or 'unacceptable' for incorrect ones: -Acc: 75.50%, prompt: While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Acc: 75.30%, prompt: In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable': -Acc: 74.90%, prompt: As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones: -Acc: 74.90%, prompt: As a tool for determining grammatical correctness, review the sentence and categorize its grammar as either 'acceptable' or 'unacceptable': -Acc: 74.90%, prompt: In the role of a syntax analyzer, examine the grammar of the provided sentence and decide if it is correct, answering with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones: - -## bertattack - -Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Assess the below conversation and try if it is grammaticalabsolutely correct. respond with 'Acceptable' or 'Unacceptable':" -Original acc: 78.00%, attacked acc: 67.30%, dropped acc: 10.70% - -Original prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable': -Attacked prompt: b"examine the english plural and say if it is grammaticalmost sound. Answer with neither 'Acceptable' or 'Unacceptable':" -Original acc: 77.90%, attacked acc: 30.90%, dropped acc: 47.00% - -Original prompt: Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Determine if the gender of the given sentence is 'Acceptable' or 'Unacceptable':" -Original acc: 77.50%, attacked acc: 37.40%, dropped acc: 40.10% - -Original prompt: Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable': -Attacked prompt: b"he as a linguistic evaluation tool, assessing the first language and seeing if it is grammatically correct, responded with 'acceptable' or 'unacceptable':" -Original acc: 76.30%, attacked acc: 39.50%, dropped acc: 36.80% - -Original prompt: As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"for an organ for linguistic evaluation, write the movie and question if its grammar is incorrect, and with 'acceptable' for new grammar or 'unacceptable' for unstable grammar:" -Original acc: 76.30%, attacked acc: 30.90%, dropped acc: 45.40% - -Original prompt: Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"t as a grammar detection measures, determine if the a clause is grammatically sounded, query with 'acceptable' for valid level or 'unacceptable' for unclear verbs:" -Original acc: 75.80%, attacked acc: 69.20%, dropped acc: 6.60% - -## checklist - -Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable' IhuBIhoPGc:" -Original acc: 78.00%, attacked acc: 68.90%, dropped acc: 9.10% - -Original prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable' IhuBIhoPGc:" -Original acc: 77.90%, attacked acc: 62.60%, dropped acc: 15.30% - -Original prompt: Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable' ItGDrrA1Sc:" -Original acc: 77.50%, attacked acc: 75.20%, dropped acc: 2.30% - -Original prompt: Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable': -Attacked prompt: b"Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable' yh912BU8T0:" -Original acc: 76.30%, attacked acc: 75.80%, dropped acc: 0.50% - -Original prompt: As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar IhuBIhoPGc:" -Original acc: 76.30%, attacked acc: 74.90%, dropped acc: 1.40% - -Original prompt: Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar g0vBZf3tQC:" -Original acc: 75.80%, attacked acc: 75.10%, dropped acc: 0.70% - -## deepwordbug - -Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable': -Attacked prompt: b"AssesRs the Nollowing setnence and deermine if it is gammatically corerct. Respnd with 'Acceptable' or 'Unacceptable':" -Original acc: 78.00%, attacked acc: 41.10%, dropped acc: 36.90% - -Original prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Exapine the gievn sentene and dVecide if it is grammktically sound. Answer with either 'Acceptable' or 'Unacceptable':" -Original acc: 77.90%, attacked acc: 31.10%, dropped acc: 46.80% - -Original prompt: Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable': -Attacked prompt: b"DeMermine if the grmmar of the gibven sentenec is 'Acceptable' or 'Unacceptable':" -Original acc: 77.50%, attacked acc: 35.10%, dropped acc: 42.40% - -Original prompt: Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable': -Attacked prompt: b"Functining as a gramEar evaluation tool, analzye the giqen sentencGe and decidpe if it is grammgtically cornect, resopnding with 'acceptable' or 'unacceptable':" -Original acc: 76.30%, attacked acc: 51.30%, dropped acc: 25.00% - -Original prompt: As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"sA an instrument for grammar evaluatiJn, cnsider the setnence and deterbmine if its grmamar is crorect, reYponding with 'acceptable' for corect gramOar or 'unacceptable' for incorGect rgammar:" -Original acc: 76.30%, attacked acc: 36.50%, dropped acc: 39.80% - -Original prompt: Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"AVcting as a grmamar detection instrumVent, dJtermine if the pGrovided sentencae is guammatically sQound, anskering with 'acceptable' for coFrect gramar or 'unacceptable' for incorreNt lgrammar:" -Original acc: 75.80%, attacked acc: 58.40%, dropped acc: 17.40% - -## stresstest - -Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable' and false is not true :" -Original acc: 78.00%, attacked acc: 63.50%, dropped acc: 14.50% - -Original prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable' and true is true :" -Original acc: 77.90%, attacked acc: 75.70%, dropped acc: 2.20% - -Original prompt: Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable' and true is true :" -Original acc: 77.50%, attacked acc: 71.90%, dropped acc: 5.60% - -Original prompt: Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable': -Attacked prompt: b"Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 76.30%, attacked acc: 76.50%, dropped acc: -0.20% - -Original prompt: As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar and true is true and true is true and true is true and true is true and true is true :" -Original acc: 76.30%, attacked acc: 62.90%, dropped acc: 13.40% - -Original prompt: Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar and true is true and true is true and true is true and true is true and true is true :" -Original acc: 75.80%, attacked acc: 74.00%, dropped acc: 1.80% - -## textbugger - -Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Assess the following senten\xcf\xb2e and ascertain if it is pronouns correct. Respond with 'Acceptable' or 'Unacceptable':" -Original acc: 78.00%, attacked acc: 59.40%, dropped acc: 18.60% - -Original prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Examining the given condemnation and decide if it is pronoun sounds. Answer with neither 'Acceptable' or 'Unacceptable':" -Original acc: 77.90%, attacked acc: 32.10%, dropped acc: 45.80% - -Original prompt: Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Ascertain if the gr\xc9\x91mmar of the given sentences is 'Acceptable' or 'Unacceptable':" -Original acc: 77.50%, attacked acc: 62.90%, dropped acc: 14.60% - -Original prompt: Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable': -Attacked prompt: b"Functioni\xd5\xb8g as a \xc9\xa1rammar appraisals tool, analyzing the given punishments and deciding if it is grammatically correct, responding with 'acceptable' or 'unacceptable':" -Original acc: 76.30%, attacked acc: 62.00%, dropped acc: 14.30% - -Original prompt: As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"Because an devices for grammar assessment, consdier the punishments and determining if its grammar is correct, respon\xd4\x81ing with 'acceptable' for correct grammar or 'unacceptable' for incorrect pronunciation:" -Original acc: 76.30%, attacked acc: 46.80%, dropped acc: 29.50% - -Original prompt: Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"A\xcf\xb2ting as a grammar detection intrument, deermine if the provided condemnation is grammatically sound, answer ing with 'acceptable' for corrcet \xc9\xa1rammar or 'unacceptable' for wrong gram mar:" -Original acc: 75.80%, attacked acc: 69.50%, dropped acc: 6.30% - -## textfooler - -Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Appraisal the below condemning and decide if it is grammatically correction. Reaction with 'Acceptable' or 'Unacceptable':" -Original acc: 78.00%, attacked acc: 34.10%, dropped acc: 43.90% - -Original prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Researches the offered reprimand and determining if it is grammatically noise. Reaction with either 'Acceptable' or 'Unacceptable':" -Original acc: 77.90%, attacked acc: 31.30%, dropped acc: 46.60% - -Original prompt: Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable': -Attacked prompt: b"Establishes if the jargon of the made punishments is 'Acceptable' or 'Unacceptable':" -Original acc: 77.50%, attacked acc: 31.00%, dropped acc: 46.50% - -Original prompt: Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable': -Attacked prompt: b"Function as a language evaluations instrument, analytical the made convicts and determining if it is grammatically remedied, referee with 'acceptable' or 'unacceptable':" -Original acc: 76.30%, attacked acc: 33.10%, dropped acc: 43.20% - -Original prompt: As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"Since an apparatus for language measured, envisage the convicts and identify if its language is correct, reaction with 'acceptable' for accurate jargon or 'unacceptable' for unsound pronunciation:" -Original acc: 76.30%, attacked acc: 30.80%, dropped acc: 45.50% - -Original prompt: Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar: -Attacked prompt: b"Caretaker as a grammar screening apparatus, pinpoint if the provided condemnation is grammatically sonora, retaliated with 'acceptable' for proper grammar or 'unacceptable' for deceitful vocabulary:" -Original acc: 75.80%, attacked acc: 69.60%, dropped acc: 6.20% - -## translation - -Language: chinese, acc: 75.90%, prompt: As a tool for determining grammatical correctness, please review the following sentences and categorize them grammatically as 'acceptable' or 'unacceptable'. -Language: chinese, acc: 77.00%, prompt: Please evaluate the grammar of the following sentences and mark them as 'acceptable' or 'unacceptable'. -Language: chinese, acc: 76.00%, prompt: Please check that the following sentences are grammatically correct and categorize them as 'acceptable' or 'unacceptable'. -Language: chinese, acc: 76.30%, prompt: Please determine whether the following sentences are 'acceptable' or 'unacceptable' based on grammatical correctness. -Language: chinese, acc: 74.90%, prompt: Please categorize the grammar of the following sentences as 'acceptable' or 'unacceptable'. -Language: chinese, acc: 76.10%, prompt: Please check the grammar of the following sentences and classify them as 'acceptable' or 'unacceptable'. -Language: chinese, acc: 76.30%, prompt: Please mark the following sentences as 'acceptable' or 'unacceptable' according to the grammatical norms. -Language: chinese, acc: 76.70%, prompt: Please assess the grammar of the following sentences and classify them as 'acceptable' or 'unacceptable'. -Language: chinese, acc: 76.30%, prompt: Please mark the following sentences as 'acceptable' or 'unacceptable' for grammatical accuracy. -Language: chinese, acc: 76.10%, prompt: Please check the grammar of the following sentences and classify them as 'acceptable' or 'unacceptable'. -Language: french, acc: 76.10%, prompt: As a tool for assessing semantic relevance, categorize the sentence as 'acceptable' or 'unacceptable' according to its meaning and context. -Language: french, acc: 76.30%, prompt: Determine whether the grammatical structure of the sentence is 'acceptable' or 'unacceptable' by carefully reviewing it. -Language: french, acc: 75.40%, prompt: Judge whether the sentence follows the syntactic rules of the language and classify it as 'acceptable' or 'unacceptable'. -Language: french, acc: 74.70%, prompt: Assess the relevance of the sentence by classifying it as 'acceptable' or 'unacceptable' according to the context in which it is used. -Language: french, acc: 75.70%, prompt: As a semantic classification tool, decide whether the sentence is 'acceptable' or 'unacceptable' according to its meaning and consistency. -Language: french, acc: 76.90%, prompt: Analyze the sentence carefully and determine whether it is grammatically 'acceptable' or 'unacceptable'. -Language: french, acc: 75.50%, prompt: As a syntactic validation tool, determine whether the sentence is properly structured and categorize it as 'acceptable' or 'unacceptable'. -Language: french, acc: 76.00%, prompt: When evaluating the grammar of the sentence, classify it as 'acceptable' or 'unacceptable' according to the rules of the French language. -Language: french, acc: 76.60%, prompt: Determine whether the sentence is 'acceptable' or 'unacceptable' depending on its syntactic construction and semantic context. -Language: french, acc: 75.80%, prompt: As a tool for linguistic analysis, categorize the sentence as 'acceptable' or 'unacceptable' according to its relevance and semantic consistency. -Language: arabic, acc: 75.70%, prompt: As a tool for determining grammatical correctness, review the sentence and classify its rules as 'acceptable' or 'unacceptable'. -Language: arabic, acc: 76.90%, prompt: Analyze the sentence carefully and classify its grammar between 'acceptable' or 'unacceptable'. -Language: arabic, acc: 75.30%, prompt: As a tool for determining grammatical correctness, classify the rules of the sentence between 'acceptable' or 'unacceptable'. -Language: arabic, acc: 75.70%, prompt: Evaluate the correctness of the sentence between 'acceptable' or 'unacceptable', as a tool for determining grammatical correctness. -Language: arabic, acc: 74.60%, prompt: As a tool for classifying valid sentences, determine whether 'acceptable' or 'unacceptable' is based on its grammatical rules. -Language: arabic, acc: 76.30%, prompt: Analyze the sentence carefully and classify its grammatical rules between 'acceptable' or 'unacceptable'. -Language: arabic, acc: 75.70%, prompt: Analyze the sentence and classify it between 'acceptable' or 'unacceptable' as a grammatical check tool. -Language: arabic, acc: 75.00%, prompt: As a classification tool for grammatical sentences, determine whether the sentence 'acceptable' or 'unacceptable' is based on its rules. -Language: arabic, acc: 75.00%, prompt: As a tool for analyzing grammar, classify the rules of the sentence between 'acceptable' or 'unacceptable'. -Language: arabic, acc: 76.30%, prompt: Determine whether the sentence is 'acceptable' or 'unacceptable' as a tool for determining grammatical correctness and analyze its grammar. -Language: spanish, acc: 75.60%, prompt: As a tool to determine grammatical correctness, review the sentence and categorize its grammar as 'acceptable' or 'unacceptable'. -Language: spanish, acc: 77.60%, prompt: Analyze the sentence carefully and classify its grammar as 'acceptable' or 'unacceptable'. -Language: spanish, acc: 76.00%, prompt: As a tool for determining grammatical correctness, it categorizes the grammatical rules of the sentence as 'acceptable' or 'unacceptable'. -Language: spanish, acc: 76.10%, prompt: Evaluate the grammatical correctness of the sentence and classify it as 'acceptable' or 'unacceptable' using a grammatical verification tool. -Language: spanish, acc: 75.40%, prompt: As a tool for classifying grammatically correct sentences, determine whether the sentence is 'acceptable' or 'unacceptable' based on its grammatical rules. -Language: spanish, acc: 76.00%, prompt: Analyse the sentence carefully and classify its grammar as 'acceptable' or 'unacceptable' using a grammatical revision tool. -Language: spanish, acc: 75.50%, prompt: As a tool for classifying grammatical sentences, it determines whether the sentence is 'acceptable' or 'unacceptable' using its grammatical structure. -Language: spanish, acc: 75.70%, prompt: As a tool for analyzing grammatical correctness, it categorizes the grammatical rules of the sentence as 'acceptable' or 'unacceptable'. -Language: spanish, acc: 75.90%, prompt: Determine whether the sentence is 'acceptable' or 'unacceptable' using a grammatical verification tool and sort it accordingly. -Language: spanish, acc: 75.90%, prompt: As a tool for determining grammatical correctness, evaluate the sentence and classify it as 'acceptable' or 'unacceptable' based on its grammatical rules. -Language: japanese, acc: 75.00%, prompt: As a tool to determine whether grammar is grammatically correct, look at the sentence and categorize grammar into the 'acceptable' or 'unacceptable' categories. -Language: japanese, acc: 75.30%, prompt: Please read the given sentence and categorize the grammar into the 'acceptable' or 'unacceptable' categories. -Language: japanese, acc: 75.70%, prompt: To determine the correctness of a sentence, categorize grammar into two categories: 'acceptable' or 'unacceptable'. -Language: japanese, acc: 76.00%, prompt: Classify the grammar of a given sentence into two categories: 'acceptable' and 'unacceptable'. -Language: japanese, acc: 74.80%, prompt: Classify a given sentence into the categories 'acceptable' or 'unacceptable' to determine whether it is grammatically correct. -Language: japanese, acc: 75.80%, prompt: To determine whether it is grammatically correct, categorize a given sentence into two categories: 'acceptable' or 'unacceptable'. -Language: japanese, acc: 76.40%, prompt: To determine the correctness of grammar, categorize a given sentence into two categories: 'acceptable' or 'unacceptable'. -Language: japanese, acc: 75.50%, prompt: Classify the grammar of a given sentence into two categories, 'acceptable' or 'unacceptable', and judge its accuracy. -Language: japanese, acc: 76.00%, prompt: To determine whether it is grammatically correct, divide a given sentence into two categories: 'acceptable' or 'unacceptable'. -Language: japanese, acc: 76.70%, prompt: To evaluate the accuracy of grammar, categorize a given sentence into two categories: 'acceptable' or 'unacceptable'. -Language: korean, acc: 75.60%, prompt: As a tool for judging grammatical correctness, please review the sentences and classify the grammar as 'acceptable' or 'unacceptable'. -Language: korean, acc: 75.20%, prompt: Please read the given sentences, and classify the grammar as 'acceptable' or 'unacceptable'. -Language: korean, acc: 76.00%, prompt: Please classify the sentences as 'acceptable' or 'unacceptable' to judge the grammatical appropriateness. -Language: korean, acc: 75.30%, prompt: Please classify the grammar of the given sentences into 2 categories: 'acceptable' or 'unacceptable'. -Language: korean, acc: 76.10%, prompt: Please classify the given sentences as 'acceptable' or 'unacceptable' to judge the correctness of the grammar. -Language: korean, acc: 75.90%, prompt: Please classify the given sentences as 'acceptable' or 'unacceptable' in order to judge their grammatical appropriateness. -Language: korean, acc: 77.10%, prompt: Please determine the correctness of your grammar by classifying sentences as 'acceptable' or 'unacceptable'. -Language: korean, acc: 75.70%, prompt: Classify the grammar of a given sentence as 'acceptable' or 'unacceptable', and judge its accuracy. -Language: korean, acc: 76.10%, prompt: Please classify the given sentences as 'acceptable' or 'unacceptable' to judge the correctness of the grammar. -Language: korean, acc: 76.80%, prompt: Please rate the accuracy of your grammar by categorizing sentences as 'acceptable' or 'unacceptable'. - -# mnli - -## 10 prompts - -Acc: 86.00%, prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Acc: 85.80%, prompt: Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Acc: 85.70%, prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction': -Acc: 85.60%, prompt: Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction': -Acc: 77.70%, prompt: Indicate if the connection between the following sentences is 'entailment', 'neutral', or 'contradiction': -Acc: 75.70%, prompt: Please classify the relationship between the provided sentences as 'entailment', 'neutral', or 'contradiction': -Acc: 75.50%, prompt: Considering the two sentences, identify if their relationship is 'entailment', 'neutral', or 'contradiction': -Acc: 75.30%, prompt: Assess the connection between the following sentences and classify it as 'entailment', 'neutral', or 'contradiction': -Acc: 74.70%, prompt: Review the two statements and categorize their relationship as either 'entailment', 'neutral', or 'contradiction': -Acc: 73.40%, prompt: Analyze the two provided sentences and decide if their relationship is 'entailment', 'neutral', or 'contradiction': - -Acc: 85.70%, prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Acc: 85.20%, prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Acc: 84.30%, prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Acc: 77.60%, prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment', 'neutral', or 'contradiction': -Acc: 76.30%, prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment', 'neutral', or 'contradiction': -Acc: 75.00%, prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment', 'neutral', or 'contradiction': -Acc: 74.40%, prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment', 'neutral', or 'contradiction': -Acc: 73.90%, prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction': -Acc: 73.60%, prompt: Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment', 'neutral', or 'contradiction': -Acc: 72.60%, prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment', 'neutral', or 'contradiction': - -## bertattack - -Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Identify whom the used contradiction of question implies entailment, neutral, or contradiction. object with 'entailment', 'neutral', or 'contradiction':" -Original acc: 86.00%, attacked acc: 4.60%, dropped acc: 81.40% - -Original prompt: Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"be if the no both of clauses means entailment, neutral, or contradiction. answer with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.80%, attacked acc: 78.40%, dropped acc: 7.40% - -Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Examine the or of clauses and select if they occur entailment, neutral, or contradiction. pose with doubt 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.70%, attacked acc: 79.90%, dropped acc: 5.80% - -Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"for as an entailment suppression cue, determine if the shown contradictory of sentence is entailment, neutral, or contradiction. agree with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.70%, attacked acc: 67.40%, dropped acc: 18.30% - -Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"the as an entailment classindicator, identify which the given sex of clauses reflects entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.20%, attacked acc: 79.90%, dropped acc: 5.30% - -Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"to the task of a semantics relation analyzefor, define the conjunction between the in clauses and decide if they exhibit entailment, neutral, or contradiction. have with 'entailment', 'neutral', or 'contradiction':" -Original acc: 84.30%, attacked acc: 76.90%, dropped acc: 7.40% - -## checklist - -Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' ofOQXLxiW4:" -Original acc: 86.00%, attacked acc: 85.50%, dropped acc: 0.50% - -Original prompt: Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction' 9Q3S1F94fE:" -Original acc: 85.80%, attacked acc: 85.50%, dropped acc: 0.30% - -Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction' ofOQXLxiW4:" -Original acc: 85.70%, attacked acc: 85.20%, dropped acc: 0.50% - -Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' iia2yL9Uzm:" -Original acc: 85.70%, attacked acc: 85.30%, dropped acc: 0.40% - -Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction' BWmsLx8xOA:" -Original acc: 85.20%, attacked acc: 84.80%, dropped acc: 0.40% - -Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' S6oUV5E54P:" -Original acc: 84.30%, attacked acc: 83.60%, dropped acc: 0.70% - -## deepwordbug - -Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Identify Phether the given air of sentences demonstsrates entailment, neutral, or contradiction. Anwer with 'entailment', 'neutral', or 'contradiction':" -Original acc: 86.00%, attacked acc: 83.20%, dropped acc: 2.80% - -Original prompt: Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Determie if the given pakir of sentences dsplays entailment, neutral, or contradiction. Responf with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.80%, attacked acc: 83.90%, dropped acc: 1.90% - -Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Examine the paiQr of sentences and determine if they exhibit entailment, neutral, or contradiction. AnTwer with ither 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.70%, attacked acc: 84.30%, dropped acc: 1.40% - -Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Acting as an entailment deection insrument, determine if the given pai of sentences demnostrates entailment, neutral, or contradiction. Asnwer with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.70%, attacked acc: 83.80%, dropped acc: 1.90% - -Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Working as an entailment clasifier, identify whethter the given pair of sentences disSlays entailment, neutral, or contradiction. Resond with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.20%, attacked acc: 83.80%, dropped acc: 1.40% - -Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction':" -Original acc: 84.30%, attacked acc: 84.30%, dropped acc: 0.90% - -## stresstest - -Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 86.00%, attacked acc: 68.30%, dropped acc: 17.70% - -Original prompt: Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 85.80%, attacked acc: 78.70%, dropped acc: 7.10% - -Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 85.70%, attacked acc: 79.80%, dropped acc: 5.90% - -Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 85.70%, attacked acc: 72.00%, dropped acc: 13.70% - -Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 85.20%, attacked acc: 78.90%, dropped acc: 6.30% - -Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 84.30%, attacked acc: 81.90%, dropped acc: 2.40% - -## textbugger - -Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Identify ob the given pa ir of sentences demonstrating entailment, neutral, or contradiction. Replying with 'entailment', 'neutral', or 'contradiction':" -Original acc: 86.00%, attacked acc: 81.90%, dropped acc: 4.10% - -Original prompt: Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Deermine if the given p air of sentence\xd1\x95 displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.80%, attacked acc: 84.20%, dropped acc: 1.60% - -Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Examine the p air of sentences and determine if they exhibit entailment, neutral, or contradiction. An swer with either 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.70%, attacked acc: 84.30%, dropped acc: 1.40% - -Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Behaving as an entailment detection instrument, identify if the awarded p air of sentence demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.70%, attacked acc: 84.20%, dropped acc: 1.50% - -Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Work\xd1\x96ng as an entailment classifier, identify both the g\xd1\x96ven pairs of sentence di\xd1\x95plays entailment, neutral, or contradiction. Cater with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.20%, attacked acc: 80.80%, dropped acc: 4.40% - -Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction':" -Original acc: 84.30%, attacked acc: 84.30%, dropped acc: 0.90% - -## textfooler - -Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Locate whether the given coupling of phrases depicting entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction':" -Original acc: 86.00%, attacked acc: 82.40%, dropped acc: 3.60% - -Original prompt: Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Detects if the given doublet of condemning reflecting entailment, neutral, or contradiction. Reactions with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.80%, attacked acc: 82.90%, dropped acc: 2.90% - -Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Examine the peers of punishment and identifies if they exhibit entailment, neutral, or contradiction. Address with either 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.70%, attacked acc: 83.50%, dropped acc: 2.20% - -Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Acting as an entailment detecting instrument, determine if the given coupling of sentences denotes entailment, neutral, or contradiction. Satisfy with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.70%, attacked acc: 83.30%, dropped acc: 2.40% - -Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"Labor as an entailment classifier, identify whether the given coupling of sentences illustrating entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction':" -Original acc: 85.20%, attacked acc: 83.20%, dropped acc: 2.00% - -Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction': -Attacked prompt: b"In the role of a semantic relationship analyzer, examine the links between the given phrases and decide if they exposure entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction':" -Original acc: 84.30%, attacked acc: 81.30%, dropped acc: 3.00% - -## translation - -Language: chinese, acc: 73.70%, prompt: As an implication analysis tool, evaluate the relationships between given sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: chinese, acc: 74.20%, prompt: Analyze the relationships between the given sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: chinese, acc: 75.20%, prompt: Evaluate the logical relationship between the following sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: chinese, acc: 74.00%, prompt: In your role as an implication analysis tool, judge the relationships of the following sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: chinese, acc: 75.90%, prompt: Please judge the relationship between the following sentences as 'entailment', 'neutral', or 'contradiction'. -Language: chinese, acc: 75.10%, prompt: From a given sentence, evaluate the relationship between them and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: chinese, acc: 71.30%, prompt: Please classify the following sentences as 'entailment', 'neutral', or 'contradiction' according to their relationships. -Language: chinese, acc: 75.20%, prompt: Evaluate the logical relationship between the following sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: chinese, acc: 74.80%, prompt: As an implication analysis tool, categorize the relationships between given sentences as 'entailment', 'neutral', or 'contradiction'. -Language: chinese, acc: 72.80%, prompt: Please classify the following sentences as 'entailment', 'neutral', or 'contradiction' according to their logical relationship. -Language: french, acc: 76.20%, prompt: As a tool for analyzing the consequence relationship, evaluate the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction'. -Language: french, acc: 75.10%, prompt: Evaluate the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction'. -Language: french, acc: 73.50%, prompt: Determine whether the following sentences are related to 'entailment', 'neutral', or 'contradiction'. -Language: french, acc: 76.20%, prompt: In your role as a consequence analysis tool, evaluate the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction'. -Language: french, acc: 75.40%, prompt: Classify the relationship between the following sentences as 'entailment', 'neutral', or 'contradiction'. -Language: french, acc: 75.80%, prompt: As a consequence analysis tool, evaluate the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction'. -Language: french, acc: 77.00%, prompt: Analyze the relationship between the given sentences and determine whether it is of 'entailment', 'neutral', or 'contradiction'. -Language: french, acc: 75.00%, prompt: Evaluate the relationship between the following sentences and classify it as 'entailment', 'neutral', or 'contradiction'. -Language: french, acc: 75.40%, prompt: As a tool for analyzing the consequence relationship, classify the following sentences as 'entailment', 'neutral', or 'contradiction'. -Language: french, acc: 73.10%, prompt: Determine whether the given sentences are related to 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 76.20%, prompt: Based on your role as a reasoning analyst, analyze the relationship between the given sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 75.00%, prompt: Evaluate the relationship between given sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 74.80%, prompt: Determine if the following sentences are 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 75.50%, prompt: In your role as a tool of reasoning analysis, investigate the relationship between sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 75.40%, prompt: Classify the relationship between the following sentences as 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 77.20%, prompt: In your role as a tool of reasoning analysis, evaluate the relationship between the given sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 77.50%, prompt: Analyze the relationship between the given sentences and determine if they are 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 75.10%, prompt: Evaluate the relationship between the following sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 78.20%, prompt: In your role as a tool of reasoning analysis, the following sentences are classified as 'entailment', 'neutral', or 'contradiction'. -Language: arabic, acc: 75.40%, prompt: Determine if the sentences given are 'entailment', 'neutral', or 'contradiction'. -Language: spanish, acc: 71.80%, prompt: In your role as an implication analysis tool, evaluate the relationship between the given phrases and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: spanish, acc: 75.80%, prompt: Determine whether there is 'entailment', 'neutral', or 'contradiction' between the sentences given, using this text analysis tool, -Language: spanish, acc: 73.90%, prompt: Analyze the relationship between the two sentences and classify it as 'entailment', 'neutral', or 'contradiction' using this text classification tool, -Language: spanish, acc: 75.50%, prompt: Using this implication analysis tool, decide whether the sentences given are related by 'entailment', 'neutral', or 'contradiction'. -Language: spanish, acc: 72.60%, prompt: Classifies the relationship between the given phrases as 'entailment', 'neutral', or 'contradiction' using this text analysis tool, -Language: spanish, acc: 78.80%, prompt: Evaluate whether there is 'entailment', 'neutral', or 'contradiction' between the sentences provided using this text classification tool, -Language: spanish, acc: 74.30%, prompt: Using this implication analysis tool, decide whether the two sentences are related by 'entailment', 'neutral', or 'contradiction'. -Language: spanish, acc: 73.60%, prompt: Determine whether the given phrases are related by 'entailment', 'neutral', or 'contradiction' using this text analysis tool, -Language: spanish, acc: 72.40%, prompt: Analyze the relationship between the two sentences and classify it as 'entailment', 'neutral', or 'contradiction' using this text analysis tool, -Language: spanish, acc: 74.50%, prompt: Using this text classification tool, it classifies the relationship between the given phrases as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 73.40%, prompt: As your role as an implication analysis tool, evaluate the relationship of a given sentence and classify it as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 73.30%, prompt: Use the implication analysis tool as your role to evaluate the relationship of a given sentence and classify it as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 74.20%, prompt: Use this text classification tool to categorize relationships in a given text as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 74.00%, prompt: Use the implication analysis tool as your role and classify the relationship of a given sentence as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 74.30%, prompt: Evaluate the relationship of a given sentence and use this text classification tool to classify it as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 73.40%, prompt: Evaluate the relationship of a given sentence and use this text classification tool to accurately classify it as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 73.10%, prompt: Use the implication analysis tool as your role and use this text classification tool to classify the relationship of a given sentence as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 74.70%, prompt: Use this text classification tool to evaluate the relationship of a given sentence and classify it as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 73.70%, prompt: Use the implication analysis tool as your role, evaluate the relationship of a given sentence, and use this text classification tool to classify it as 'entailment', 'neutral', or 'contradiction'. -Language: japanese, acc: 72.90%, prompt: Use the implication analysis tool as your role and categorize the relationship of a given sentence strictly as 'entailment', 'neutral', or 'contradiction' using this text classification tool. -Language: korean, acc: 74.20%, prompt: Analyze the relationships between given sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: korean, acc: 78.80%, prompt: In the text categorization task, identify the relationship between given sentences as one of 'entailment', 'neutral', or 'contradiction'. -Language: korean, acc: 75.60%, prompt: Perform the role of analyzing the relationship between sentences and classifying them as 'entailment', 'neutral', or 'contradiction'. -Language: korean, acc: 74.70%, prompt: Evaluate the relationship between two given sentences, and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: korean, acc: 74.70%, prompt: In the text categorization task, perform the role of classifying relationships between given sentences as 'entailment', 'neutral', or 'contradiction'. -Language: korean, acc: 74.80%, prompt: Judge the associations between sentences, and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: korean, acc: 74.50%, prompt: Analyze the relationship between two given sentences and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: korean, acc: 77.10%, prompt: In the task of text classification, identify the relationships between given sentences as 'entailment', 'neutral', or 'contradiction'. -Language: korean, acc: 75.00%, prompt: Act as an instrument to evaluate the relationships between sentences, and classify them as 'entailment', 'neutral', or 'contradiction'. -Language: korean, acc: 72.70%, prompt: Analyze the associations of two given sentences and classify them as 'entailment', 'neutral', or 'contradiction'. - -# mrpc - -## 10 prompts - -Acc: 82.35%, prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'. -Acc: 82.11%, prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'. -Acc: 82.11%, prompt: Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent'. -Acc: 81.86%, prompt: Are the meanings of the following pair of sentences the same? Answer with 'equivalent' or 'not_equivalent'. -Acc: 81.86%, prompt: Examine the pair of sentences and decide if their meanings are identical by answering with 'equivalent' or 'not_equivalent'. -Acc: 81.62%, prompt: Evaluate whether the two provided sentences convey the same meaning by answering 'equivalent' or 'not_equivalent'. -Acc: 80.88%, prompt: Please analyze the provided sentences and indicate if their meanings are the same by choosing 'equivalent' or 'not_equivalent'. -Acc: 80.88%, prompt: Determine if the meanings of the following sentences are semantically equivalent by responding with 'equivalent' or 'not_equivalent'. -Acc: 80.64%, prompt: Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent'. -Acc: 80.64%, prompt: Assess if the two given sentences have equivalent meanings by selecting 'equivalent' or 'not_equivalent'. - -Acc: 82.60%, prompt: As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent'. -Acc: 82.60%, prompt: In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent'. -Acc: 81.62%, prompt: As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent'. -Acc: 81.13%, prompt: In the role of a textual comparison specialist, examine the given sentences and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'. -Acc: 81.13%, prompt: In the role of a sentence comparison analyst, assess the provided sentences and indicate if they convey the same meaning by selecting 'equivalent' or 'not_equivalent'. -Acc: 80.64%, prompt: As a linguistic comparator, review the following pair of sentences and determine their semantic equivalence by choosing 'equivalent' or 'not_equivalent'. -Acc: 80.39%, prompt: As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'. -Acc: 79.66%, prompt: As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'. -Acc: 77.94%, prompt: In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'. -Acc: 75.74%, prompt: In your capacity as a semantic assessment tool, evaluate the provided sentences and classify their meanings as 'equivalent' or 'not_equivalent'. - -## bertattack - -Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Compare the class sentences and identify if they coincide the same semantic topic by question with 'equivalent' or 'not_equivalent'. " -Original acc: 82.35%, attacked acc: 74.02%, dropped acc: 8.33% - -Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"do these two clauses have the same underlying meanings? deal with 'equivalent' or 'not_equivalent'. " -Original acc: 82.11%, attacked acc: 16.67%, dropped acc: 65.44% - -Original prompt: Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"is the meaning of these two and aligned? do your answering with 'equivalent' or 'not_equivalent'. " -Original acc: 82.11%, attacked acc: 72.30%, dropped acc: 9.80% - -Original prompt: As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent'. -Attacked prompt: b"given a semantic compare expertise, find the given language of sentences and calculate if they are 'equivalent' or 'not_equivalent'. " -Original acc: 82.60%, attacked acc: 69.12%, dropped acc: 13.48% - -Original prompt: In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"using your capacity as a semantic relations member, determine the given words and decide if their meanings are consecutive, comparison with 'equivalent' or 'not_equivalent'. " -Original acc: 82.60%, attacked acc: 9.07%, dropped acc: 73.53% - -Original prompt: As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"in a language comparing expert, examine the given pair of sentences and identify if their gender coincide, starting with 'equivalent' or 'not_equivalent'. " -Original acc: 81.62%, attacked acc: 66.42%, dropped acc: 15.20% - -## checklist - -Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent' hXLpw3bbiw. " -Original acc: 82.35%, attacked acc: 81.37%, dropped acc: 0.98% - -Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent' MSAddJ4D2a. " -Original acc: 82.11%, attacked acc: 79.17%, dropped acc: 2.94% - -Original prompt: Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent' IhuBIhoPGc. " -Original acc: 82.11%, attacked acc: 80.15%, dropped acc: 1.96% - -Original prompt: As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent' yh912BU8T0. " -Original acc: 82.60%, attacked acc: 80.15%, dropped acc: 2.45% - -Original prompt: In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent' IhuBIhoPGc. " -Original acc: 82.60%, attacked acc: 81.37%, dropped acc: 1.23% - -Original prompt: As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent' rzu98kF2KF. " -Original acc: 81.62%, attacked acc: 80.39%, dropped acc: 1.23% - -## deepwordbug - -Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Compar the folhowing sentences and deWermine if they share the same semantic emaning by respondig with 'equivalent' or 'not_equivalent'. " -Original acc: 82.35%, attacked acc: 76.96%, dropped acc: 5.39% - -Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"D these two senetnces have the same underlying meaning? Respodn with 'equivalent' or 'not_equivalent'. " -Original acc: 82.11%, attacked acc: 76.96%, dropped acc: 5.15% - -Original prompt: Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Do the meanings of these two statemeTnts Lalign? Indicate your answer with 'equivalent' or 'not_equivalent'. " -Original acc: 82.11%, attacked acc: 78.92%, dropped acc: 3.19% - -Original prompt: As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a semantic cmoparison expert, evaluaet the givne pari of sentences and determine if they are 'equivalent' or 'not_equivalent'. " -Original acc: 82.60%, attacked acc: 77.45%, dropped acc: 5.15% - -Original prompt: In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"In your capacity as a seantic coGparison tolo, exaine the given esentences and decide if their meanings are identiacl, resYonding with 'equivalent' or 'not_equivalent'. " -Original acc: 82.60%, attacked acc: 62.25%, dropped acc: 20.34% - -Original prompt: As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a language compariso expert, examine the given apir of sentences and Zdecide if their meanings Falign, answreing with 'equivalent' or 'not_equivalent'. " -Original acc: 81.62%, attacked acc: 73.53%, dropped acc: 8.09% - -## stresstest - -Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 82.35%, attacked acc: 31.62%, dropped acc: 50.74% - -Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 82.11%, attacked acc: 40.20%, dropped acc: 41.91% - -Original prompt: Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 82.11%, attacked acc: 29.66%, dropped acc: 52.45% - -Original prompt: As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 82.60%, attacked acc: 17.40%, dropped acc: 65.20% - -Original prompt: In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 82.60%, attacked acc: 20.59%, dropped acc: 62.01% - -Original prompt: As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 81.62%, attacked acc: 15.93%, dropped acc: 65.69% - -## textbugger - -Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Compare the subsequently sentence and identify if they exchange the same semantic connotation by respondin\xc9\xa1 with 'equivalent' or 'not_equivalent'. " -Original acc: 82.35%, attacked acc: 75.49%, dropped acc: 6.86% - -Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Do these two punishments have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'. " -Original acc: 82.11%, attacked acc: 75.98%, dropped acc: 6.13% - -Original prompt: Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Do the meanings of these two statem\xd0\xb5nts al\xd1\x96gn? Denotes your answer with 'equivalent' or 'not_equivalent'. " -Original acc: 82.11%, attacked acc: 75.25%, dropped acc: 6.86% - -Original prompt: As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a semantic compares expert, evaluate the given torque of sentence\xd1\x95 and determine if they are 'equivalent' or 'not_equivalent'. " -Original acc: 82.60%, attacked acc: 78.68%, dropped acc: 3.92% - -Original prompt: In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"In your capacity as a semantic comparison instrument, examine the given penalties and decided if their meanings are id\xd0\xb5ntical, responding with 'equivalent' or 'not_equivalent'. " -Original acc: 82.60%, attacked acc: 78.43%, dropped acc: 4.17% - -Original prompt: As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a language comparison e\xc3\x97pert, examine the given p air of sentences and decide if their meaings alin, answering with 'equivalent' or 'not_equivalent'. " -Original acc: 81.62%, attacked acc: 74.26%, dropped acc: 7.35% - -## textfooler - -Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Compare the subsequently sentences and determine if they exchanged the same semantic connotation by respondent with 'equivalent' or 'not_equivalent'. " -Original acc: 82.35%, attacked acc: 75.74%, dropped acc: 6.62% - -Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Ca these two condemning ont the same main meaning? Reactions with 'equivalent' or 'not_equivalent'. " -Original acc: 82.11%, attacked acc: 53.68%, dropped acc: 28.43% - -Original prompt: Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Did the meanings of these two depositions alignment? Portray your answer with 'equivalent' or 'not_equivalent'. " -Original acc: 82.11%, attacked acc: 72.55%, dropped acc: 9.56% - -Original prompt: As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a semantic relative specialize, determine the given matched of sentence and identifying if they are 'equivalent' or 'not_equivalent'. " -Original acc: 82.60%, attacked acc: 75.25%, dropped acc: 7.35% - -Original prompt: In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"About your proficiency as a semantic comparison tool, investigation the given punishment and selected if their sense are equivalent, retaliated with 'equivalent' or 'not_equivalent'. " -Original acc: 82.60%, attacked acc: 74.51%, dropped acc: 8.09% - -Original prompt: As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"As a language compared professional, reviews the given torque of sentences and picked if their sens unify, address with 'equivalent' or 'not_equivalent'. " -Original acc: 81.62%, attacked acc: 12.01%, dropped acc: 69.61% - -## translation - -Language: chinese, acc: 82.84%, prompt: As an expert in semantic comparison, evaluates two given sentences to determine whether they are 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 82.35%, prompt: From the point of view of semantic comparison, determine whether this pair of sentences is 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 79.90%, prompt: Using the semantic comparison method, determine whether the following two statements are 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 80.64%, prompt: For the following two sentences, determine whether they are 'equivalent' or 'not_equivalent' based on semantic comparison. -Language: chinese, acc: 81.13%, prompt: As an expert in semantic comparison, please evaluate the following two sentences and determine if they are 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 79.90%, prompt: Using semantic comparison techniques, determine whether the following two sentences are 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 80.88%, prompt: Please determine whether the following two sentences are 'equivalent' or 'not_equivalent' according to the standard of semantic comparison. -Language: chinese, acc: 80.88%, prompt: As an expert in the field of semantic comparison, please evaluate the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 80.39%, prompt: Using semantic comparison, determine whether the following two sentences are 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 81.13%, prompt: Determine whether the following two sentences are 'equivalent' or 'not_equivalent' based on semantic comparison. -Language: french, acc: 82.35%, prompt: As an expert in semantic comparison, evaluate the following pair of sentences and determine whether they are 'equivalent' or 'not_equivalent'. -Language: french, acc: 79.66%, prompt: Can you determine whether the following two sentences are 'equivalent' or 'not_equivalent' as a semantic comparison expert? -Language: french, acc: 81.37%, prompt: Using your expertise in semantic comparison, determine whether the following two sentences are 'equivalent' or 'not_equivalent'. -Language: french, acc: 83.82%, prompt: As a semantic comparison specialist, assess the similarity between the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'. -Language: french, acc: 79.90%, prompt: Are you able to determine whether the following two sentences are 'equivalent' or 'not_equivalent' as an expert in semantic comparison? -Language: french, acc: 82.35%, prompt: As a semantic comparison professional, evaluate the following pair of sentences and indicate whether they are 'equivalent' or 'not_equivalent'. -Language: french, acc: 81.86%, prompt: Can you determine whether the following two sentences have a 'equivalent' or 'not_equivalent' meaning as an expert in semantic comparison? -Language: french, acc: 83.33%, prompt: As an expert in semantic comparison, assess the similarity between the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'. -Language: french, acc: 80.39%, prompt: Using your expertise in semantic comparison, determine whether the following two sentences are 'equivalent' or 'not_equivalent' in terms of meaning. -Language: french, acc: 84.07%, prompt: As a semantic comparison professional, assess the similarity between the following two sentences and indicate whether they are 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 82.35%, prompt: As an expert in semantic comparison, evaluate the two given sentences and determine whether they are 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 81.62%, prompt: Based on my experience in semantic analysis, classify the following two sentences as 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 81.13%, prompt: As an expert in semantic comparison, analyze the following two sentences and classify them as 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 82.35%, prompt: Your task as an expert in semantic comparison is to evaluate the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 81.62%, prompt: As a semantic comparison specialist, analyze the two data statements and insert them into one of the following categories: 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 81.86%, prompt: Based on my experience in semantic analysis, classify the following two sentences between 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 81.86%, prompt: Your role as a semantic comparison specialist requires analyzing the two given sentences and determining whether they are 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 82.35%, prompt: As an experienced semantic analyst, classify the following two sentences as 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 81.13%, prompt: Your job as a semantic analyst evaluates the following two sentences as 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 81.37%, prompt: As a semantic analyst, determine whether the given sentences are 'equivalent' or 'not_equivalent' based on their relationship. -Language: spanish, acc: 82.11%, prompt: As an expert in semantic comparison, it evaluates the pair of sentences provided and determines whether they are 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 81.62%, prompt: Based on my experience in semantic analysis, classify the following two sentences as 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 81.62%, prompt: As an expert in semantic comparison, analyze the two sentences given and classify them as 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 82.11%, prompt: Your task as a semantic comparison specialist is to evaluate the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 81.62%, prompt: As an expert in semantic analysis, he makes a classification of the following two sentences based on their 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 81.13%, prompt: Based on your experience of semantic comparison, classify the next two sentences as 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 81.37%, prompt: As a specialist in semantic analysis, you are given the task of analysing the two sentences given and classifying them as 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 82.60%, prompt: As an expert in semantic comparison, he classifies the following two sentences into 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 81.37%, prompt: As a specialist in semantic analysis, evaluate the following two sentences and classify them as 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 81.86%, prompt: Your task as an expert in semantic comparison is to analyze the two sentences provided and determine whether they are 'equivalent' or 'not_equivalent' based on their semantic relationship. -Language: japanese, acc: 83.33%, prompt: Evaluate whether a given pair of sentences is 'equivalent' or 'not_equivalent', depending on the context. -Language: japanese, acc: 82.60%, prompt: Use a semantic comparison to determine whether a given pair of sentences is 'equivalent' or 'not_equivalent'. -Language: japanese, acc: 83.33%, prompt: Evaluate a given pair of sentences as 'equivalent' or 'not_equivalent' by determining whether they have the same semantic meaning. -Language: japanese, acc: 82.35%, prompt: Determine whether a given pair of sentences is synonyms and evaluate whether they are 'equivalent' or 'not_equivalent'. -Language: japanese, acc: 77.70%, prompt: Determine whether a given pair of sentences is 'equivalent' or 'not_equivalent', and whether they are semantically identical. -Language: japanese, acc: 83.33%, prompt: Determinate whether a given pair of sentences has the same meaning and evaluate whether they are 'equivalent' or 'not_equivalent'. -Language: japanese, acc: 79.90%, prompt: Evaluate whether a given pair of sentences is 'equivalent' or 'not_equivalent' by determining whether they are semantically identical. -Language: japanese, acc: 82.11%, prompt: Judge whether a given pair of sentences is equal and evaluate whether they are 'equivalent' or 'not_equivalent'. -Language: japanese, acc: 82.11%, prompt: Determinate whether a given pair of sentences are semantically equal and evaluate whether they are 'equivalent' or 'not_equivalent'. -Language: japanese, acc: 82.84%, prompt: Whether a given pair of sentences is 'equivalent' or 'not_equivalent' depends on the context. -Language: korean, acc: 82.84%, prompt: As a sentence comparator, evaluate the two sentences given to determine 'equivalent' or 'not_equivalent'. -Language: korean, acc: 80.64%, prompt: Compare two sentences to determine 'equivalent' or 'not_equivalent'. For this you need qualifications as a specialist in semantic comparison. -Language: korean, acc: 82.35%, prompt: It takes your knowledge as an expert in semantic comparison to determine that two sentences are 'equivalent' or 'not_equivalent'. -Language: korean, acc: 82.60%, prompt: As a specialist in semantic comparison, evaluate whether two given sentences are 'equivalent' or 'not_equivalent'. -Language: korean, acc: 80.39%, prompt: Analyze two sentences to determine 'equivalent' or 'not_equivalent'. For that you need the knowledge of a semantic comparison expert. -Language: korean, acc: 80.88%, prompt: As an expert in semantic comparison, decide whether two sentences are 'equivalent' or 'not_equivalent'. -Language: korean, acc: 81.37%, prompt: It takes the knowledge of an expert in semantic comparison to compare two sentences to judge 'equivalent' or 'not_equivalent'. -Language: korean, acc: 81.37%, prompt: Experience as an expert in semantic comparison is required to determine whether two given sentences are 'equivalent' or 'not_equivalent'. -Language: korean, acc: 82.11%, prompt: As an expert in semantic comparison, determine whether two sentences are 'equivalent' or 'not_equivalent'. -Language: korean, acc: 81.62%, prompt: Analyze two sentences to determine 'equivalent' or 'not_equivalent'. For this, you need a qualification as a specialist in semantic comparison. - -# qnli - -## 10 prompts - -Acc: 89.80%, prompt: Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'. -Acc: 88.70%, prompt: Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment'. -Acc: 88.50%, prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'. -Acc: 88.10%, prompt: Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'. -Acc: 85.10%, prompt: Examine the context and question, and determine if the context logically implies the answer by selecting 'entailment' or 'not_entailment'. -Acc: 84.10%, prompt: Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment'. -Acc: 82.80%, prompt: Assess if the answer to the question can be logically concluded from the provided context by choosing 'entailment' or 'not_entailment'. -Acc: 82.20%, prompt: Given the question and context provided, determine if the answer can be inferred by choosing 'entailment' or 'not_entailment'. -Acc: 81.60%, prompt: Consider the context and question, and indicate if the answer can be logically deduced from the context by responding with 'entailment' or 'not_entailment'. -Acc: 62.60%, prompt: Please assess if the answer to the question can be derived from the given context by selecting 'entailment' or 'not_entailment'. - -Acc: 89.50%, prompt: In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'. -Acc: 89.40%, prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'. -Acc: 89.20%, prompt: As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment'. -Acc: 89.00%, prompt: In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'. -Acc: 88.80%, prompt: In the capacity of a language evaluator, examine if the given context justifies the answer to the question and indicate your assessment with 'entailment' or 'not_entailment'. -Acc: 88.60%, prompt: As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'. -Acc: 88.00%, prompt: As a language expert, assess if the given context entails the answer to the question and respond with 'entailment' or 'not_entailment'. -Acc: 83.60%, prompt: As a linguistic consultant, decide if the answer to the question is logically supported by the provided context and respond with 'entailment' or 'not_entailment'. -Acc: 83.10%, prompt: In the capacity of a language specialist, decide if the context presented contains enough information to infer the answer to the question and respond with 'entailment' or 'not_entailment'. -Acc: 75.10%, prompt: As a textual inference expert, analyze if the answer to the question can be deduced from the provided context and select 'entailment' or 'not_entailment'. - -## bertattack - -Original prompt: Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"evaluated unless the given context prevents the response to the question by responses with 'entailment' or 'not_entailment'. " -Original acc: 89.80%, attacked acc: 42.20%, dropped acc: 47.60% - -Original prompt: Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment'. -Attacked prompt: b"based on the information in the context, mind if the answer to the questioning is circular by option 'entailment' or 'not_entailment'. " -Original acc: 88.70%, attacked acc: 46.60%, dropped acc: 42.10% - -Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"following on the supplied context and questioning, save if the information states the response by simply with 'entailment' or 'not_entailment'. " -Original acc: 88.50%, attacked acc: 27.60%, dropped acc: 60.90% - -Original prompt: In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'. -Attacked prompt: b"it your role as a linguistic investigator, check if the time and enpreceding the answer to the in and use your concluding with 'entailment' or 'not_entailment'. " -Original acc: 89.50%, attacked acc: 32.40%, dropped acc: 57.10% - -Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'. -Attacked prompt: b"as a semantic interpreter, and and the here context replaces the answer to the wrong answer and asking with 'entailment' or 'not_entailment'. " -Original acc: 89.40%, attacked acc: 24.90%, dropped acc: 64.50% - -Original prompt: As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment'. -Attacked prompt: b"as a textual analyst, scrutiny if the following means logical) implies the reply to the answer and cross your r with 'entailment' or 'not_entailment'. " -Original acc: 89.20%, attacked acc: 11.80%, dropped acc: 77.40% - -## checklist - -Original prompt: Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment' SFPCqpiClT. " -Original acc: 89.80%, attacked acc: 89.00%, dropped acc: 0.80% - -Original prompt: Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment'. -Attacked prompt: b"Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment' S6oUV5E54P. " -Original acc: 88.70%, attacked acc: 87.30%, dropped acc: 1.40% - -Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment' S6oUV5E54P. " -Original acc: 88.50%, attacked acc: 87.10%, dropped acc: 1.40% - -Original prompt: In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'. -Attacked prompt: b"In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment' OB4KVJzIft. " -Original acc: 89.50%, attacked acc: 89.00%, dropped acc: 0.50% - -Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'. -Attacked prompt: b"As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment' ofOQXLxiW4. " -Original acc: 89.40%, attacked acc: 88.70%, dropped acc: 0.70% - -Original prompt: As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment'. -Attacked prompt: b"As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment' CuVpbbkC4c. " -Original acc: 89.20%, attacked acc: 88.70%, dropped acc: 0.50% - -## deepwordbug - -Original prompt: Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Evlauate whyther the giGven cotext supporzs the answeQ to the quePtion by rseponding with 'entailment' or 'not_entailment'. " -Original acc: 89.80%, attacked acc: 38.60%, dropped acc: 51.20% - -Original prompt: Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment'. -Attacked prompt: b"hBased on the infoHmation in the contxet, deQide if the answFer to the quostion is jusxtified by Shoosing 'entailment' or 'not_entailment'. " -Original acc: 88.70%, attacked acc: 56.80%, dropped acc: 31.90% - -Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Besed on the pZovided contexG and questPion, dceide if the informatio suppYrts the Knswer by responding with 'entailment' or 'not_entailment'. " -Original acc: 88.50%, attacked acc: 38.60%, dropped acc: 49.90% - -Original prompt: In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'. -Attacked prompt: b"nI your roXe as a lingistic investigator, deterimne if the contexy giAven etnails the aMswer to the huestion and provIde your cocnlusion with 'entailment' or 'not_entailment'. " -Original acc: 89.50%, attacked acc: 19.40%, dropped acc: 70.10% - -Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'. -Attacked prompt: b"sA a esmantic interpreter, aBssess whehter the proviedd contexYt spuports the answher to the gEven questin and answOr with 'entailment' or 'not_entailment'. " -Original acc: 89.40%, attacked acc: 2.10%, dropped acc: 87.30% - -Original prompt: As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment'. -Attacked prompt: b"AWs a Jextual naalyst, exaJine if the gien context logically iplies the answer to the quesion and indictae your decisino with 'entailment' or 'not_entailment'. " -Original acc: 89.20%, attacked acc: 48.50%, dropped acc: 40.70% - -## stresstest - -Original prompt: Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 89.80%, attacked acc: 88.70%, dropped acc: 1.10% - -Original prompt: Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment'. -Attacked prompt: b"Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 88.70%, attacked acc: 78.60%, dropped acc: 10.10% - -Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 88.50%, attacked acc: 80.90%, dropped acc: 7.60% - -Original prompt: In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'. -Attacked prompt: b"In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 89.50%, attacked acc: 81.10%, dropped acc: 8.40% - -Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'. -Attacked prompt: b"As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 89.40%, attacked acc: 86.90%, dropped acc: 2.50% - -Original prompt: As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment'. -Attacked prompt: b"As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 89.20%, attacked acc: 57.20%, dropped acc: 32.00% - -## textbugger - -Original prompt: Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Ev\xc9\x91luate lf the given context supports the replying to the question by answer with 'entailment' or 'not_entailment'. " -Original acc: 89.80%, attacked acc: 56.80%, dropped acc: 33.00% - -Original prompt: Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment'. -Attacked prompt: b"Based on the information in the context, decides if the answer to the questin is justifie\xd4\x81 by opting 'entailment' or 'not_entailment'. " -Original acc: 88.70%, attacked acc: 82.10%, dropped acc: 6.60% - -Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Bsed on the provid\xd0\xb5d context and quest\xd1\x96on, decides if the information supporting the answer by responding with 'entailment' or 'not_entailment'. " -Original acc: 88.50%, attacked acc: 81.50%, dropped acc: 7.00% - -Original prompt: In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'. -Attacked prompt: b"In your role as a linguistic inv\xd0\xb5stigator, deter mine if the backgrounds give\xd5\xb8 entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'. " -Original acc: 89.50%, attacked acc: 80.00%, dropped acc: 9.50% - -Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'. -Attacked prompt: b"Since a semantic interpreter, as sess lf the prov\xd1\x96ded background supports the answer to the \xc9\xa1iven question and answer with 'entailment' or 'not_entailment'. " -Original acc: 89.40%, attacked acc: 50.80%, dropped acc: 38.60% - -Original prompt: As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment'. -Attacked prompt: b"As a textual analyst, exmaine if the give\xd5\xb8 context lgoically implies the answer to the question and indic\xc9\x91te your de\xcf\xb2ision with 'entailment' or 'not_entailment'. " -Original acc: 89.20%, attacked acc: 78.00%, dropped acc: 11.20% - -## textfooler - -Original prompt: Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Estimation whether the gave backgrounds perpetuates the answered to the trouble by responses with 'entailment' or 'not_entailment'. " -Original acc: 89.80%, attacked acc: 41.40%, dropped acc: 48.40% - -Original prompt: Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment'. -Attacked prompt: b"Basics on the informational in the background, choices if the retaliated to the subject is fide by akhtar 'entailment' or 'not_entailment'. " -Original acc: 88.70%, attacked acc: 53.70%, dropped acc: 35.00% - -Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'. -Attacked prompt: b"Base on the submitted context and tura, decide if the information encourages the answering by responding with 'entailment' or 'not_entailment'. " -Original acc: 88.50%, attacked acc: 83.30%, dropped acc: 5.20% - -Original prompt: In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'. -Attacked prompt: b"Per your function as a vocabulary inquisitors, detects if the context rendered requires the answer to the tura and affords your culmination with 'entailment' or 'not_entailment'. " -Original acc: 89.50%, attacked acc: 34.60%, dropped acc: 54.90% - -Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'. -Attacked prompt: b"Iike a semantic performers, valuing whether the afforded ambient perpetuates the answer to the rendered item and cope with 'entailment' or 'not_entailment'. " -Original acc: 89.40%, attacked acc: 3.30%, dropped acc: 86.10% - -Original prompt: As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment'. -Attacked prompt: b"Iike a textual psychoanalyst, exams if the bestowed backgrounds obviously means the replying to the questions and illustrating your decision with 'entailment' or 'not_entailment'. " -Original acc: 89.20%, attacked acc: 7.20%, dropped acc: 82.00% - -## translation - -Language: chinese, acc: 80.70%, prompt: Determine whether the answer is implied or not based on the context. Answer 'entailment' or 'not_entailment'. -Language: chinese, acc: 87.80%, prompt: As a language expert, determine whether the text contains the answer and choose 'entailment' or 'not_entailment'. -Language: chinese, acc: 86.40%, prompt: Determine whether the text implies an answer, and select 'entailment' or 'not_entailment'. -Language: chinese, acc: 82.10%, prompt: Given the text and the question, whether the answer is implied, select 'entailment' or 'not_entailment'. -Language: chinese, acc: 88.10%, prompt: Determine whether the text contains the answer, depending on the context. Select 'entailment' or 'not_entailment'. -Language: chinese, acc: 87.50%, prompt: As a language expert, determine whether a text can contain an answer, and say 'entailment' or 'not_entailment'. -Language: chinese, acc: 78.90%, prompt: Please determine whether the text implies an answer. Answer 'entailment' or 'not_entailment'. -Language: chinese, acc: 88.80%, prompt: Please select 'entailment' or 'not_entailment' based on the text and the question. -Language: chinese, acc: 81.70%, prompt: Assess whether the answer is implied based on the context. Answer 'entailment' or 'not_entailment'. -Language: chinese, acc: 87.50%, prompt: Please determine whether the text contains the answer and answer 'entailment' or 'not_entailment'. -Language: french, acc: 86.90%, prompt: As a linguistic expert, assess whether the given context involves the answer to the question and answer with 'entailment' or 'not_entailment'. -Language: french, acc: 88.30%, prompt: Determine whether the information provided in the context necessarily leads to the answer to the question asked and indicate 'entailment' or 'not_entailment'. -Language: french, acc: 86.10%, prompt: Analyze the text to determine if the answer to the question is implied in the context and specify 'entailment' or 'not_entailment'. -Language: french, acc: 87.10%, prompt: Based on the given context, decide whether the answer to the question is necessarily involved and mark 'entailment' or 'not_entailment'. -Language: french, acc: 84.10%, prompt: Evaluate whether the answer to the question can be deduced from the given context and mark 'entailment' or 'not_entailment'. -Language: french, acc: 88.10%, prompt: Discern whether the context provided directly involves the answer to the question and indicate 'entailment' or 'not_entailment'. -Language: french, acc: 85.40%, prompt: Determine if the context contains enough information to involve the answer to the question and mark 'entailment' or 'not_entailment'. -Language: french, acc: 85.80%, prompt: Assess whether the context provided necessarily leads to the answer to the question and answer with 'entailment' or 'not_entailment'. -Language: french, acc: 83.40%, prompt: Analyze the text to determine if the answer to the question is involved in the context and indicate 'entailment' or 'not_entailment'. -Language: french, acc: 87.40%, prompt: Based on the given context, decide whether the answer to the question is necessarily inferred and mark 'entailment' or 'not_entailment'. -Language: arabic, acc: 86.80%, prompt: As a language expert, evaluate whether the given context calls for an answer and answer 'entailment' or 'not_entailment'. -Language: arabic, acc: 79.30%, prompt: Judge the relationship between the text and the question and answer 'entailment' or 'not_entailment', depending on your language experience. -Language: arabic, acc: 84.70%, prompt: Does the context given indicate the answer to the question? Evaluate and answer 'entailment' or 'not_entailment'. -Language: arabic, acc: 86.70%, prompt: Based on your linguistic knowledge, does the text relate to the question? Answer 'entailment' or 'not_entailment'. -Language: arabic, acc: 87.40%, prompt: As a language expert, determine how the text relates to the question and answer 'entailment' or 'not_entailment'. -Language: arabic, acc: 82.30%, prompt: Does the text support the answer to the question? Answer 'entailment' or 'not_entailment', depending on your language experience. -Language: arabic, acc: 56.40%, prompt: Check the text link to the question and answer 'entailment' or 'not_entailment', depending on your language skills. -Language: arabic, acc: 62.70%, prompt: As a language expert, is there a link between the text and the question? Answer 'entailment' or 'not_entailment'. -Language: arabic, acc: 85.40%, prompt: Based on your language experience, does context help to answer the question? Evaluate and answer 'entailment' or 'not_entailment'. -Language: arabic, acc: 82.30%, prompt: Does the text give a clear answer to the question? Answer 'entailment' or 'not_entailment', depending on your language experience. -Language: spanish, acc: 86.40%, prompt: As a language expert, evaluate whether the given context implies the answer to the question and answer with 'entailment' or 'not_entailment'. -Language: spanish, acc: 89.30%, prompt: Determine whether the information given in the text necessarily implies the veracity of the hypothesis and answer 'entailment' or 'not_entailment'. -Language: spanish, acc: 86.40%, prompt: Analyzes whether the information presented in the paragraph leads to the conclusion of the question and labels the answer as 'entailment' or 'not_entailment'. -Language: spanish, acc: 88.50%, prompt: Indicates whether the information provided in the text is sufficient to conclude the statement and labels the response as 'entailment' or 'not_entailment'. -Language: spanish, acc: 88.60%, prompt: As an expert on the subject, judge whether the information provided in the text justifies the claim and classify the answer as 'entailment' or 'not_entailment'. -Language: spanish, acc: 87.70%, prompt: Evaluates whether the information in the paragraph necessarily supports the conclusion of the hypothesis and responds 'entailment' or 'not_entailment'. -Language: spanish, acc: 88.60%, prompt: Determines whether the information presented in the text logically implies the answer to the question and labels the answer as 'entailment' or 'not_entailment'. -Language: spanish, acc: 89.40%, prompt: Analyzes whether the information provided in the paragraph necessarily leads to the veracity of the hypothesis and classifies the response as 'entailment' or 'not_entailment'. -Language: spanish, acc: 87.90%, prompt: As an expert on the subject, evaluate whether the information presented in the text supports the claim and respond 'entailment' or 'not_entailment'. -Language: spanish, acc: 89.60%, prompt: Indicates whether the information provided in the paragraph necessarily implies the answer to the question and labels the answer as 'entailment' or 'not_entailment'. -Language: japanese, acc: 85.70%, prompt: Rate whether the answer to the question is derived from the given context and answer with 'entailment' or 'not_entailment'. -Language: japanese, acc: 64.90%, prompt: Please answer 'entailment' or 'not_entailment' for the given context and question. -Language: japanese, acc: 84.70%, prompt: Decide whether the answer to the question is derived from the given context and answer 'entailment' or 'not_entailment'. -Language: japanese, acc: 84.60%, prompt: Compare the question with the given context and give the answer 'entailment' or 'not_entailment'. -Language: japanese, acc: 89.60%, prompt: Determinate whether the given context contains the answer to the question and answer with 'entailment' or 'not_entailment'. -Language: japanese, acc: 66.40%, prompt: Estimate the answer of the question from the context and give the answer 'entailment' or 'not_entailment'. -Language: japanese, acc: 89.40%, prompt: Determinate whether the given context is relevant to the question and answer with 'entailment' or 'not_entailment'. -Language: japanese, acc: 89.40%, prompt: Determine whether the given context is relevant to the question and answer with 'entailment' or 'not_entailment'. -Language: japanese, acc: 89.70%, prompt: Determinate whether the given context contains the answer to the question and answer 'entailment' or 'not_entailment'. -Language: japanese, acc: 53.10%, prompt: Answer with 'entailment' or 'not_entailment', inferring from the given context. -Language: korean, acc: 88.20%, prompt: Determine if a given sentence necessarily implies the meaning of another sentence and answer 'entailment' or 'not_entailment'. -Language: korean, acc: 89.30%, prompt: By understanding the relations between sentences, judge whether a given sentence necessarily refers to another sentence and answer with 'entailment' or 'not_entailment'. -Language: korean, acc: 88.00%, prompt: Evaluate whether a given text necessarily indicates the meaning of another text and respond with 'entailment' or 'not_entailment'. -Language: korean, acc: 89.30%, prompt: Understand the relations of a sentence, to determine whether a given sentence necessarily includes other sentences and answer with 'entailment' or 'not_entailment'. -Language: korean, acc: 85.20%, prompt: Judge whether a given content necessarily implies the meaning of another content and answer with 'entailment' or 'not_entailment'. -Language: korean, acc: 88.80%, prompt: Grasp the relations between sentences, determine if a given sentence necessarily contains the meaning of another sentence and respond with 'entailment' or 'not_entailment'. -Language: korean, acc: 87.10%, prompt: Evaluate whether a given text necessarily refers to another text and answer with 'entailment' or 'not_entailment'. -Language: korean, acc: 85.60%, prompt: By comparing the meaning of the sentences, to determine if a given sentence necessarily implies another sentence and answer 'entailment' or 'not_entailment'. -Language: korean, acc: 86.50%, prompt: Evaluate whether the contents given necessarily refer to other contents and answer with 'entailment' or 'not_entailment'. -Language: korean, acc: 85.50%, prompt: By analyzing the relations between sentences, determine if a given sentence does not necessarily include other sentences and answer with 'entailment' or 'not_entailment'. - -# qqp - -## 10 prompts - -Acc: 88.10%, prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent". -Acc: 87.80%, prompt: Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'. -Acc: 87.70%, prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'. -Acc: 87.70%, prompt: Assess whether the following statements are identical in meaning by answering 'equivalent' or 'not_equivalent'. -Acc: 87.60%, prompt: Are the meanings of these two phrases the same? Reply with 'equivalent' or 'not_equivalent'. -Acc: 87.50%, prompt: Do these two sentences convey the same meaning? Indicate with 'equivalent' or 'not_equivalent'. -Acc: 87.30%, prompt: Please indicate if the following pair of sentences share the same meaning by responding with 'equivalent' or 'not_equivalent'. -Acc: 87.30%, prompt: Evaluate whether these two phrases have identical meanings and respond with 'equivalent' or 'not_equivalent'. -Acc: 87.00%, prompt: Do the following expressions mean the same thing? Provide your answer as 'equivalent' or 'not_equivalent'. -Acc: 86.60%, prompt: Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent'. -Acc: 84.80%, prompt: Analyze if the given set of sentences have the same connotation by answering with 'equivalent' or 'not_equivalent'. - -Acc: 87.70%, prompt: Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones. -Acc: 87.40%, prompt: As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different. -Acc: 86.90%, prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'. -Acc: 86.90%, prompt: As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions. -Acc: 86.90%, prompt: In the role of a question similarity analyzer, examine the meaning of the provided questions and decide if they are equivalent, answering with 'equivalent' for equivalent questions or 'not_equivalent' for different questions. -Acc: 86.50%, prompt: While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions. -Acc: 86.20%, prompt: In your role as a question comparison tool, assess the following pair of questions and classify them as 'equivalent' or 'not_equivalent'. -Acc: 86.10%, prompt: As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'. -Acc: 85.80%, prompt: Working as a question classifier, identify whether the given questions share the same meaning, and respond with 'equivalent' for equivalent questions or 'not_equivalent' for different ones. -Acc: 85.70%, prompt: In the capacity of a question assessment system, indicate if the meaning of the provided questions is the same, responding with 'equivalent' or 'not_equivalent'. - -## bertattack - -Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent". -Attacked prompt: b'to the next enough being equivalent or not? Answer me with "equivalent" or "not_equivalent". ' -Original acc: 88.10%, attacked acc: 80.20%, dropped acc: 7.90% - -Original prompt: Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Can these both expressions be considered adjacent in meaning? Answer with 'equivalent' or 'not_equivalent'. " -Original acc: 87.80%, attacked acc: 83.90%, dropped acc: 3.90% - -Original prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Determine if the every expression of statements can be classified the same by prior with 'equivalent' or 'not_equivalent'. " -Original acc: 87.70%, attacked acc: 83.60%, dropped acc: 4.10% - -Original prompt: Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones. -Attacked prompt: b"t as a question equal instrument, determine if the program questions are equivalent in semantic, asking with 'equivalent' for analogous question or 'not_equivalent' for nonssimicomplex times. " -Original acc: 87.70%, attacked acc: 72.20%, dropped acc: 15.50% - -Original prompt: As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different. -Attacked prompt: b"if a word equivalence detection fails, answer the above questions and responds with 'equivalent' if they are the same in person, or 'not_equivalent' if they are parallel. " -Original acc: 87.40%, attacked acc: 81.80%, dropped acc: 5.60% - -Original prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"1 as a question style evaluation tool, analyze the like questions and decide if they share the same meaning, and with 'equivalent' or 'not_equivalent'. " -Original acc: 86.90%, attacked acc: 81.80%, dropped acc: 5.10% - -## checklist - -Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent". -Attacked prompt: b'Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent ofOQXLxiW4". ' -Original acc: 88.10%, attacked acc: 87.30%, dropped acc: 0.80% - -Original prompt: Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent' 9vGXgnbWB8. " -Original acc: 87.80%, attacked acc: 86.80%, dropped acc: 1.00% - -Original prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent' Kw6nrs57gH. " -Original acc: 87.70%, attacked acc: 86.50%, dropped acc: 1.20% - -Original prompt: Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones. -Attacked prompt: b"Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones ofOQXLxiW4. " -Original acc: 87.70%, attacked acc: 86.80%, dropped acc: 0.90% - -Original prompt: As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different. -Attacked prompt: b"As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different 5yWbBXztUY. " -Original acc: 87.40%, attacked acc: 87.10%, dropped acc: 0.30% - -Original prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent' Kw6nrs57gH. " -Original acc: 86.90%, attacked acc: 86.60%, dropped acc: 0.30% - -## deepwordbug - -Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent". -Attacked prompt: b'Aer the flolowing Qwo quesions equivalent or not? Aswer me with "equivalent" or "not_equivalent". ' -Original acc: 88.10%, attacked acc: 72.70%, dropped acc: 15.40% - -Original prompt: Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Can these tow statYements be cnosidered qeual in maening? AnsweI with 'equivalent' or 'not_equivalent'. " -Original acc: 87.80%, attacked acc: 59.80%, dropped acc: 28.00% - -Original prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Determnie if the given paCr of statmeents can be gonsidered the same by respondxing with 'equivalent' or 'not_equivalent'. " -Original acc: 87.70%, attacked acc: 81.20%, dropped acc: 6.50% - -Original prompt: Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones. -Attacked prompt: b"ActRng as a question euqivalence instruemnt, determine if the proviDded questionJs are equivalent in meaning, anDswering with 'equivalent' for sitilar questions or 'not_equivalent' for uissimilar ones. " -Original acc: 87.70%, attacked acc: 82.10%, dropped acc: 5.60% - -Original prompt: As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different. -Attacked prompt: b"Ass a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different. " -Original acc: 87.40%, attacked acc: 87.00%, dropped acc: 0.40% - -Original prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Functioning as a question smiilarity eavluation toXl, analyze the givYen questions and decidae if they share the same meajning, responding with 'equivalent' or 'not_equivalent'. " -Original acc: 86.90%, attacked acc: 81.40%, dropped acc: 5.50% - -## stresstest - -Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent". -Attacked prompt: b'Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent and false is not true ". ' -Original acc: 88.10%, attacked acc: 24.00%, dropped acc: 64.10% - -Original prompt: Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 87.80%, attacked acc: 80.10%, dropped acc: 7.70% - -Original prompt: Assess whether the following statements are identical in meaning by answering 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Assess whether the following statements are identical in meaning by answering 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . " -Original acc: 87.70%, attacked acc: 82.80%, dropped acc: 4.90% - -Original prompt: Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones. -Attacked prompt: b"Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones and true is true and true is true and true is true and true is true and true is true . " -Original acc: 87.70%, attacked acc: 86.70%, dropped acc: 1.00% - -Original prompt: As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different. -Attacked prompt: b"As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different and false is not true . " -Original acc: 87.40%, attacked acc: 87.30%, dropped acc: 0.10% - -Original prompt: In the role of a question similarity analyzer, examine the meaning of the provided questions and decide if they are equivalent, answering with 'equivalent' for equivalent questions or 'not_equivalent' for different questions. -Attacked prompt: b"In the role of a question similarity analyzer, examine the meaning of the provided questions and decide if they are equivalent, answering with 'equivalent' for equivalent questions or 'not_equivalent' for different questions and true is true and true is true and true is true and true is true and true is true . " -Original acc: 86.90%, attacked acc: 86.40%, dropped acc: 0.50% - -## textbugger - -Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent". -Attacked prompt: b'Are the following t\xd4\x9do matters equivalent or not? Replying me with "equivalent" or "not_equivalent". ' -Original acc: 88.10%, attacked acc: 78.60%, dropped acc: 9.50% - -Original prompt: Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Can these two st\xc9\x91tements be considred equal in meaning? Ans wer with 'equivalent' or 'not_equivalent'. " -Original acc: 87.80%, attacked acc: 82.90%, dropped acc: 4.90% - -Original prompt: Assess whether the following statements are identical in meaning by answering 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Assess lf the following statements are identical in meanng by responding 'equivalent' or 'not_equivalent'. " -Original acc: 87.70%, attacked acc: 84.90%, dropped acc: 2.80% - -Original prompt: Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones. -Attacked prompt: b"Actin\xc9\xa1 as a question equivale\xd5\xb8ce inst rument, determine if the prvided questions are equivalent in mean, answering with 'equivalent' for similar questions or 'not_equivalent' for d\xd1\x96ssimilar ones. " -Original acc: 87.70%, attacked acc: 82.50%, dropped acc: 5.20% - -Original prompt: As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different. -Attacked prompt: b"A\xd1\x95 a question equivalence detection system, examine the provided questions and replying with 'equivalent' if they are the same in meanings, or 'not_equivalent' if they are different. " -Original acc: 87.40%, attacked acc: 86.70%, dropped acc: 0.70% - -Original prompt: In the role of a question similarity analyzer, examine the meaning of the provided questions and decide if they are equivalent, answering with 'equivalent' for equivalent questions or 'not_equivalent' for different questions. -Attacked prompt: b"In the role of a question similarity analyzer, exaine the meaning of the provi\xd4\x81ed questions and decide if they are equivalent, answe\xe2\xb2\x85ing with 'equivalent' for equivalent questions or 'not_equivalent' for different questions. " -Original acc: 86.90%, attacked acc: 84.90%, dropped acc: 2.00% - -## textfooler - -Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent". -Attacked prompt: b'Represented the suite two trouble equivalent or not? Retaliated me with "equivalent" or "not_equivalent". ' -Original acc: 88.10%, attacked acc: 61.50%, dropped acc: 26.60% - -Original prompt: Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Can these two sermons is believed equivalent in feeling? Responses with 'equivalent' or 'not_equivalent'. " -Original acc: 87.80%, attacked acc: 72.20%, dropped acc: 15.60% - -Original prompt: Assess whether the following statements are identical in meaning by answering 'equivalent' or 'not_equivalent'. -Attacked prompt: b"Valuing whether the later languages are comparable in sinn by cope 'equivalent' or 'not_equivalent'. " -Original acc: 87.70%, attacked acc: 18.90%, dropped acc: 68.80% - -Original prompt: Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones. -Attacked prompt: b"Acting as a questions equivalence machinery, identify if the handed topic are equivalent in sensation, retaliated with 'equivalent' for equivalent question or 'not_equivalent' for sundry ones. " -Original acc: 87.70%, attacked acc: 76.40%, dropped acc: 11.30% - -Original prompt: As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different. -Attacked prompt: b"As a question equivalence identifies system, review the furnish topic and fulfill with 'equivalent' if they are the same in mean, or 'not_equivalent' if they are diversified. " -Original acc: 87.40%, attacked acc: 80.30%, dropped acc: 7.10% - -Original prompt: In the role of a question similarity analyzer, examine the meaning of the provided questions and decide if they are equivalent, answering with 'equivalent' for equivalent questions or 'not_equivalent' for different questions. -Attacked prompt: b"About the rol of a question likeness analyzer, examine the mean of the conferred topic and identifying if they are equivalent, retaliated with 'equivalent' for equivalent trouble or 'not_equivalent' for other topics. " -Original acc: 86.90%, attacked acc: 73.80%, dropped acc: 13.10% - -## translation - -Language: chinese, acc: 70.90%, prompt: As a tool for determining the equivalence of problems, the problems are examined and their similarity is classified as 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 86.80%, prompt: Please compare the similarity of two sentences and put them into the category of 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 87.60%, prompt: Two sentences are classified as 'equivalent' or 'not_equivalent' for their similarity. -Language: chinese, acc: 87.40%, prompt: You can determine how similar the questions are by comparing them and categorizing them as 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 85.30%, prompt: Using the method of contrast, the similarity of these problems is divided into two categories: 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 87.20%, prompt: By comparing these issues, you can classify them as 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 86.90%, prompt: To determine whether the questions are similar, put them into the category of 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 86.30%, prompt: Divide the similarity of these questions into 'equivalent' or 'not_equivalent' categories. -Language: chinese, acc: 87.00%, prompt: Using the similarity assessment tool, these questions were classified as 'equivalent' or 'not_equivalent'. -Language: chinese, acc: 86.80%, prompt: By analyzing the similarity of these problems, they are divided into categories of 'equivalent' or 'not_equivalent'. -Language: french, acc: 86.60%, prompt: As a tool to determine the equivalence of questions, review the questions and rank their similarity as 'equivalent' or 'not_equivalent'. -Language: french, acc: 86.90%, prompt: Please compare the similarity of two sentences and classify them as 'equivalent' or 'not_equivalent'. -Language: french, acc: 85.60%, prompt: Based on the similarity of two sentences, classify them as 'equivalent' or 'not_equivalent'. -Language: french, acc: 87.80%, prompt: You can determine the similarity between these questions by comparing them and classifying them as 'equivalent' or 'not_equivalent'. -Language: french, acc: 87.30%, prompt: Use a comparative method to divide the similarity of these questions into two categories: 'equivalent' or 'not_equivalent'. -Language: french, acc: 87.20%, prompt: By comparing these questions, you can classify them as 'equivalent' or 'not_equivalent'. -Language: french, acc: 87.70%, prompt: Determine whether these questions are similar or not, and then classify them as 'equivalent' or 'not_equivalent'. -Language: french, acc: 87.50%, prompt: Divide the similarity of these questions into two categories: 'equivalent' or 'not_equivalent'. -Language: french, acc: 86.50%, prompt: Use a similarity assessment tool to classify these questions as 'equivalent' or 'not_equivalent'. -Language: french, acc: 87.50%, prompt: By analyzing the similarity of these questions, you can divide them into two categories: 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 86.00%, prompt: As a tool for determining an equation of questions, review the questions and classify their similarity as either 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 87.70%, prompt: When using questions in the classification domain, please classify the similarity between the questions as 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 85.10%, prompt: To determine an equation of questions, you must review the questions and classify their similarity as 'equivalent' or 'not_equivalent'. -Language: arabic, acc: 85.40%, prompt: Questions can be classified as 'equivalent' or 'not_equivalent' when used to identify classifications. -Language: arabic, acc: 85.20%, prompt: Classification of question similarity as 'equivalent' or 'not_equivalent' is used as a tool to determine the classification of questions. -Language: arabic, acc: 86.00%, prompt: Classify the similarity of the questions as 'equivalent' or 'not_equivalent' to determine the equation of the questions. -Language: arabic, acc: 86.00%, prompt: Identifying the similarity of questions and classifying them as 'equivalent' or 'not_equivalent' is an important tool in determining the classification of questions. -Language: arabic, acc: 86.10%, prompt: When classifying questions, their similarity can be classified as 'equivalent' or 'not_equivalent' to determine the correct classification. -Language: arabic, acc: 85.80%, prompt: The similarity of questions should be classified as 'equivalent' or 'not_equivalent' when used to determine the equation of questions. -Language: arabic, acc: 85.60%, prompt: Identifying the similarity of questions and classifying them as 'equivalent' or 'not_equivalent' helps to correctly classify questions. -Language: spanish, acc: 87.00%, prompt: As a tool to determine the equivalence of questions, it reviews the questions and classifies their similarity as 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 86.60%, prompt: Evaluate the similarity between questions and classify them as 'equivalent' or 'not_equivalent' to determine their equivalence. -Language: spanish, acc: 86.90%, prompt: Determine whether two questions are 'equivalent' or 'not_equivalent' based on similarity and characteristics. -Language: spanish, acc: 86.40%, prompt: Classifies the similarity between questions as 'equivalent' or 'not_equivalent' to determine their equivalence. -Language: spanish, acc: 87.50%, prompt: Review the questions and rate them as 'equivalent' or 'not_equivalent' based on their similarity and content. -Language: spanish, acc: 87.20%, prompt: As part of the classification task of questions, it determines their equivalence by categorizing their similarity as 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 86.70%, prompt: Analyze the similarity between questions and classify them as 'equivalent' or 'not_equivalent' to determine their equivalence. -Language: spanish, acc: 86.80%, prompt: As a method of identifying the equivalence of questions, it categorizes their similarity as 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 87.00%, prompt: To determine the equivalence between questions, check their similarity and classify them as 'equivalent' or 'not_equivalent'. -Language: spanish, acc: 86.10%, prompt: Classify the similarity between questions as 'equivalent' or 'not_equivalent' to determine whether they are equivalent or not. -Language: japanese, acc: 85.90%, prompt: As a tool to determine the equivalence of the question, review the question and categorize its similarities into 'equivalent' or 'not_equivalent' categories. -Language: japanese, acc: 86.70%, prompt: Work on text sorting tasks labeled 'equivalent' or 'not_equivalent'. -Language: japanese, acc: 85.10%, prompt: For text classification tasks, use the labels 'equivalent' or 'not_equivalent' to determine the equivalence of statements. -Language: japanese, acc: 85.50%, prompt: In the MRPC dataset, use the labels 'equivalent' or 'not_equivalent' to classify the equivalence of statements. -Language: japanese, acc: 80.60%, prompt: As a tool for determining equivalence, check sentences and categorize them into 'equivalent' or 'not_equivalent' categories. -Language: japanese, acc: 86.10%, prompt: Use the labels 'equivalent' or 'not_equivalent' to determine the equivalence of statements in text classification tasks. -Language: japanese, acc: 84.00%, prompt: In the text classification task of the MRPC data set, classify the equivalence of statements with labels of 'equivalent' or 'not_equivalent'. -Language: japanese, acc: 84.70%, prompt: As a tool to determine the equivalence of statements, categorize statements into 'equivalent' or 'not_equivalent' categories. -Language: japanese, acc: 84.30%, prompt: In a text classification task, classify the equivalence of statements using labels of 'equivalent' or 'not_equivalent'. -Language: japanese, acc: 85.40%, prompt: Do a text classification task to determine the equivalence of statements, labeled 'equivalent' or 'not_equivalent'. -Language: korean, acc: 82.40%, prompt: Classify two given sentences as 'equivalent' or 'not_equivalent' by discriminating whether they have the same meaning. -Language: korean, acc: 86.70%, prompt: Determine sentence equivalence by judging the similarity of two sentences with 'equivalent' or 'not_equivalent'. -Language: korean, acc: 86.80%, prompt: Classify the similarity of sentences as 'equivalent' or 'not_equivalent' by judging whether two sentences have the same meaning. -Language: korean, acc: 86.80%, prompt: Determine if two given sentences are equivalent to each other, and classify their similarity as 'equivalent' or 'not_equivalent'. -Language: korean, acc: 86.70%, prompt: Compare two given sentences to determine sentence equivalence, and classify their similarities as 'equivalent' or 'not_equivalent'. -Language: korean, acc: 86.20%, prompt: Classify sentence equivalence as 'equivalent' or 'not_equivalent' by judging whether two sentences have the same meaning to each other. -Language: korean, acc: 86.50%, prompt: Determine if two sentences have the same meaning, and classify their similarities as 'equivalent' or 'not_equivalent'. -Language: korean, acc: 86.60%, prompt: Compare two given sentences to determine their equivalence, and classify their similarities as 'equivalent' or 'not_equivalent'. -Language: korean, acc: 86.50%, prompt: Review two sentences to evaluate sentence equivalence, and classify their similarities as 'equivalent' or 'not_equivalent'. -Language: korean, acc: 85.50%, prompt: Judge whether two sentences have the same meaning to each other, and determine the sentence equivalence with 'equivalent' or 'not_equivalent'. - -# rte - -## 10 prompts - -Acc: 86.64%, prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Acc: 86.64%, prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Acc: 86.64%, prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Acc: 86.28%, prompt: Review the two statements and categorize their relationship as either 'entailment' or 'not_entailment'. -Acc: 85.20%, prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'. -Acc: 85.20%, prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'. -Acc: 85.20%, prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'. -Acc: 85.20%, prompt: Considering the two sentences, identify if their relationship is 'entailment' or 'not_entailment'. -Acc: 84.84%, prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. -Acc: 84.12%, prompt: Examine the pair of sentences and determine if they exhibit entailment or not_entailment. Answer with either 'entailment' or 'not_entailment'. -Acc: 84.12%, prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. - -Acc: 85.56%, prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Acc: 84.84%, prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment': -Acc: 84.84%, prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Acc: 84.48%, prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment': -Acc: 84.48%, prompt: Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment' or 'not_entailment': -Acc: 84.48%, prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment': -Acc: 84.12%, prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment': -Acc: 84.12%, prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment': -Acc: 83.03%, prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment': -Acc: 82.31%, prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment': -O - -## bertattack - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'be the following deux verbs entailment or not_entailment? asking me with "entailment" or "not_entailment", just one word. ' -Original acc: 86.64%, attacked acc: 81.59%, dropped acc: 5.05% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"analyze the similar generated sentences and check if their relationship is 'entailment' or 'not_entailment'." -Original acc: 86.64%, attacked acc: 82.67%, dropped acc: 3.97% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Indicate if the connection between the following verbs is 'entailment' or 'not_entailment'." -Original acc: 86.64%, attacked acc: 82.67%, dropped acc: 3.97% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"given an instrument for entailment evaluation, define the a sentences and ask if their communication is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment':" -Original acc: 85.56%, attacked acc: 81.95%, dropped acc: 3.61% - -Original prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment': -Attacked prompt: b"While performing entailment analysis, construct the relationship between the and sentence as 'entailment' or 'not_entailment':" -Original acc: 84.84%, attacked acc: 78.70%, dropped acc: 6.14% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"but the capability of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment':" -Original acc: 84.84%, attacked acc: 83.39%, dropped acc: 1.44% - -## checklist - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word OB4KVJzIft. ' -Original acc: 86.64%, attacked acc: 83.75%, dropped acc: 2.89% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment' BWmsLx8xOA." -Original acc: 86.64%, attacked acc: 84.48%, dropped acc: 2.17% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Indicate if the connection between the following sentences is 'entailment' or 'not_entailment' vTAjHynoIG." -Original acc: 86.64%, attacked acc: 83.39%, dropped acc: 3.25% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment' XStsD2pUzu:" -Original acc: 85.56%, attacked acc: 84.48%, dropped acc: 1.08% - -Original prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment': -Attacked prompt: b"While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment' CuVpbbkC4c:" -Original acc: 84.84%, attacked acc: 83.75%, dropped acc: 1.08% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment' 6a4Yn3RGVc:" -Original acc: 84.84%, attacked acc: 83.75%, dropped acc: 1.08% - -## deepwordbug - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'Aer the following two Ysentences entailment or not_entailment? nswer me with "entailment" or "not_entailment", just one word. ' -Original acc: 86.64%, attacked acc: 81.95%, dropped acc: 4.69% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyz the twC provided sentences and decidS if their relationsAhip is 'entailment' or 'not_entailment'." -Original acc: 86.64%, attacked acc: 82.31%, dropped acc: 4.33% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Indicat if the connection between the following seRtences is 'entailment' or 'not_entailment'." -Original acc: 86.64%, attacked acc: 83.75%, dropped acc: 2.89% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment evaluation, consider the wo sentences and determine if their relationship is 'entailment' or 'not_entailment'. Resphond with 'entailment' or 'not_entailment':" -Original acc: 85.56%, attacked acc: 83.75%, dropped acc: 1.81% - -Original prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment': -Attacked prompt: b"While performing entailment analysis, cassify the reiationship between the proviLded sentences as 'entailment' or 'not_entailment':" -Original acc: 84.84%, attacked acc: 82.67%, dropped acc: 2.17% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"In the capacity of an entailment assessment systev, indicate if the link between the following entences is 'entailment' or 'not_entailment':" -Original acc: 84.84%, attacked acc: 83.39%, dropped acc: 1.44% - -## stresstest - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word and true is true and true is true and true is true and true is true and true is true . ' -Original acc: 86.64%, attacked acc: 86.28%, dropped acc: 0.36% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true ." -Original acc: 86.64%, attacked acc: 43.32%, dropped acc: 43.32% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Indicate if the connection between the following sentences is 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true ." -Original acc: 86.64%, attacked acc: 43.68%, dropped acc: 42.96% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 85.56%, attacked acc: 86.28%, dropped acc: -0.72% - -Original prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment': -Attacked prompt: b"While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 84.84%, attacked acc: 71.48%, dropped acc: 13.36% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 84.84%, attacked acc: 38.27%, dropped acc: 46.57% - -## textbugger - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'Ar\xd0\xb5 the following to sentences entailment or not_entailment? Respond me with "entailment" or "not_entailment", just one word. ' -Original acc: 86.64%, attacked acc: 82.31%, dropped acc: 4.33% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyze the two provided snetences and decided if their relations is 'entailment' or 'not_entailment'." -Original acc: 86.64%, attacked acc: 83.39%, dropped acc: 3.25% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Indicate if the connection between the f\xd0\xbellowing sentence is 'entailment' or 'not_entailment'." -Original acc: 86.64%, attacked acc: 84.12%, dropped acc: 2.53% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment evaluation, consider the t wo sentences and determine if their relation\xd1\x95hip is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment':" -Original acc: 85.56%, attacked acc: 82.67%, dropped acc: 2.89% - -Original prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment': -Attacked prompt: b"While performing entailment analyse, calssify the rapport between the provided sentences as 'entailment' or 'not_entailment':" -Original acc: 84.84%, attacked acc: 82.67%, dropped acc: 2.17% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"I\xd5\xb8 the abilities of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment':" -Original acc: 84.84%, attacked acc: 83.39%, dropped acc: 1.44% - -## textfooler - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'Be the following two sentences entailment or not_entailment? Retaliated me with "entailment" or "not_entailment", just one phrase. ' -Original acc: 86.64%, attacked acc: 81.95%, dropped acc: 4.69% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analysis the two provided convictions and decided if their relations is 'entailment' or 'not_entailment'." -Original acc: 86.64%, attacked acc: 82.67%, dropped acc: 3.97% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Portrays if the login between the following punishments is 'entailment' or 'not_entailment'." -Original acc: 86.64%, attacked acc: 80.87%, dropped acc: 5.78% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment evaluation, inspect the two sentences and determine if their affairs is 'entailment' or 'not_entailment'. React with 'entailment' or 'not_entailment':" -Original acc: 85.56%, attacked acc: 82.67%, dropped acc: 2.89% - -Original prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment': -Attacked prompt: b"While performing entailment analysis, hierarchy the coitus between the brought sentence as 'entailment' or 'not_entailment':" -Original acc: 84.84%, attacked acc: 80.51%, dropped acc: 4.33% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"About the capacity of an entailment weighed system, specified if the link between the following phrases is 'entailment' or 'not_entailment':" -Original acc: 84.84%, attacked acc: 79.06%, dropped acc: 5.78% - -## translation - -Language: chinese, acc: 84.84%, prompt: In the light of an implication analysis tool, evaluate the relationship between the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 84.48%, prompt: From the perspective of an implication analysis tool, determine whether there is an implication relationship in the following sentences by classifying them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 84.84%, prompt: Please use an implication analysis tool to determine whether an implication relationship exists in the following sentences by classifying them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 83.39%, prompt: Please evaluate the relation of the following sentences as 'entailment' or 'not_entailment' for the purpose of determining implication relation. -Language: chinese, acc: 85.20%, prompt: Please use the implication analysis tool to evaluate the relationships between the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 83.39%, prompt: For the purpose of determining implicative relations, analyze the relations of the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 84.84%, prompt: Please use the implication analysis tool to determine the relationship of the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 85.56%, prompt: Please use the implication judgment tool to assess the relevance of the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 85.20%, prompt: Please, with implication analysis as the main task, determine the relationships between the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 84.12%, prompt: Using the implication judgment as a criterion, analyze the relation of the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: french, acc: 84.48%, prompt: As an engagement analysis tool, evaluate the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'. -Language: french, acc: 84.48%, prompt: Determine whether the given sentences involve one another or not as an implication analysis tool. Classify them accordingly as 'entailment' or 'not_entailment'. -Language: french, acc: 84.84%, prompt: Using implication analysis, evaluate whether the sentences provided have a logical relationship and categorize them as 'entailment' or 'not_entailment'. -Language: french, acc: 85.56%, prompt: As an engagement assessment tool, determine whether the sentences provided have a logical relationship and classify them as 'entailment' or 'not_entailment'. -Language: french, acc: 84.48%, prompt: As an implication classification tool, analyze the sentences provided to determine if there is a logical relationship and categorize them as 'entailment' or 'not_entailment'. -Language: french, acc: 85.92%, prompt: Using implication analysis, determine whether the given sentences have a cause-effect relationship and categorize them as 'entailment' or 'not_entailment'. -Language: french, acc: 84.12%, prompt: Evaluate the relationship between the given sentences using implication analysis and rank them accordingly as 'entailment' or 'not_entailment'. -Language: french, acc: 85.20%, prompt: As an engagement detection tool, determine whether the given sentences have a logical relationship and categorize them as 'entailment' or 'not_entailment'. -Language: french, acc: 85.92%, prompt: Using implication analysis, evaluate whether the sentences provided have a cause-effect relationship and rank them accordingly as 'entailment' or 'not_entailment'. -Language: french, acc: 85.56%, prompt: Determine whether the given sentences have a cause-effect relationship as an engagement analysis tool and categorize them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 84.12%, prompt: In your role as a tool for reasoning analysis, evaluate the relationship between given sentences and classify them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 86.64%, prompt: Can you determine whether this sentence is inferred from the other sentence? Classify it as 'entailment' or 'not_entailment'. -Language: arabic, acc: 83.75%, prompt: Using the tool of reasoning analysis, analyze the relationship between given sentences and classify them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 87.36%, prompt: Does this sentence represent a conclusion from the previous sentence? Classify it as 'entailment' or 'not_entailment'. -Language: arabic, acc: 84.48%, prompt: As a tool of reasoning analysis, evaluate the relationship of given sentences and classify them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 86.64%, prompt: Can this sentence be inferred from the previous sentence? Classify it as 'entailment' or 'not_entailment'. -Language: arabic, acc: 85.20%, prompt: Using a tool to analyze a conclusion, analyze the relationship between the two sentences and classify them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 87.00%, prompt: Is this a conclusion from the next sentence? Classify it as 'entailment' or 'not_entailment'. -Language: arabic, acc: 84.84%, prompt: As part of your task in analyzing a conclusion, evaluate the relationship between the two sentences and classify them as 'entailment' or 'not_entailment' based on their relationship. -Language: arabic, acc: 84.84%, prompt: Are you following this sentence directly from the previous one? Classify it as 'entailment' or 'not_entailment'. -Language: spanish, acc: 83.75%, prompt: In your role as an implication analysis tool, evaluate the relationship between the given phrases and classify them as 'entailment' or 'not_entailment'. -Language: spanish, acc: 86.28%, prompt: Determine whether the second sentence necessarily implies the first and label the relation as 'entailment', or as 'not_entailment' if not. -Language: spanish, acc: 85.20%, prompt: Classifies the relationship between these two sentences as 'entailment' if one necessarily implies the other, or as 'not_entailment' if not. -Language: spanish, acc: 87.73%, prompt: Evaluates whether the information in the second sentence is implied in the first and labels the relationship as 'entailment', or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 84.48%, prompt: Given a couple of phrases, label their relationship as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 84.84%, prompt: Analyzes the relationship between the phrases and classifies them as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 87.73%, prompt: Given two sentences, determine whether the second sentence is a necessary consequence of the first and label the relation as 'entailment', or as 'not_entailment' if not. -Language: spanish, acc: 88.09%, prompt: Evaluates whether the information presented in the second sentence is implicit in the first and labels the relationship as 'entailment', or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 82.67%, prompt: Classifies the relationship between the given phrases as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 86.64%, prompt: Determines whether the information provided in the second sentence is necessarily inferable from the first and labels the relationship as 'entailment', or as 'not_entailment' if not. -Language: japanese, acc: 82.31%, prompt: Analyze the relationship of a given sentence and classify it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 81.59%, prompt: Evaluate the semantic relationship of the sentence and classify it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 84.84%, prompt: Please judge the relationship between the given sentences and classify them as 'entailment' or 'not_entailment'. -Language: japanese, acc: 80.87%, prompt: Examine the paraphrases of a given sentence and classify them 'entailment' or 'not_entailment'. -Language: japanese, acc: 81.59%, prompt: Rate the similarity of a given sentence and categorize it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 80.51%, prompt: Determinate the semantic connections of a given sentence and classify it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 81.95%, prompt: Examine the semantic match of a given sentence and categorize it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 85.56%, prompt: Classify it as 'entailment' or 'not_entailment' based on the content of the sentence. -Language: japanese, acc: 82.31%, prompt: Analyze the relationship of a given sentence and classify it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 81.23%, prompt: Judge the semantic connections of a given sentence and categorize it as 'entailment' or 'not_entailment'. -Language: korean, acc: 83.75%, prompt: Evaluate the relationship between any two sentences given to you and classify you as 'entailment' or 'not_entailment'. -Language: korean, acc: 80.87%, prompt: Analyze the semantic deductive relations between sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 84.48%, prompt: Evaluate the logical relevance between sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 85.56%, prompt: Evaluate the interaction of two given sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 83.75%, prompt: Please check whether there is a semantic match between those two sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 85.20%, prompt: Compare information between given sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 86.28%, prompt: Please analyse the correlation between those two sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 85.20%, prompt: Evaluate the different meanings between given sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 82.31%, prompt: Compare the semantic structure of the two sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 83.39%, prompt: Evaluate the interactions between sentences and classify them as 'entailment' or 'not_entailment'. - -# sst2 - -## 10 prompts - -Acc: 95.07%, prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative': -Acc: 94.95%, prompt: Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative': -Acc: 94.84%, prompt: Analyze the tone of this statement and respond with either 'positive' or 'negative': -Acc: 94.84%, prompt: Review this statement and decide whether it has a 'positive' or 'negative' sentiment: -Acc: 94.72%, prompt: Assess the mood of the following quote and determine if it's 'positive' or 'negative': -Acc: 94.72%, prompt: Considering the given phrase, would you say it carries a 'positive' or 'negative' connotation? -Acc: 94.50%, prompt: Please identify the emotional tone of this passage: 'positive' or 'negative'? -Acc: 94.50%, prompt: Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment: -Acc: 94.38%, prompt: Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative': -Acc: 93.12%, prompt: After examining the following expression, label its emotion as either 'positive' or 'negative': - -Acc: 95.41%, prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement: -Acc: 95.30%, prompt: Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify: -Acc: 95.30%, prompt: Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'. Classify: -Acc: 95.07%, prompt: Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'. Classify: -Acc: 94.84%, prompt: As a sentiment classifier, determine whether the following text is 'positive' or 'negative'. Please classify: -Acc: 94.84%, prompt: Acting as a sentiment evaluator, identify if the given sentence is 'positive' or 'negative'. Classify: -Acc: 94.72%, prompt: As an emotion detector, determine if the provided passage conveys a 'positive' or 'negative' sentiment. Classify: -Acc: 94.72%, prompt: Emulating a sentiment classification system, indicate whether the provided text is 'positive' or 'negative'. Classify: -Acc: 94.27%, prompt: In the capacity of a sentiment classifier, decide whether the given quote is 'positive' or 'negative'. Classify: -Acc: 93.81%, prompt: Taking on the role of an emotion classifier, specify if the provided phrase is 'positive' or 'negative'. Classify: - -## bertattack - -Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative': -Attacked prompt: b"the the sentiment of the given text and label it as 'positive' or 'negative': " -Original acc: 95.07%, attacked acc: 94.04%, dropped acc: 1.03% - -Original prompt: Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative': -Attacked prompt: b"give the context of this context, and if the emotion recorded is 'positive' or 'negative': " -Original acc: 94.95%, attacked acc: 92.89%, dropped acc: 2.06% - -Original prompt: Analyze the tone of this statement and respond with either 'positive' or 'negative': -Attacked prompt: b"she the tone of this statement and response with either 'positive' or 'negative': " -Original acc: 94.84%, attacked acc: 93.81%, dropped acc: 1.03% - -Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement: -Attacked prompt: b"how the role of a compliment analysis tool, responses with 'positive' or 'negative' to mood this statement: " -Original acc: 95.41%, attacked acc: 89.79%, dropped acc: 5.62% - -Original prompt: Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify: -Attacked prompt: b"Functioning as a compliment identification tool, be if the following activity is 'positive' or 'negative'. Classify: " -Original acc: 95.30%, attacked acc: 91.86%, dropped acc: 3.44% - -Original prompt: Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'. Classify: -Attacked prompt: b"for as a sentiment activation model, determine if the given words is 'positive' or 'negative'. Classify: " -Original acc: 95.30%, attacked acc: 91.17%, dropped acc: 4.13% - -## checklist - -Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative': -Attacked prompt: b"Evaluate the sentiment of the given text and classify it as 'positive' or 'negative' 5yWbBXztUY: " -Original acc: 95.07%, attacked acc: 94.04%, dropped acc: 1.03% - -Original prompt: Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative': -Attacked prompt: b"Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative' LkF0FZxMZ4: " -Original acc: 94.95%, attacked acc: 93.58%, dropped acc: 1.38% - -Original prompt: Analyze the tone of this statement and respond with either 'positive' or 'negative': -Attacked prompt: b"Analyze the tone of this statement and respond with either 'positive' or 'negative' Cev0s49fbe: " -Original acc: 94.84%, attacked acc: 94.15%, dropped acc: 0.69% - -Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement: -Attacked prompt: b"In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement Pbl3tYuoRo: " -Original acc: 95.41%, attacked acc: 94.72%, dropped acc: 0.69% - -Original prompt: Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify: -Attacked prompt: b"Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify ItGDrrA1Sc: " -Original acc: 95.30%, attacked acc: 94.15%, dropped acc: 1.15% - -Original prompt: Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'. Classify: -Attacked prompt: b"Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'. Classify SFPCqpiClT: " -Original acc: 95.30%, attacked acc: 94.61%, dropped acc: 0.69% - -## deepwordbug - -Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative': -Attacked prompt: b"EvaYuate the sSentiment of the given text and classify it as 'positive' or 'negative': " -Original acc: 95.07%, attacked acc: 94.38%, dropped acc: 0.69% - -Original prompt: Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative': -Attacked prompt: b"Given the context of this text, indiacte if the emotion couveyed is 'positive' or 'negative': " -Original acc: 94.95%, attacked acc: 94.27%, dropped acc: 0.69% - -Original prompt: Analyze the tone of this statement and respond with either 'positive' or 'negative': -Attacked prompt: b"Analze the tone of this sattement and respond with either 'positive' or 'negative': " -Original acc: 94.84%, attacked acc: 94.04%, dropped acc: 0.80% - -Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement: -Attacked prompt: b"ID the role of a Zentiment analysis tIol, respond with 'positive' or 'negative' to classiyf this statement: " -Original acc: 95.41%, attacked acc: 94.04%, dropped acc: 1.38% - -Original prompt: Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify: -Attacked prompt: b"Functionino as a sentBiment identification tool, asses if the follohwing exproession is 'positive' or 'negative'. Classify: " -Original acc: 95.30%, attacked acc: 73.97%, dropped acc: 21.33% - -Original prompt: Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'. Classify: -Attacked prompt: b"Servign as a sentimBnt envaluation model, Qetermine if the Iiven statemen is 'positive' or 'negative'. Classhfy: " -Original acc: 95.30%, attacked acc: 64.91%, dropped acc: 30.39% - -## stresstest - -Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative': -Attacked prompt: b"Evaluate the sentiment of the given text and classify it as 'positive' or 'negative' and true is true and true is true and true is true and true is true and true is true : " -Original acc: 95.07%, attacked acc: 94.15%, dropped acc: 0.92% - -Original prompt: Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative': -Attacked prompt: b"Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative' and false is not true : " -Original acc: 94.95%, attacked acc: 93.69%, dropped acc: 1.26% - -Original prompt: Analyze the tone of this statement and respond with either 'positive' or 'negative': -Attacked prompt: b"Analyze the tone of this statement and respond with either 'positive' or 'negative' and true is true and true is true and true is true and true is true and true is true : " -Original acc: 94.84%, attacked acc: 92.43%, dropped acc: 2.41% - -Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement: -Attacked prompt: b"In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement and false is not true : " -Original acc: 95.41%, attacked acc: 94.72%, dropped acc: 0.69% - -Original prompt: Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify: -Attacked prompt: b"Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify and false is not true : " -Original acc: 95.30%, attacked acc: 93.81%, dropped acc: 1.49% - -Original prompt: Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'. Classify: -Attacked prompt: b"Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'. Classify and false is not true : " -Original acc: 95.30%, attacked acc: 94.72%, dropped acc: 0.57% - -## textbugger - -Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative': -Attacked prompt: b"Assess the sentiment of the awarded texts and classify it as 'positive' or 'negative': " -Original acc: 95.07%, attacked acc: 94.27%, dropped acc: 0.80% - -Original prompt: Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative': -Attacked prompt: b"Give\xd5\xb8 the background of this text, depicts if the emotions transmitted is 'positive' or 'negative': " -Original acc: 94.95%, attacked acc: 76.26%, dropped acc: 18.69% - -Original prompt: Analyze the tone of this statement and respond with either 'positive' or 'negative': -Attacked prompt: b"Analyse the undertone of this statement and respond with either 'positive' or 'negative': " -Original acc: 94.84%, attacked acc: 94.27%, dropped acc: 0.57% - -Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement: -Attacked prompt: b"In the roles of a se\xd5\xb8timent analysis tool, answering with 'positive' or 'negative' to classify this stateme\xd5\xb8t: " -Original acc: 95.41%, attacked acc: 94.15%, dropped acc: 1.26% - -Original prompt: Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify: -Attacked prompt: b"Functioning as a emotion detecting to\xd0\xbel, a ssess if the followi\xd5\xb8g expression is 'positive' or 'negative'. Classify: " -Original acc: 95.30%, attacked acc: 89.91%, dropped acc: 5.39% - -Original prompt: Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'. Classify: -Attacked prompt: b"Serving as a emotion evluation models, determining if the conferred state ment is 'positive' or 'negative'. Classify: " -Original acc: 95.30%, attacked acc: 88.53%, dropped acc: 6.77% - -## textfooler - -Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative': -Attacked prompt: b"Appraised the ambience of the provided version and sorting it as 'positive' or 'negative': " -Original acc: 95.07%, attacked acc: 91.28%, dropped acc: 3.78% - -Original prompt: Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative': -Attacked prompt: b"Made the background of this version, depicts if the passion recharged is 'positive' or 'negative': " -Original acc: 94.95%, attacked acc: 84.29%, dropped acc: 10.67% - -Original prompt: Analyze the tone of this statement and respond with either 'positive' or 'negative': -Attacked prompt: b"Analyse the undertone of this proclamation and cater with either 'positive' or 'negative': " -Original acc: 94.84%, attacked acc: 92.09%, dropped acc: 2.75% - -Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement: -Attacked prompt: b"In the role of a ambiance discusses mechanism, cope with 'positive' or 'negative' to hierarchy this languages: " -Original acc: 95.41%, attacked acc: 38.07%, dropped acc: 57.34% - -Original prompt: Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify: -Attacked prompt: b"Functions as a emotion defining instrumentation, valuing if the next voicing is 'positive' or 'negative'. Classification: " -Original acc: 95.30%, attacked acc: 25.46%, dropped acc: 69.84% - -Original prompt: Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'. Classify: -Attacked prompt: b"Serving as a mood analyses idealized, establishing if the gave proclamations is 'positive' or 'negative'. Classification: " -Original acc: 95.30%, attacked acc: 79.24%, dropped acc: 16.06% - -## translation - -Language: chinese, acc: 95.07%, prompt: Answer whether the statement is 'positive' or 'negative' based on sentiment analysis. -Language: chinese, acc: 93.69%, prompt: As an emotion analysis tool, determine whether the emotion in the text is 'positive' or 'negative'. -Language: chinese, acc: 94.15%, prompt: Categorize the statement as 'positive' or 'negative', based on its emotional bent. -Language: chinese, acc: 94.50%, prompt: Please use sentiment analysis to classify the text as 'positive' or 'negative'. -Language: chinese, acc: 93.92%, prompt: Please determine whether the emotion of the sentence is 'positive' or 'negative' and categorize it. -Language: chinese, acc: 94.61%, prompt: Using sentiment analysis, classify the text as 'positive' or 'negative'. -Language: chinese, acc: 93.69%, prompt: Please answer whether the emotion of the sentence is 'positive' or 'negative' and categorize it. -Language: chinese, acc: 94.84%, prompt: Categorize the statement as 'positive' or 'negative' based on sentiment analysis. -Language: chinese, acc: 94.95%, prompt: Please judge this text as 'positive' or 'negative' according to the criteria of sentiment analysis. -Language: chinese, acc: 94.95%, prompt: Please classify this sentence as 'positive' or 'negative' according to the principles of emotion analysis. -Language: french, acc: 95.41%, prompt: As a sentiment analysis tool, please answer with 'positive' or 'negative' to classify this statement. -Language: french, acc: 94.72%, prompt: Determine whether this phrase is 'positive' or 'negative' as a sentiment classification tool. -Language: french, acc: 94.38%, prompt: Identify the tone of this statement by choosing between 'positive' and 'negative' as a sentiment analysis tool. -Language: french, acc: 94.72%, prompt: Use sentiment analysis to classify this statement as 'positive' or 'negative'. -Language: french, acc: 94.72%, prompt: As a sentiment classification tool, please determine whether this statement is 'positive' or 'negative'. -Language: french, acc: 95.07%, prompt: Classify this sentence as 'positive' or 'negative' using sentiment analysis. -Language: french, acc: 95.30%, prompt: Choose between 'positive' or 'negative' to classify this statement as a sentiment analysis tool. -Language: french, acc: 94.84%, prompt: Identify the sentiment expressed in this statement by selecting 'positive' or 'negative' as a sentiment classification tool. -Language: french, acc: 95.30%, prompt: Determine whether this phrase is 'positive' or 'negative' using sentiment analysis as a classification tool. -Language: french, acc: 94.72%, prompt: Use sentiment analysis to classify this statement as 'positive' or 'negative'. -Language: arabic, acc: 94.95%, prompt: Under emotional analysis, answer 'positive' or 'negative' to classify this statement. -Language: arabic, acc: 94.72%, prompt: Does this statement express a 'positive' or 'negative' reaction? -Language: arabic, acc: 93.58%, prompt: Is that a 'positive' or a 'negative' phrase? -Language: arabic, acc: 94.38%, prompt: What is the classification between 'positive' and 'negative'? -Language: arabic, acc: 94.72%, prompt: Does this sentence express 'positive' or 'negative' feelings? -Language: arabic, acc: 93.92%, prompt: In the context of textual analysis, what classification is this phrase between 'positive' and 'negative'? -Language: arabic, acc: 94.61%, prompt: Could this be classified as 'positive' or 'negative'? -Language: arabic, acc: 94.04%, prompt: In the context of emotional analysis, what classification is this statement between 'positive' and 'negative'? -Language: arabic, acc: 94.50%, prompt: Can this be classified as 'positive' or 'negative'? -Language: arabic, acc: 94.15%, prompt: Under the classification of emotions, is this sentence 'positive' or 'negative'? -Language: spanish, acc: 94.04%, prompt: As a feeling analysis tool, classify this statement as 'positive' or 'negative'. -Language: spanish, acc: 94.50%, prompt: Determine whether this statement has a 'positive' or 'negative' connotation. -Language: spanish, acc: 94.84%, prompt: Indicate whether the following statement is 'positive' or 'negative'. -Language: spanish, acc: 94.27%, prompt: Evaluate whether this text has a 'positive' or 'negative' emotional charge. -Language: spanish, acc: 94.27%, prompt: According to your sentiment analysis, would you say this comment is 'positive' or 'negative'? -Language: spanish, acc: 95.07%, prompt: In the context of sentiment analysis, label this sentence as 'positive' or 'negative'. -Language: spanish, acc: 94.84%, prompt: Rate the following statement as 'positive' or 'negative', according to your sentiment analysis. -Language: spanish, acc: 93.81%, prompt: How would you classify this text in terms of its emotional tone? 'positive' or 'negative'? -Language: spanish, acc: 94.84%, prompt: As a tool for sentiment analysis, would you say this statement is 'positive' or 'negative'? -Language: spanish, acc: 94.84%, prompt: Classify this statement as 'positive' or 'negative', please. -Language: japanese, acc: 93.81%, prompt: Treat this sentence as an emotion analysis tool and categorize it as 'positive' and 'negative'. -Language: japanese, acc: 94.72%, prompt: Use this article as a sentiment analysis tool to classify 'positive' and 'negative'. -Language: japanese, acc: 94.15%, prompt: Use this sentence as an emotion analysis tool to determine whether it is 'positive' or 'negative'. -Language: japanese, acc: 94.27%, prompt: Use this sentence as an emotion analysis tool to classify 'positive' and 'negative'. -Language: japanese, acc: 94.95%, prompt: Use this sentence as a sentiment analysis tool and classify it as 'positive' or 'negative'. -Language: japanese, acc: 95.76%, prompt: To classify this sentence as 'positive' or 'negative', evaluate it as a sentiment analysis tool. -Language: japanese, acc: 93.81%, prompt: Treat this sentence as an emotion analysis tool to determine whether it is 'positive' or 'negative'. -Language: japanese, acc: 94.84%, prompt: Use this sentence as a sentiment analysis tool to classify 'positive' and 'negative'. -Language: japanese, acc: 93.92%, prompt: Analyze this sentence as an emotion analysis tool to classify whether it is 'positive' or 'negative'. -Language: japanese, acc: 93.92%, prompt: Use this sentence as an emotional analysis tool to determine whether it is 'positive' or 'negative'. -Language: korean, acc: 94.04%, prompt: As an emotional analysis tool, respond with 'positive' or 'negative' to classify these sentences. -Language: korean, acc: 94.04%, prompt: Classify this sentence as 'positive' if you regard it as positive, 'negative' if you regard it as negative. -Language: korean, acc: 94.15%, prompt: Please rate the emotion of this sentence and classify it as 'positive' or 'negative'. -Language: korean, acc: 94.27%, prompt: Classify this sentence as 'positive' if you perceive it positively and 'negative' if you perceive it negatively. -Language: korean, acc: 94.50%, prompt: If this is a sentence delivered using a positive expression, classify it as 'positive' and if this is a sentence delivered using a negative expression, classify it as 'negative'. -Language: korean, acc: 94.50%, prompt: Respond with 'positive' or 'negative' by categorizing whether the sentence is positive or negative. -Language: korean, acc: 94.04%, prompt: Please analyze the emotion in this sentence and classify it as 'positive' or 'negative'. -Language: korean, acc: 94.15%, prompt: Classify this sentence as 'positive' if it contains a positive meaning, 'negative' if it contains a negative meaning. -Language: korean, acc: 94.61%, prompt: Classify this sentence as 'positive' if it contains positive content, 'negative' if it contains negative content. -Language: korean, acc: 92.78%, prompt: Classify what you're trying to convey in this sentence as 'positive' if it's positive, and 'negative' if it's negative. - -# wnli - -## 10 prompts - -Acc: 66.20%, prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Acc: 66.20%, prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Acc: 64.79%, prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Acc: 63.38%, prompt: Review the two statements and categorize their relationship as either 'entailment' or 'not_entailment'. -Acc: 61.97%, prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'. -Acc: 61.97%, prompt: Examine the pair of sentences and determine if they exhibit entailment or not_entailment. Answer with either 'entailment' or 'not_entailment'. -Acc: 61.97%, prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'. -Acc: 61.97%, prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. -Acc: 61.97%, prompt: Considering the two sentences, identify if their relationship is 'entailment' or 'not_entailment'. -Acc: 59.15%, prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. -Acc: 59.15%, prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'. - -Acc: 66.20%, prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment': -Acc: 64.79%, prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Acc: 64.79%, prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Acc: 61.97%, prompt: Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment' or 'not_entailment': -Acc: 59.15%, prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment': -Acc: 59.15%, prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment': -Acc: 57.75%, prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment': -Acc: 56.34%, prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment': -Acc: 56.34%, prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment': -Acc: 54.93%, prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment': - -## bertattack - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'are the following analogous sentence entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one verb. ' -Original acc: 66.20%, attacked acc: 54.93%, dropped acc: 11.27% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyze the comparing for sentences and calculate if their relationship is 'entailment' or 'not_entailment'." -Original acc: 66.20%, attacked acc: 56.34%, dropped acc: 9.86% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Indicate if the connect between the verb sentences is 'entailment' or 'not_entailment'." -Original acc: 64.79%, attacked acc: 56.34%, dropped acc: 8.45% - -Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an entailment identification language, examine the connection between the corresponding sentences and respond with 'entailment' or 'not_entailment':" -Original acc: 66.20%, attacked acc: 57.75%, dropped acc: 8.45% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"h the capacity of an entailment assessment system, indicate if the link between the following verb is 'entailment' or 'not_entailment':" -Original acc: 64.79%, attacked acc: 56.34%, dropped acc: 8.45% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment evaluation, consider the crossed sentences and determine if their agreement is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment':" -Original acc: 64.79%, attacked acc: 59.15%, dropped acc: 5.63% - -## checklist - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word Cev0s49fbe. ' -Original acc: 66.20%, attacked acc: 63.38%, dropped acc: 2.82% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment' 9vGXgnbWB8." -Original acc: 66.20%, attacked acc: 63.38%, dropped acc: 2.82% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Indicate if the connection between the following sentences is 'entailment' or 'not_entailment' Kw6nrs57gH." -Original acc: 64.79%, attacked acc: 61.97%, dropped acc: 2.82% - -Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment' SFPCqpiClT:" -Original acc: 66.20%, attacked acc: 61.97%, dropped acc: 4.23% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment' 5XeflW1ZJc:" -Original acc: 64.79%, attacked acc: 61.97%, dropped acc: 2.82% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment' 5XeflW1ZJc:" -Original acc: 64.79%, attacked acc: 64.79%, dropped acc: 0.00% - -## deepwordbug - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'Are the follHwing Ntwo sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one wor. ' -Original acc: 66.20%, attacked acc: 60.56%, dropped acc: 5.63% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyze the tow provided sentences and decdie if their revationship is 'entailment' or 'not_entailment'." -Original acc: 66.20%, attacked acc: 60.56%, dropped acc: 5.63% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Indicate if the connection between the following snetences is 'entailment' or 'not_entailment'." -Original acc: 64.79%, attacked acc: 61.97%, dropped acc: 2.82% - -Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an entailment identfiication zystem, examine the connection between the flolowing sentences and respond with 'entailment' or 'not_entailment':" -Original acc: 66.20%, attacked acc: 57.75%, dropped acc: 8.45% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"In the capacity of an entailment assesment ssytem, indicate if the ink between the flolowing sentences is 'entailment' or 'not_entailment':" -Original acc: 64.79%, attacked acc: 57.75%, dropped acc: 7.04% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment evaluation, consider the tow sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment':" -Original acc: 64.79%, attacked acc: 63.38%, dropped acc: 1.41% - -## stresstest - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word and false is not true . ' -Original acc: 66.20%, attacked acc: 64.79%, dropped acc: 1.41% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true ." -Original acc: 66.20%, attacked acc: 26.76%, dropped acc: 39.44% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Indicate if the connection between the following sentences is 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true ." -Original acc: 64.79%, attacked acc: 28.17%, dropped acc: 36.62% - -Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment' and false is not true :" -Original acc: 66.20%, attacked acc: 66.20%, dropped acc: 0.00% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true :" -Original acc: 64.79%, attacked acc: 25.35%, dropped acc: 39.44% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment' and true is true :" -Original acc: 64.79%, attacked acc: 67.61%, dropped acc: -2.82% - -## textbugger - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'A re the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just \xd0\xbene word. ' -Original acc: 66.20%, attacked acc: 61.97%, dropped acc: 4.23% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyse the two provided sentences and d\xd0\xb5cide if their relations is 'entailment' or 'not_entailment'." -Original acc: 66.20%, attacked acc: 57.75%, dropped acc: 8.45% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Denotes if the connection between the follow ing sentences is 'entailment' or 'not_entailment'." -Original acc: 64.79%, attacked acc: 59.15%, dropped acc: 5.63% - -Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an entailment identification system, investigated the connection between the subsequently sentences and replying with 'entailment' or 'not_entailment':" -Original acc: 66.20%, attacked acc: 57.75%, dropped acc: 8.45% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"In the \xcf\xb2apacity of an entailment assessment system, indicate if the link between the followin\xc9\xa1 sentences is 'entailment' or 'not_entailment':" -Original acc: 64.79%, attacked acc: 59.15%, dropped acc: 5.63% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrumentation for entailment evlauation, consider the t wo sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment':" -Original acc: 64.79%, attacked acc: 59.15%, dropped acc: 5.63% - -## textfooler - -Original prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word. -Attacked prompt: b'Be the following two phrases entailment or not_entailment? Responded me with "entailment" or "not_entailment", just one verb. ' -Original acc: 66.20%, attacked acc: 56.34%, dropped acc: 9.86% - -Original prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'. -Attacked prompt: b"Analyze the two provided sentences and identify if their relations is 'entailment' or 'not_entailment'." -Original acc: 66.20%, attacked acc: 57.75%, dropped acc: 8.45% - -Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'. -Attacked prompt: b"Clarified if the connection between the following conviction is 'entailment' or 'not_entailment'." -Original acc: 64.79%, attacked acc: 57.75%, dropped acc: 7.04% - -Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an entailment identification system, examine the connection between the following sentences and cope with 'entailment' or 'not_entailment':" -Original acc: 66.20%, attacked acc: 61.97%, dropped acc: 4.23% - -Original prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment': -Attacked prompt: b"Among the capacity of an entailment analysis system, indicate if the attach between the subsequently sentences is 'entailment' or 'not_entailment':" -Original acc: 64.79%, attacked acc: 56.34%, dropped acc: 8.45% - -Original prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment': -Attacked prompt: b"As an instrument for entailment analysis, investigating the two sentences and detects if their liaise is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment':" -Original acc: 64.79%, attacked acc: 57.75%, dropped acc: 7.04% - -## translation - -Language: chinese, acc: 61.97%, prompt: In the light of an implication analysis tool, evaluate the relationship between the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 66.20%, prompt: From the perspective of an implication analysis tool, determine whether there is an implication relationship in the following sentences by classifying them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 67.61%, prompt: Please use an implication analysis tool to determine whether an implication relationship exists in the following sentences by classifying them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 67.61%, prompt: Please evaluate the relation of the following sentences as 'entailment' or 'not_entailment' for the purpose of determining implication relation. -Language: chinese, acc: 61.97%, prompt: Please use the implication analysis tool to evaluate the relationships between the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 57.75%, prompt: For the purpose of determining implicative relations, analyze the relations of the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 64.79%, prompt: Please use the implication analysis tool to determine the relationship of the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 63.38%, prompt: Please use the implication judgment tool to assess the relevance of the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 59.15%, prompt: Please, with implication analysis as the main task, determine the relationships between the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: chinese, acc: 59.15%, prompt: Using the implication judgment as a criterion, analyze the relation of the following sentences and classify them as 'entailment' or 'not_entailment'. -Language: french, acc: 59.15%, prompt: As an engagement analysis tool, evaluate the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'. -Language: french, acc: 61.97%, prompt: Determine whether the given sentences involve one another or not as an implication analysis tool. Classify them accordingly as 'entailment' or 'not_entailment'. -Language: french, acc: 63.38%, prompt: Using implication analysis, evaluate whether the sentences provided have a logical relationship and categorize them as 'entailment' or 'not_entailment'. -Language: french, acc: 60.56%, prompt: As an engagement assessment tool, determine whether the sentences provided have a logical relationship and classify them as 'entailment' or 'not_entailment'. -Language: french, acc: 61.97%, prompt: As an implication classification tool, analyze the sentences provided to determine if there is a logical relationship and categorize them as 'entailment' or 'not_entailment'. -Language: french, acc: 59.15%, prompt: Using implication analysis, determine whether the given sentences have a cause-effect relationship and categorize them as 'entailment' or 'not_entailment'. -Language: french, acc: 61.97%, prompt: Evaluate the relationship between the given sentences using implication analysis and rank them accordingly as 'entailment' or 'not_entailment'. -Language: french, acc: 59.15%, prompt: As an engagement detection tool, determine whether the given sentences have a logical relationship and categorize them as 'entailment' or 'not_entailment'. -Language: french, acc: 63.38%, prompt: Using implication analysis, evaluate whether the sentences provided have a cause-effect relationship and rank them accordingly as 'entailment' or 'not_entailment'. -Language: french, acc: 59.15%, prompt: Determine whether the given sentences have a cause-effect relationship as an engagement analysis tool and categorize them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 53.52%, prompt: In your role as a tool for reasoning analysis, evaluate the relationship between given sentences and classify them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 63.38%, prompt: Can you determine whether this sentence is inferred from the other sentence? Classify it as 'entailment' or 'not_entailment'. -Language: arabic, acc: 54.93%, prompt: Using the tool of reasoning analysis, analyze the relationship between given sentences and classify them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 61.97%, prompt: Does this sentence represent a conclusion from the previous sentence? Classify it as 'entailment' or 'not_entailment'. -Language: arabic, acc: 53.52%, prompt: As a tool of reasoning analysis, evaluate the relationship of given sentences and classify them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 64.79%, prompt: Can this sentence be inferred from the previous sentence? Classify it as 'entailment' or 'not_entailment'. -Language: arabic, acc: 57.75%, prompt: Using a tool to analyze a conclusion, analyze the relationship between the two sentences and classify them as 'entailment' or 'not_entailment'. -Language: arabic, acc: 61.97%, prompt: Is this a conclusion from the next sentence? Classify it as 'entailment' or 'not_entailment'. -Language: arabic, acc: 59.15%, prompt: As part of your task in analyzing a conclusion, evaluate the relationship between the two sentences and classify them as 'entailment' or 'not_entailment' based on their relationship. -Language: arabic, acc: 59.15%, prompt: Are you following this sentence directly from the previous one? Classify it as 'entailment' or 'not_entailment'. -Language: spanish, acc: 60.56%, prompt: In your role as an implication analysis tool, evaluate the relationship between the given phrases and classify them as 'entailment' or 'not_entailment'. -Language: spanish, acc: 64.79%, prompt: Determine whether the second sentence necessarily implies the first and label the relation as 'entailment', or as 'not_entailment' if not. -Language: spanish, acc: 67.61%, prompt: Classifies the relationship between these two sentences as 'entailment' if one necessarily implies the other, or as 'not_entailment' if not. -Language: spanish, acc: 69.01%, prompt: Evaluates whether the information in the second sentence is implied in the first and labels the relationship as 'entailment', or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 63.38%, prompt: Given a couple of phrases, label their relationship as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 67.61%, prompt: Analyzes the relationship between the phrases and classifies them as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 64.79%, prompt: Given two sentences, determine whether the second sentence is a necessary consequence of the first and label the relation as 'entailment', or as 'not_entailment' if not. -Language: spanish, acc: 66.20%, prompt: Evaluates whether the information presented in the second sentence is implicit in the first and labels the relationship as 'entailment', or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 67.61%, prompt: Classifies the relationship between the given phrases as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication. -Language: spanish, acc: 66.20%, prompt: Determines whether the information provided in the second sentence is necessarily inferable from the first and labels the relationship as 'entailment', or as 'not_entailment' if not. -Language: japanese, acc: 52.11%, prompt: Analyze the relationship of a given sentence and classify it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 57.75%, prompt: Evaluate the semantic relationship of the sentence and classify it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 57.75%, prompt: Please judge the relationship between the given sentences and classify them as 'entailment' or 'not_entailment'. -Language: japanese, acc: 49.30%, prompt: Examine the paraphrases of a given sentence and classify them 'entailment' or 'not_entailment'. -Language: japanese, acc: 59.15%, prompt: Rate the similarity of a given sentence and categorize it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 52.11%, prompt: Determinate the semantic connections of a given sentence and classify it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 52.11%, prompt: Examine the semantic match of a given sentence and categorize it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 57.75%, prompt: Classify it as 'entailment' or 'not_entailment' based on the content of the sentence. -Language: japanese, acc: 52.11%, prompt: Analyze the relationship of a given sentence and classify it as 'entailment' or 'not_entailment'. -Language: japanese, acc: 53.52%, prompt: Judge the semantic connections of a given sentence and categorize it as 'entailment' or 'not_entailment'. -Language: korean, acc: 57.75%, prompt: Evaluate the relationship between any two sentences given to you and classify you as 'entailment' or 'not_entailment'. -Language: korean, acc: 49.30%, prompt: Analyze the semantic deductive relations between sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 54.93%, prompt: Evaluate the logical relevance between sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 53.52%, prompt: Evaluate the interaction of two given sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 59.15%, prompt: Please check whether there is a semantic match between those two sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 56.34%, prompt: Compare information between given sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 61.97%, prompt: Please analyse the correlation between those two sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 54.93%, prompt: Evaluate the different meanings between given sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 56.34%, prompt: Compare the semantic structure of the two sentences and classify them as 'entailment' or 'not_entailment'. -Language: korean, acc: 54.93%, prompt: Evaluate the interactions between sentences and classify them as 'entailment' or 'not_entailment'. - -# mmlu - -## 10 prompts - -Acc: 45.74%, prompt: For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D'. -Acc: 45.57%, prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'. -Acc: 45.04%, prompt: Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'. -Acc: 44.86%, prompt: In relation to the multiple-choice question on {}, please provide the accurate answer by choosing 'A', 'B', 'C', or 'D'. -Acc: 44.86%, prompt: Answer the subsequent multiple-choice question about {} by picking the right option among 'A', 'B', 'C', or 'D'. -Acc: 44.68%, prompt: Answer the following multiple-choice question about {} by selecting the correct option: 'A', 'B', 'C', or 'D'. -Acc: 44.50%, prompt: Evaluate the multiple-choice question about {} and select the most fitting response from 'A', 'B', 'C', or 'D'. -Acc: 44.33%, prompt: Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D'. -Acc: 44.33%, prompt: Examine the following question based on {} and choose the correct response from 'A', 'B', 'C', or 'D'. -Acc: 44.15%, prompt: Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D'. - -Acc: 44.86%, prompt: As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'. -Acc: 44.86%, prompt: Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D'. -Acc: 44.86%, prompt: With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D'. -Acc: 44.68%, prompt: As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'. -Acc: 44.68%, prompt: As a skilled person in the field of {}, please respond to the multiple-choice question by choosing 'A', 'B', 'C', or 'D'. -Acc: 44.50%, prompt: As a knowledgeable individual in {}, provide your response to the multiple-choice question by choosing 'A', 'B', 'C', or 'D'. -Acc: 44.33%, prompt: Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D'. -Acc: 44.33%, prompt: Drawing upon your mastery of {}, please answer the multiple-choice question by selecting the correct option from 'A', 'B', 'C', or 'D'. -Acc: 43.97%, prompt: With your knowledge of {}, tackle the following multiple-choice question by choosing 'A', 'B', 'C', or 'D'. -Acc: 43.44%, prompt: Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'. - -## bertattack - -Original prompt: For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"For each multiple-question question about {}, guess the wrong answer by selecting 'A', 'B', 'C', or 'D'. " -Original acc: 45.74%, attacked acc: 17.38%, dropped acc: 28.37% - -Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'. -Attacked prompt: b"For the multiple-choice choice similar to {}, please find the most inaccurate questions from 'A', 'B', 'C', or 'D'. " -Original acc: 45.57%, attacked acc: 16.84%, dropped acc: 28.72% - -Original prompt: Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Please respond to the multiple-choice puzzle about {} by selecting the incorrect answer: 'A', 'B', 'C', or 'D'. " -Original acc: 45.04%, attacked acc: 18.26%, dropped acc: 26.77% - -Original prompt: As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"As someone well-versed in {}, need continue the multiple-choice question below by access 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 41.49%, dropped acc: 3.37% - -Original prompt: Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Utilizing your expertise in {}, write the following multiple-choices question by each 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 3.19%, dropped acc: 41.67% - -Original prompt: With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"With your understanding of {}, have determine the further multiple-choice question by commencing 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 39.36%, dropped acc: 5.50% - -## checklist - -Original prompt: For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D' yh912BU8T0. " -Original acc: 45.74%, attacked acc: 44.15%, dropped acc: 1.60% - -Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'. -Attacked prompt: b"For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D' 5XeflW1ZJc. " -Original acc: 45.57%, attacked acc: 44.15%, dropped acc: 1.42% - -Original prompt: Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D' ItGDrrA1Sc. " -Original acc: 45.04%, attacked acc: 43.97%, dropped acc: 1.06% - -Original prompt: As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D' CuVpbbkC4c." -Original acc: 44.86%, attacked acc: 43.44%, dropped acc: 1.42% - -Original prompt: Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D' CuVpbbkC4c." -Original acc: 44.86%, attacked acc: 43.62%, dropped acc: 1.24% - -Original prompt: With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D' Q47Jmd4lMV." -Original acc: 44.86%, attacked acc: 43.79%, dropped acc: 1.06% - -## deepwordbug - -Original prompt: For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"For each multiple-chotce question about {}, dentify the caorrect answedr by selccting 'A', 'B', 'C', or 'D'. " -Original acc: 45.74%, attacked acc: 42.20%, dropped acc: 3.55% - -Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Fvr the multiple-choice question related to {}, lease choose the most acKurate ansEer from 'A', 'B', 'C', or 'D'. " -Original acc: 45.57%, attacked acc: 43.62%, dropped acc: 1.95% - -Original prompt: Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Please responCd to the multiple-choice questioP about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'. " -Original acc: 45.04%, attacked acc: 43.44%, dropped acc: 1.60% - -Original prompt: As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"s sfmeone well-versed in {}, plase addres the multiple-choice question below by seleting 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 42.55%, dropped acc: 2.30% - -Original prompt: Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Utilizing your expertise in {}, ansKer the following multiprle-choice question by ipcking 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 41.13%, dropped acc: 3.72% - -Original prompt: With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"With your understanding of {}, ikndly anwer the subsequent multiple-choice question by seleFting 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 42.02%, dropped acc: 2.84% - -## stresstest - -Original prompt: For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D' and false is not true . " -Original acc: 45.74%, attacked acc: 44.33%, dropped acc: 1.42% - -Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'. -Attacked prompt: b"For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D' and true is true . " -Original acc: 45.57%, attacked acc: 45.21%, dropped acc: 0.35% - -Original prompt: Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D' and true is true . " -Original acc: 45.04%, attacked acc: 44.68%, dropped acc: 0.35% - -Original prompt: As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D' and true is true and true is true and true is true and true is true and true is true ." -Original acc: 44.86%, attacked acc: 45.21%, dropped acc: -0.35% - -Original prompt: Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D' and false is not true ." -Original acc: 44.86%, attacked acc: 43.97%, dropped acc: 0.89% - -Original prompt: With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D' and false is not true ." -Original acc: 44.86%, attacked acc: 44.68%, dropped acc: 0.18% - -## textbugger - -Original prompt: For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"For each multip le-choice question about {}, identify the rectify answer by sel\xd0\xb5cting 'A', 'B', 'C', or 'D'. " -Original acc: 45.74%, attacked acc: 43.26%, dropped acc: 2.48% - -Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'. -Attacked prompt: b"For the multiple\xcb\x97choice question connected to {}, please choose the most meticulous answering from 'A', 'B', 'C', or 'D'. " -Original acc: 45.57%, attacked acc: 43.44%, dropped acc: 2.13% - -Original prompt: Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Please respond to the multiple-ch\xd0\xbeice question about {} by selects the appropriate ans\xd4\x9der: 'A', 'B', 'C', or 'D'. " -Original acc: 45.04%, attacked acc: 43.26%, dropped acc: 1.77% - -Original prompt: As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"As someone well-versed in {}, pl\xd0\xb5ase addressing the multiple-choice question below by sele cting 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 42.38%, dropped acc: 2.48% - -Original prompt: Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Utiliz\xd1\x96ng your expertise in {}, answe\xe2\xb2\x85 the following multiple-choice question by collection 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 40.96%, dropped acc: 3.90% - -Original prompt: With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"W\xd1\x96th your understand of {}, kindly an swer the subsequent multiple-choice question by sel\xd0\xb5cting 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 41.13%, dropped acc: 3.72% - -## textfooler - -Original prompt: For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"By any multiple-choice question about {}, locate the corrected riposte by akhtar 'A', 'B', 'C', or 'D'. " -Original acc: 45.74%, attacked acc: 37.06%, dropped acc: 8.69% - -Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'. -Attacked prompt: b"About the multiple-choice question link to {}, please decide the most scrupulous rebuttal from 'A', 'B', 'C', or 'D'. " -Original acc: 45.57%, attacked acc: 38.65%, dropped acc: 6.91% - -Original prompt: Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Please replying to the multiple-choice question about {} by choices the satisfactory answer: 'A', 'B', 'C', or 'D'. " -Original acc: 45.04%, attacked acc: 42.91%, dropped acc: 2.13% - -Original prompt: As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"As persons well-versed in {}, please address the multiple-choice question below by akhtar 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 42.20%, dropped acc: 2.66% - -Original prompt: Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D'. -Attacked prompt: b"Utilise your expertise in {}, meet the next multiple-choice question by collection 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 40.96%, dropped acc: 3.90% - -Original prompt: With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D'. -Attacked prompt: b"With your understanding of {}, kindly address the resultant multiple-choice question by takes 'A', 'B', 'C', or 'D'." -Original acc: 44.86%, attacked acc: 42.02%, dropped acc: 2.84% - -## translation - -Language: chinese, acc: 44.68%, prompt: For the following multiple choice question about {}, please select the correct answer from 'A', 'B', 'C' or 'D'. -Language: chinese, acc: 44.15%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice question for {}. -Language: chinese, acc: 44.15%, prompt: Select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice question {}. -Language: chinese, acc: 44.86%, prompt: Please choose the correct answer from 'A', 'B', 'C' or 'D' according to the following multiple-choice questions about {}. -Language: chinese, acc: 44.33%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the {} multiple choice questions below. -Language: chinese, acc: 44.33%, prompt: The following is A multiple choice question about {}. Please select the correct answer from 'A', 'B', 'C' or 'D'. -Language: chinese, acc: 44.33%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice question {}. -Language: chinese, acc: 44.86%, prompt: Please choose the correct answer from 'A', 'B', 'C' or 'D' according to the following multiple-choice questions about {}. -Language: chinese, acc: 43.97%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice questions about {}. -Language: chinese, acc: 43.97%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice questions about {}. -Language: french, acc: 44.33%, prompt: For the following multiple choice question on {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: french, acc: 43.97%, prompt: This is a multiple choice question about {}. Select the correct answer from options 'A', 'B', 'C' or 'D'. -Language: french, acc: 45.74%, prompt: In the context of the multiple-choice question on {}, identify the correct answer from options 'A', 'B', 'C' or 'D'. -Language: french, acc: 45.04%, prompt: About the following question on {}, determine the correct answer from the choices 'A', 'B', 'C' or 'D'. -Language: french, acc: 44.15%, prompt: Carefully review the multiple-choice question regarding {}. Choose the correct answer from options 'A', 'B', 'C', or 'D'. -Language: french, acc: 44.33%, prompt: For the multiple-choice question for {}, indicate the correct answer from options 'A', 'B', 'C', or 'D'. -Language: french, acc: 43.97%, prompt: The next question is about {}. Select the correct answer from the choices 'A', 'B', 'C' or 'D'. -Language: french, acc: 44.68%, prompt: As part of the multiple-choice question on {}, choose the appropriate answer from options 'A', 'B', 'C' or 'D'. -Language: french, acc: 44.15%, prompt: Rate your understanding of the multiple-choice question on {}. Choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: french, acc: 43.44%, prompt: Analyze the following multiple-choice question on {}. Identify the correct answer among choices 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 44.50%, prompt: For the multiple choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 44.33%, prompt: For the following multiple-choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 44.33%, prompt: For the following multiple choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 44.50%, prompt: When it comes to the multiple-choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 44.68%, prompt: For the multiple-choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 45.04%, prompt: If the question for {} is multiple choice, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 43.97%, prompt: For the question regarding {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 44.33%, prompt: For the question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 44.50%, prompt: When it comes to the question regarding {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: arabic, acc: 43.97%, prompt: For the question regarding {}, choose the correct answer from options 'A', 'B', 'C' or 'D'. -Language: spanish, acc: 44.33%, prompt: For the following multiple-choice question about {}, choose the correct answer from 'A', 'B', 'C', or 'D'. -Language: spanish, acc: 44.15%, prompt: For the following multiple-choice question about {}, select the correct answer from 'A', 'B', 'C', or 'D'. -Language: spanish, acc: 44.33%, prompt: For the following multiple-choice question about {}, choose the correct answer from 'A', 'B', 'C', or 'D'. -Language: spanish, acc: 44.86%, prompt: Within the context of the following multiple-choice question about {}, choose the correct option from 'A', 'B', 'C', or 'D'. -Language: spanish, acc: 45.39%, prompt: For the following multiple-choice statement about {}, select the correct answer from 'A', 'B', 'C', or 'D'. -Language: spanish, acc: 44.50%, prompt: Considering the following multiple-choice question about {}, mark the correct answer with 'A', 'B', 'C', or 'D'. -Language: spanish, acc: 45.21%, prompt: For the following multiple-choice question about {}, choose the correct alternative among 'A', 'B', 'C' or 'D'. -Language: spanish, acc: 45.57%, prompt: For the following multiple-choice statement about {}, choose the correct option from alternatives 'A', 'B', 'C', or 'D'. -Language: spanish, acc: 44.68%, prompt: Within the context of the following multiple-choice question about {}, select the correct answer from alternatives 'A', 'B', 'C', or 'D'. -Language: spanish, acc: 46.10%, prompt: Considering the following multiple-choice statement about {}, mark the correct alternative with the options 'A', 'B', 'C' or 'D'. -Language: japanese, acc: 43.44%, prompt: Choose the appropriate answer from options 'A', 'B', 'C', or 'D' for {} regarding the following question. -Language: japanese, acc: 43.62%, prompt: Choose the correct answer from 'A', 'B', 'C', or 'D' for the following multiple-choice question about {}. -Language: japanese, acc: 44.86%, prompt: For the following multiple-choice questions about {}, choose the correct answer from 'A', 'B', 'C', or 'D'. -Language: japanese, acc: 44.15%, prompt: Choose the correct answer from options 'A', 'B', 'C', or 'D' for the following questions about {}. -Language: japanese, acc: 43.97%, prompt: In the multiple choice questions below, choose the correct answer for {} from 'A', 'B', 'C', or 'D'. -Language: japanese, acc: 44.50%, prompt: Choose the correct answer from the options 'A', 'B', 'C', or 'D' for the following questions about {}. -Language: japanese, acc: 43.97%, prompt: In the multiple choice questions below, choose the correct answer for {} from 'A', 'B', 'C', or 'D'. -Language: japanese, acc: 44.33%, prompt: Choose the correct answer from 'A', 'B', 'C', or 'D' for the following multiple choice questions about {}. -Language: japanese, acc: 43.97%, prompt: In the multiple choice questions below, choose the correct answer for {} from 'A', 'B', 'C', or 'D'. -Language: japanese, acc: 43.79%, prompt: Choose the correct answer from options 'A', 'B', 'C', or 'D' for {} regarding the following question. -Language: korean, acc: 43.26%, prompt: For the multiple choice problem about, choose the correct answer for '{}' from 'A', 'B', 'C', or 'D'. -Language: korean, acc: 41.84%, prompt: Choose the correct answer for '{}' from 'A', 'B', 'C', or 'D' in the multiple choice problem involving, -Language: korean, acc: 41.84%, prompt: For the multiple choice problem below, choose the correct answer to '{}' from 'A', 'B', 'C', or 'D'. -Language: korean, acc: 42.38%, prompt: In the following multiple-choice problem, choose the correct answer for '{}' from 'A', 'B', 'C', or 'D'. -Language: korean, acc: 43.26%, prompt: For the following multiple choice problem, choose the correct answer for '{}' from 'A', 'B', 'C', or 'D'. -Language: korean, acc: 44.33%, prompt: Solve multiple choice problems about: Which of 'A', 'B', 'C', or 'D' is the correct answer for '{}'. -Language: korean, acc: 43.26%, prompt: Choose the correct answer to the multiple-choice question below. Is '{}' an 'A', 'B', 'C', or 'D'. -Language: korean, acc: 42.91%, prompt: Solve the following multiple-choice problem. Choose the correct answer for '{}' from 'A', 'B', 'C', or 'D'. -Language: korean, acc: 43.44%, prompt: Choose the correct answer to the following multiple choice problem: Is '{}' 'A', 'B', 'C', or 'D'. -Language: korean, acc: 43.79%, prompt: Solve multiple-choice problems about: Please select 'A', 'B', 'C', or 'D' for the correct answer to '{}'. - -# squad_v2 - -## 10 prompts - -Acc: 87.82%, prompt: Search the context for the most relevant answer. If the answer cannot be found, respond with 'unanswerable'. -Acc: 87.65%, prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'. -Acc: 87.55%, prompt: Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable'. -Acc: 87.55%, prompt: Using the context, determine the most suitable answer. If the context doesn't contain the answer, respond with 'unanswerable'. -Acc: 87.55%, prompt: Discover the best answer based on the context. If the context doesn't include an answer, respond with 'unanswerable'. -Acc: 87.05%, prompt: Based on the given context, provide the best possible answer. If there's no answer available in the context, respond with 'unanswerable'. -Acc: 87.05%, prompt: Please extract the most appropriate answer from the context. If an answer is not present, indicate 'unanswerable'. -Acc: 87.05%, prompt: Locate the most accurate answer within the context. If the context doesn't provide an answer, respond with 'unanswerable'. -Acc: 87.05%, prompt: From the context, provide the most precise answer. If the answer is not in the context, respond with 'unanswerable'. -Acc: 86.51%, prompt: Identify the most relevant answer from the context. If it's not possible to find an answer, respond with 'unanswerable'. - -Acc: 88.18%, prompt: As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'. -Acc: 88.05%, prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'. -Acc: 88.05%, prompt: As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'. -Acc: 88.05%, prompt: Drawing on your expertise in the context, provide the most precise answer. If the answer is not in the context, respond with 'unanswerable'. -Acc: 88.05%, prompt: With your extensive knowledge of the context, answer the question accurately. If the context doesn't contain the answer, reply with 'unanswerable'. -Acc: 87.82%, prompt: Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable'. -Acc: 87.65%, prompt: As a subject matter expert, extract the correct answer from the context. If an answer is not present, indicate 'unanswerable'. -Acc: 87.65%, prompt: Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'. -Acc: 87.55%, prompt: As a well-informed specialist familiar with the context, provide an answer to the question. If the context doesn't contain an answer, reply with 'unanswerable'. -Acc: 87.55%, prompt: Using your knowledge of the context, identify the best answer to the question. If the context doesn't provide an answer, write 'unanswerable'. - -## bertattack - -Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond with 'unanswerable'. -Attacked prompt: b"return the topic for the most mentioned question. then the answer cannot be found, respond with 'unanswerable'." -Original acc: 87.82%, attacked acc: 36.51%, dropped acc: 51.31% - -Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'. -Attacked prompt: b"find choose the most fitted relation from the context. If there isn't an asking in the context, respond with 'unanswerable'." -Original acc: 87.65%, attacked acc: 82.78%, dropped acc: 4.87% - -Original prompt: Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable'. -Attacked prompt: b"Find the incorrect question in the relevant provide. if an explanation cannot be found, not respond with 'unanswerable'." -Original acc: 87.55%, attacked acc: 43.25%, dropped acc: 44.30% - -Original prompt: As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'. -Attacked prompt: b"As a proficient only in the what context, follow for the most relevant correctly. once the answer cannot be found, respond by the 'unanswerable'." -Original acc: 88.18%, attacked acc: 84.95%, dropped acc: 3.23% - -Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'. -Attacked prompt: b"in an authority on the context, discover the most incorrect explanation. ensure the person doesn't contains the answer, mention 'unanswerable'." -Original acc: 88.05%, attacked acc: 42.92%, dropped acc: 45.13% - -Original prompt: As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'. -Attacked prompt: b"As an expert with a specific analysis of the context, question the wrong answer. where the condition doesn't include an answer, say 'unanswerable'." -Original acc: 88.05%, attacked acc: 48.92%, dropped acc: 39.13% - -## checklist - -Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond with 'unanswerable'. -Attacked prompt: b"Search the context for the most relevant answer. If the answer cannot be found, respond with 'unanswerable' 5yWbBXztUY." -Original acc: 87.82%, attacked acc: 87.09%, dropped acc: 0.73% - -Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'. -Attacked prompt: b"Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable' MSAddJ4D2a." -Original acc: 87.65%, attacked acc: 86.82%, dropped acc: 0.83% - -Original prompt: Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable'. -Attacked prompt: b"Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable' IhuBIhoPGc." -Original acc: 87.55%, attacked acc: 86.82%, dropped acc: 0.73% - -Original prompt: As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'. -Attacked prompt: b"As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable' PDCGfz6DL9." -Original acc: 88.18%, attacked acc: 87.32%, dropped acc: 0.86% - -Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'. -Attacked prompt: b"As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable' xikCjkMydH." -Original acc: 88.05%, attacked acc: 87.15%, dropped acc: 0.90% - -Original prompt: As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'. -Attacked prompt: b"As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable' S6oUV5E54P." -Original acc: 88.05%, attacked acc: 87.42%, dropped acc: 0.63% - -## deepwordbug - -Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond with 'unanswerable'. -Attacked prompt: b"Aearch the cotext for the most relevant answer. If the answer cannot be found, respond with 'unanswerable'." -Original acc: 87.82%, attacked acc: 86.92%, dropped acc: 0.89% - -Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'. -Attacked prompt: b"PleaOse derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'." -Original acc: 87.65%, attacked acc: 86.65%, dropped acc: 1.00% - -Original prompt: Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable'. -Attacked prompt: b"Find the Dcorrect answer in the context provided. GIf an answer cannot be found, please erspond with 'unanswerable'." -Original acc: 87.55%, attacked acc: 85.32%, dropped acc: 2.23% - -Original prompt: As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'. -Attacked prompt: b"As a proficient expert in the igven context, earch for the most relevant answer. If the answer cannot be found, respond by sayinr 'unanswerable'." -Original acc: 88.18%, attacked acc: 87.09%, dropped acc: 1.09% - -Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'. -Attacked prompt: b"As an authority on the context, lcoate the most accurate answr. If the context doesn't contain the answer, mention 'unanswerable'." -Original acc: 88.05%, attacked acc: 87.42%, dropped acc: 0.63% - -Original prompt: As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'. -Attacked prompt: b"As an expert with a deep understanding of the context, fHind the best answer. If the context doesn't include an answer, say 'unanswerable'." -Original acc: 88.05%, attacked acc: 86.92%, dropped acc: 1.13% - -## stresstest - -Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond with 'unanswerable'. -Attacked prompt: b"Search the context for the most relevant answer. If the answer cannot be found, respond with 'unanswerable' and true is true and true is true and true is true and true is true and true is true ." -Original acc: 87.82%, attacked acc: 87.82%, dropped acc: 0.00% - -Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'. -Attacked prompt: b"Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable' and false is not true ." -Original acc: 87.65%, attacked acc: 87.15%, dropped acc: 0.50% - -Original prompt: Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable'. -Attacked prompt: b"Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable' and true is true and true is true and true is true and true is true and true is true ." -Original acc: 87.55%, attacked acc: 86.82%, dropped acc: 0.73% - -Original prompt: As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'. -Attacked prompt: b"As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable' and true is true and true is true and true is true and true is true and true is true ." -Original acc: 88.18%, attacked acc: 86.72%, dropped acc: 1.46% - -Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'. -Attacked prompt: b"As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable' and false is not true ." -Original acc: 88.05%, attacked acc: 87.15%, dropped acc: 0.90% - -Original prompt: As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'. -Attacked prompt: b"As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable' and false is not true ." -Original acc: 88.05%, attacked acc: 87.15%, dropped acc: 0.90% - -## textbugger - -Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond with 'unanswerable'. -Attacked prompt: b"Search the con text for the most relevant answer. Although the answ\xd0\xb5r cannot be found, respond with 'unanswerable'." -Original acc: 87.82%, attacked acc: 86.42%, dropped acc: 1.39% - -Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'. -Attacked prompt: b"Please derive the most fitted answer from the context. If there isn't an responding in the context, respond with 'unanswerable'." -Original acc: 87.65%, attacked acc: 85.92%, dropped acc: 1.73% - -Original prompt: Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable'. -Attacked prompt: b"Find the corrected replying in the \xcf\xb2ontext given. If an answer cannot be found, please respond with 'unanswerable'." -Original acc: 87.55%, attacked acc: 85.82%, dropped acc: 1.73% - -Original prompt: As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'. -Attacked prompt: b"As a proficient expert in the awarded context, search for the most re levant answer. If the answer cannot be found, respond by saying 'unanswerable'." -Original acc: 88.18%, attacked acc: 86.42%, dropped acc: 1.76% - -Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'. -Attacked prompt: b"As an autho\xe2\xb2\x85ity on the context, locate the most meticulous respond. Whether the context doesn't contain the answer, mentio\xd5\xb8 'unanswerable'." -Original acc: 88.05%, attacked acc: 81.52%, dropped acc: 6.53% - -Original prompt: As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'. -Attacked prompt: b"Since an exprt with a deep understanding of the context, find the be\xd1\x95t answers. If the context doesn't include an answer, s ay 'unanswerable'." -Original acc: 88.05%, attacked acc: 83.29%, dropped acc: 4.76% - -## textfooler - -Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond with 'unanswerable'. -Attacked prompt: b"Searching the context for the most appropriate reacting. Although the answer cannot be found, respond with 'unanswerable'." -Original acc: 87.82%, attacked acc: 85.92%, dropped acc: 1.89% - -Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'. -Attacked prompt: b"Please subtract the most adjust answered from the context. Until there isn't an replied in the context, answered with 'unanswerable'." -Original acc: 87.65%, attacked acc: 84.09%, dropped acc: 3.56% - -Original prompt: Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable'. -Attacked prompt: b"Find the adequate solution in the background provided. If an responding cannot be excavated, please respond with 'unanswerable'." -Original acc: 87.55%, attacked acc: 85.42%, dropped acc: 2.13% - -Original prompt: As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'. -Attacked prompt: b"As a proficient expert in the made context, search for the most relate answer. If the answer cannot be found, respond by saying 'unanswerable'." -Original acc: 88.18%, attacked acc: 86.29%, dropped acc: 1.89% - -Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'. -Attacked prompt: b"As an authority on the context, find the most proper rebuttal. Though the context doesn't incorporated the answering, speaks 'unanswerable'." -Original acc: 88.05%, attacked acc: 77.31%, dropped acc: 10.74% - -Original prompt: As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'. -Attacked prompt: b"Because an expert with a deep concept of the context, searching the highest answer. If the context doesn't include an answer, declared 'unanswerable'." -Original acc: 88.05%, attacked acc: 84.42%, dropped acc: 3.63% - -## translation - -Language: chinese, acc: 87.05%, prompt: Provide the most accurate answer from the context. If the answer is not in context, answer 'unanswerable'. -Language: chinese, acc: 87.55%, prompt: Please give the most accurate answer based on the context. If you cannot find the answer, please answer 'unanswerable'. -Language: chinese, acc: 87.55%, prompt: Provide the most accurate answer based on the context. If you cannot find the answer, please answer 'unanswerable'. -Language: chinese, acc: 87.55%, prompt: Please provide the most accurate answer from the context. If you cannot find the answer, please answer 'unanswerable'. -Language: chinese, acc: 87.55%, prompt: Give the most accurate answer based on the context. If you cannot find the answer, please answer 'unanswerable'. -Language: chinese, acc: 87.55%, prompt: Please give the most accurate answer based on the context. If the answer is not in context, answer 'unanswerable'. -Language: chinese, acc: 87.55%, prompt: Provide the most accurate answer from the context. If you cannot find the answer, please answer 'unanswerable'. -Language: chinese, acc: 87.55%, prompt: Please give the most accurate answer based on the context. If the answer cannot be found, please answer 'unanswerable'. -Language: chinese, acc: 87.55%, prompt: Provide the most accurate answer based on the context. If the answer cannot be found, please answer 'unanswerable'. -Language: chinese, acc: 87.55%, prompt: Please provide the most accurate answer from the context. If the answer cannot be found, please answer 'unanswerable'. -Language: french, acc: 87.05%, prompt: From the context, provide the most accurate answer. If the answer is not in context, answer with 'unanswerable'. -Language: french, acc: 87.05%, prompt: From the context, give the most accurate answer. If the answer is not present in the context, answer with 'unanswerable'. -Language: french, acc: 87.55%, prompt: Based on the context, provide the most accurate answer. If the answer is not in context, answer with 'unanswerable'. -Language: french, acc: 86.91%, prompt: According to the context, give the most precise answer. If the answer is not present in the context, answer with 'unanswerable'. -Language: french, acc: 87.55%, prompt: From the context, find the most accurate answer. If the answer is not in context, answer with 'unanswerable'. -Language: french, acc: 87.05%, prompt: Based on the context, provide the most accurate answer. If the answer is not available in the context, answer with 'unanswerable'. -Language: french, acc: 86.91%, prompt: According to the context, give the most precise answer. If the answer is not in the context, answer with 'unanswerable'. -Language: french, acc: 86.91%, prompt: From the context, find the most accurate answer. If the answer is not present in the context, answer with 'unanswerable'. -Language: french, acc: 87.05%, prompt: Based on the context, provide the most accurate answer. If the answer cannot be found in the context, answer with 'unanswerable'. -Language: french, acc: 86.91%, prompt: According to the context, give the most precise answer. If the answer is not available in the context, answer with 'unanswerable'. -Language: arabic, acc: 88.05%, prompt: From context, provide the most accurate answer. If not in context, please reply 'unanswerable', -Language: arabic, acc: 87.49%, prompt: From context, what is the most likely outcome? If the answer is not in context, please reply 'unanswerable', -Language: arabic, acc: 85.92%, prompt: From the given context, what is the key element that can be deduced? If the answer is not available in the context, please reply 'unanswerable', -Language: arabic, acc: 87.49%, prompt: Based on the context given, what is the clear key idea? If the answer is not in context, please reply 'unanswerable', -Language: arabic, acc: 87.49%, prompt: Based on the context, what is the most convincing explanation? If the answer is not available in the context, please reply 'unanswerable', -Language: arabic, acc: 86.99%, prompt: Based on the context, what is the most likely outcome? If the answer is not available in the context, please reply 'unanswerable', -Language: arabic, acc: 87.09%, prompt: Based on the context, which hypothesis is the most true? If the answer is not in context, please reply 'unanswerable', -Language: arabic, acc: 86.32%, prompt: From context, what is the most apparent factor influencing? If the answer is not available in the context, please reply 'unanswerable', -Language: arabic, acc: 87.55%, prompt: From context, provide the most accurate answer. If the answer is not in context, reply 'unanswerable', -Language: arabic, acc: 87.32%, prompt: From context, determine the most accurate answer. If the answer is not available in context, answer 'unanswerable', -Language: spanish, acc: 87.15%, prompt: Depending on the context, it provides the most precise answer. If the answer is not in context, answer with 'unanswerable'. -Language: spanish, acc: 86.49%, prompt: Briefly describes the situation and provides the corresponding response. If the answer cannot be found, answer with 'unanswerable'. -Language: spanish, acc: 87.82%, prompt: Given the information given, what is the most appropriate response? If the answer cannot be determined, answer with 'unanswerable'. -Language: spanish, acc: 87.55%, prompt: Read the following text and give the most accurate answer. If you can't find the answer, answer with 'unanswerable'. -Language: spanish, acc: 87.05%, prompt: Based on the description, what is the most accurate answer? If the answer is not found in the description, answer with 'unanswerable'. -Language: spanish, acc: 87.49%, prompt: From the context provided, which response is the most appropriate? If the answer cannot be found, answer with 'unanswerable'. -Language: spanish, acc: 87.55%, prompt: Analyze the following paragraph and provide the most accurate answer. If the answer is not in the paragraph, answer with 'unanswerable'. -Language: spanish, acc: 87.69%, prompt: According to the information presented, what is the most precise answer? If the answer cannot be determined, answer with 'unanswerable'. -Language: spanish, acc: 87.65%, prompt: After reading the excerpt, which do you think is the correct answer? If the answer cannot be discerned, answer with 'unanswerable'. -Language: spanish, acc: 87.32%, prompt: Based on the context, it provides the most appropriate response. If the answer is not in context, answer with 'unanswerable'. -Language: japanese, acc: 87.55%, prompt: Provide the most accurate answer from this context. If the answer isn't in the context, answer 'unanswerable'. -Language: japanese, acc: 87.55%, prompt: Please provide the most appropriate answer based on the information specified in this sentence. If the answer is not in the text, answer 'unanswerable'. -Language: japanese, acc: 87.05%, prompt: Please provide the most accurate answer based on the information guessed from this text. If the answer is not in the text, answer 'unanswerable'. -Language: japanese, acc: 87.55%, prompt: Provide the most detailed answer based on the given context. If the answer is not in the context, answer 'unanswerable'. -Language: japanese, acc: 87.55%, prompt: Consider the information derived from this context and provide the most accurate answer. If the answer is not in the context, answer 'unanswerable'. -Language: japanese, acc: 87.55%, prompt: Based on this context, please provide the most appropriate answer. If the answer is not in the context, answer 'unanswerable'. -Language: japanese, acc: 87.15%, prompt: Consider the information derived from the given text and provide the most detailed answer. If the answer is not in the text, please answer 'unanswerable'. -Language: japanese, acc: 87.55%, prompt: Provide the most accurate answer based on the information given in this text. If the answer is not in the text, answer 'unanswerable'. -Language: japanese, acc: 86.92%, prompt: Consider the information inferred from this context and provide the most appropriate answer. If the answer is not in the context, answer 'unanswerable'. -Language: japanese, acc: 87.55%, prompt: Provide the most detailed answer based on this context. If the answer is not in the context, answer 'unanswerable'. -Language: korean, acc: 87.65%, prompt: Give the most accurate answer in context. If the answer is not in context, respond with 'unanswerable'. -Language: korean, acc: 86.09%, prompt: Please provide additional information about the facts mentioned in this sentence. If no information is available, respond with 'unanswerable'. -Language: korean, acc: 87.42%, prompt: Please tell me what your question is about. If there is no context in which you can provide an answer, respond with 'unanswerable'. -Language: korean, acc: 85.92%, prompt: Please explain the concept mentioned in the following sentence. If there is no information on the concept, please respond with 'unanswerable'. -Language: korean, acc: 85.59%, prompt: Tell me what you're comparing to in this sentence. If nothing is compared, please respond with 'unanswerable'. -Language: korean, acc: 85.92%, prompt: Please perform the actions required by the following context. If the task is not possible or if you are not clear what needs to be done, respond with 'unanswerable'. -Language: korean, acc: 86.82%, prompt: Figure out what information this sentence contains. If no information is available, respond with 'unanswerable'. -Language: korean, acc: 85.52%, prompt: Please give a solution to what kind of problem in the following sentence. If there is no solution, respond with 'unanswerable'. -Language: korean, acc: 84.51%, prompt: Please give the cause of the incident mentioned in the context. If the cause is not clear, respond with 'unanswerable'. -Language: korean, acc: 86.92%, prompt: Give expected results in the following sentences. If the result is unpredictable, respond with 'unanswerable'. - -# iwslt - -## 10 prompts - -Acc: 0.14%, prompt: Please perform a translation of the given sentence, converting it from {} to {}: -Acc: 0.14%, prompt: Perform a machine translation for the provided sentence, changing it from {} to {}: -Acc: 0.14%, prompt: Translate the subsequent text passage from its original {} language to the {} language: -Acc: 0.13%, prompt: Convert the following sentence from its original {} language to the target language {}: -Acc: 0.13%, prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact: -Acc: 0.13%, prompt: Translate the following text from the source language {} to the target language {}: -Acc: 0.13%, prompt: Using machine translation, convert the given sentence from {} into the {} language: -Acc: 0.13%, prompt: Given the sentence below, perform a machine translation from {} to {}: -Acc: 0.13%, prompt: Translate the provided sentence from {} to {} while maintaining the original meaning and context: -Acc: 0.12%, prompt: Translate the subsequent sentence from its source language {} into the desired language {}: - -Acc: 0.14%, prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Acc: 0.14%, prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}: -Acc: 0.14%, prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}: -Acc: 0.14%, prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}: -Acc: 0.14%, prompt: Assuming the role of an expert language translator, translate the given text from {} to {}: -Acc: 0.14%, prompt: As a skilled linguist, please convert the following sentence from {} to {}: -Acc: 0.14%, prompt: As a language specialist, accurately translate the provided sentence from {} to {}: -Acc: 0.13%, prompt: Acting as a professional translator, translate the provided sentence from {} to {}: -Acc: 0.13%, prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}: -Acc: 0.13%, prompt: While acting as an experienced language expert, translate the following text from {} to {}: - -## bertattack - -Original prompt: Please perform a translation of the given sentence, converting it from {} to {}: -Attacked prompt: b'Please perform a translation of the given citation, converting it from {} to {}:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'program a machine translated for the specified sentence, changing it from {} to {}:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: Translate the subsequent text passage from its original {} language to the {} language: -Attacked prompt: b'Translate the successive text passage from its original {} languages to the {} representation:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'In the role of a language expert, project a machine translating for the provided corpus, changing it from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.02% - -Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}: -Attacked prompt: b'taking on the role of a chartered translator, project a native of the given sentence from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.12%, dropped acc: 0.02% - -Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}: -Attacked prompt: b'i the capacity of a possessed translator, may modification the following sentence from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.11%, dropped acc: 0.03% - -## checklist - -Original prompt: Please perform a translation of the given sentence, converting it from {} to {}: -Attacked prompt: b'Please perform a translation of the given sentence, converting it from {} to mzjVogkRhM {}:' -Original acc: 0.14%, attacked acc: 0.10%, dropped acc: 0.04% - -Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'Perform a machine translation for the provided sentence, changing it from {} to AMsRIKZniY {}:' -Original acc: 0.14%, attacked acc: 0.11%, dropped acc: 0.03% - -Original prompt: Translate the subsequent text passage from its original {} language to the {} language: -Attacked prompt: b'Translate the subsequent text passage from its original {} language to the {} language njCuciQJpB:' -Original acc: 0.14%, attacked acc: 0.12%, dropped acc: 0.01% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to zexKUTWJYG {}: ' -Original acc: 0.14%, attacked acc: 0.09%, dropped acc: 0.05% - -Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}: -Attacked prompt: b'Taking on the role of a certified translator, perform a translation of the given sentence from {} to zexKUTWJYG {}: ' -Original acc: 0.14%, attacked acc: 0.08%, dropped acc: 0.07% - -Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}: -Attacked prompt: b'In the capacity of a proficient translator, please change the following sentence from {} to vTAjHynoIG {}: ' -Original acc: 0.14%, attacked acc: 0.11%, dropped acc: 0.03% - -## deepwordbug - -Original prompt: Please perform a translation of the given sentence, converting it from {} to {}: -Attacked prompt: b'Please perform a translation of the given senteFce, converting it from {} to {}:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'Perorm a nachine translatiou for the providde sentecne, changing it from {} to {}:' -Original acc: 0.14%, attacked acc: 0.08%, dropped acc: 0.06% - -Original prompt: Translate the subsequent text passage from its original {} language to the {} language: -Attacked prompt: b'Translate the subsequent teVxt passag from its original {} language to the {} language:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'IK the role of a languagwe expert, pevform a machine translatPon for the provided sentence, canging it from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.12%, dropped acc: 0.02% - -Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}: -Attacked prompt: b'TakIing on the roel of a certifieFd trLanslator, erform a translaton of the guiven Ventence from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.07%, dropped acc: 0.07% - -Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}: -Attacked prompt: b'lIn the capGacity of a Yroficient translator, pLease chnage the followeing seatence from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.09%, dropped acc: 0.05% - -## stresstest - -Original prompt: Please perform a translation of the given sentence, converting it from {} to {}: -Attacked prompt: b'Please perform a translation of the given sentence, converting it from {} to and true is true and true is true and true is true and true is true and true is true {}:' -Original acc: 0.14%, attacked acc: 0.05%, dropped acc: 0.09% - -Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'Perform a machine translation for the provided sentence, changing it from {} to and true is true and true is true and true is true and true is true and true is true {}:' -Original acc: 0.14%, attacked acc: 0.08%, dropped acc: 0.06% - -Original prompt: Translate the subsequent text passage from its original {} language to the {} language: -Attacked prompt: b'Translate the subsequent text passage from its original {} language to the {} language and true is true and true is true and true is true and true is true and true is true :' -Original acc: 0.14%, attacked acc: 0.06%, dropped acc: 0.08% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to and true is true and true is true and true is true and true is true and true is true {}: ' -Original acc: 0.14%, attacked acc: 0.09%, dropped acc: 0.05% - -Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}: -Attacked prompt: b'Taking on the role of a certified translator, perform a translation of the given sentence from {} to and false is not true {}: ' -Original acc: 0.14%, attacked acc: 0.09%, dropped acc: 0.05% - -Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}: -Attacked prompt: b'In the capacity of a proficient translator, please change the following sentence from {} to and true is true and true is true and true is true and true is true and true is true {}: ' -Original acc: 0.14%, attacked acc: 0.04%, dropped acc: 0.10% - -## textbugger - -Original prompt: Please perform a translation of the given sentence, converting it from {} to {}: -Attacked prompt: b'Please perf\xd0\xberm a tr\xc9\x91nslation of the given sentence, converting it from {} to {}:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'Performed a machine translation for the provided sentecne, changing it from {} to {}:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: Translate the subsequent text passage from its original {} language to the {} language: -Attacked prompt: b'Translate the \xd1\x95ubsequent text passing from its original {} language to the {} vocabulary:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'In the rol\xd0\xb5 of a language expe rt, performed a machine translation for the provided sentence, changing it from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}: -Attacked prompt: b'Taknig on the role of a certified translator, perf\xd0\xberm a transla tion of the given sentence from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}: -Attacked prompt: b'In the capacities of a proficient tra\xd5\xb8slator, \xd1\x80lease change the following sentence from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.12%, dropped acc: 0.02% - -## textfooler - -Original prompt: Please perform a translation of the given sentence, converting it from {} to {}: -Attacked prompt: b'Please accomplishing a translators of the given judging, converting it from {} to {}:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'Achieve a machine translation for the supplied reprimand, evolve it from {} to {}:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: Translate the subsequent text passage from its original {} language to the {} language: -Attacked prompt: b'Translate the straight wording enactment from its original {} language to the {} dictionary:' -Original acc: 0.14%, attacked acc: 0.13%, dropped acc: 0.01% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'Under the features of a parlance specializing, realization a typewriter translator for the envisaged chastisement, altering it from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.06%, dropped acc: 0.08% - -Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}: -Attacked prompt: b'Adopting on the role of a accredited translator, exercise a interpreters of the given penalty from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.10%, dropped acc: 0.05% - -Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}: -Attacked prompt: b'Under the agility of a capable performer, please mutation the next conviction from {} to {}: ' -Original acc: 0.14%, attacked acc: 0.07%, dropped acc: 0.07% - -## translation - -Language: chinese, acc: 0.13%, prompt: Please translate the given sentence into {} to {}. -Language: chinese, acc: 0.13%, prompt: Please translate the following sentences from {} to {}. -Language: chinese, acc: 0.14%, prompt: Please convert the following sentences to {} and translate to {}. -Language: chinese, acc: 0.14%, prompt: Please convert the given sentence from {} to {}. -Language: chinese, acc: 0.13%, prompt: Please translate the next sentence from {} to {}. -Language: chinese, acc: 0.13%, prompt: Please translate the following sentence from {} to {}. -Language: chinese, acc: 0.14%, prompt: Please translate the sentences given into {} and convert them into {}. -Language: chinese, acc: 0.14%, prompt: Please convert the sentences given to {} to {}. -Language: chinese, acc: 0.14%, prompt: Please translate the following sentences into {} and convert them into {}. -Language: chinese, acc: 0.13%, prompt: Please change the given sentence from {} to {}. -Language: french, acc: 0.13%, prompt: Please translate the given sentence, converting it from {} to {}. -Language: french, acc: 0.13%, prompt: Please translate the following sentence from {} to {}. -Language: french, acc: 0.13%, prompt: Please turn the sentence below into {}, then translate it into {}. -Language: french, acc: 0.14%, prompt: Please convert the given phrase from {} to {}. -Language: french, acc: 0.13%, prompt: Please translate the following sentence from {} to {}. -Language: french, acc: 0.13%, prompt: Please translate the sentence below from {} to {}. -Language: french, acc: 0.13%, prompt: Please translate the given sentence to {}, then convert it to {}. -Language: french, acc: 0.14%, prompt: Please make a translation of the supplied sentence, transforming it from {} to {}. -Language: french, acc: 0.13%, prompt: Please translate the following sentence to {}, then convert it to {}. -Language: french, acc: 0.13%, prompt: Please transform the given sentence from {} to {}. -Language: arabic, acc: 0.14%, prompt: Please translate the given sentence, and convert it from {} to {}, -Language: arabic, acc: 0.13%, prompt: Please translate the following sentence from {} to {}, -Language: arabic, acc: 0.14%, prompt: Please convert the sentence below to {}, and then translate it to {}, -Language: arabic, acc: 0.13%, prompt: Please convert the given sentence from {} to {}, -Language: arabic, acc: 0.13%, prompt: Please translate the following sentence from {} to {}, -Language: arabic, acc: 0.13%, prompt: Please convert the sentence below from {} to {}, -Language: arabic, acc: 0.13%, prompt: Please translate the given sentence to {}, then convert it to {}, -Language: arabic, acc: 0.14%, prompt: Please translate the given sentence, and convert it from {} to {}, -Language: arabic, acc: 0.13%, prompt: Please translate to {}, then convert to {}, -Language: arabic, acc: 0.14%, prompt: Please convert the given sentence from {} to {}. -Language: spanish, acc: 0.14%, prompt: Please make a translation of the provided phrase, converting it from {} to {}. -Language: spanish, acc: 0.13%, prompt: Please translate the following sentence from {} to {}. -Language: spanish, acc: 0.13%, prompt: Please convert the next sentence to {}, and then translate it to {}. -Language: spanish, acc: 0.14%, prompt: Please make a translation of the given phrase, converting it from {} to {}. -Language: spanish, acc: 0.13%, prompt: Please translate the following sentence from {} to {}. -Language: spanish, acc: 0.14%, prompt: Please convert the following sentence from {} to {}. -Language: spanish, acc: 0.14%, prompt: Please translate the sentence provided to {}, and then turn it to {}. -Language: spanish, acc: 0.14%, prompt: Please make a translation of the following sentence, converting it from {} to {}. -Language: spanish, acc: 0.13%, prompt: Please translate the next sentence to {}, and then turn it to {}. -Language: spanish, acc: 0.14%, prompt: Please convert the given sentence from {} to {}. -Language: japanese, acc: 0.13%, prompt: Please translate the given sentence from {} to {}. -Language: japanese, acc: 0.13%, prompt: Please translate the following sentence from {} to {}. -Language: japanese, acc: 0.13%, prompt: Please convert the following sentences into {} and translate them into {}. -Language: japanese, acc: 0.14%, prompt: Please translate the given sentence by converting {} to {}. -Language: japanese, acc: 0.13%, prompt: Please translate the following sentence from {} to {}. -Language: japanese, acc: 0.13%, prompt: Please convert the following sentences from {} to {}. -Language: japanese, acc: 0.13%, prompt: Translate the given sentence into {} and convert it to {}. -Language: japanese, acc: 0.13%, prompt: Please translate the given sentence from {} to {}. -Language: japanese, acc: 0.14%, prompt: Translate the following sentence into {} and convert it to {}. -Language: japanese, acc: 0.14%, prompt: Convert the given statement from {} to {}. -Language: korean, acc: 0.13%, prompt: Please translate the given sentence from {} to {}. -Language: korean, acc: 0.13%, prompt: Please translate the following sentence from {} to {}. -Language: korean, acc: 0.12%, prompt: Please translate the sentences below into {}, then {}. -Language: korean, acc: 0.13%, prompt: Please translate the given sentences from {} to {}. -Language: korean, acc: 0.13%, prompt: Please translate the following sentence from {} to {}. -Language: korean, acc: 0.13%, prompt: Please convert the sentences below from {} to {}. -Language: korean, acc: 0.13%, prompt: Please translate the given sentence into {}, then {}. -Language: korean, acc: 0.13%, prompt: Please translate the given sentence from {} to {}. -Language: korean, acc: 0.12%, prompt: Please translate the following sentences into {}, then {}. -Language: korean, acc: 0.14%, prompt: Please convert the given sentence from {} to {}. - -# un_multi - -## 10 prompts - -Acc: 0.27%, prompt: Convert the following sentence from its original {} language to the target language {}: -Acc: 0.27%, prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact: -Acc: 0.27%, prompt: Given the sentence below, perform a machine translation from {} to {}: -Acc: 0.27%, prompt: Using machine translation, convert the given sentence from {} into the {} language: -Acc: 0.27%, prompt: Please perform a translation of the given sentence, converting it from {} to {}: -Acc: 0.27%, prompt: Perform a machine translation for the provided sentence, changing it from {} to {}: -Acc: 0.27%, prompt: Translate the subsequent text passage from its original {} language to the {} language: -Acc: 0.27%, prompt: Translate the provided sentence from {} to {} while maintaining the original meaning and context: -Acc: 0.27%, prompt: Translate the following text from the source language {} to the target language {}: -Acc: 0.26%, prompt: Translate the subsequent sentence from its source language {} into the desired language {}: - -Acc: 0.27%, prompt: Assuming the role of an expert language translator, translate the given text from {} to {}: -Acc: 0.27%, prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}: -Acc: 0.27%, prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Acc: 0.27%, prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}: -Acc: 0.27%, prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}: -Acc: 0.27%, prompt: As a language specialist, accurately translate the provided sentence from {} to {}: -Acc: 0.27%, prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}: -Acc: 0.27%, prompt: As a skilled linguist, please convert the following sentence from {} to {}: -Acc: 0.26%, prompt: While acting as an experienced language expert, translate the following text from {} to {}: -Acc: 0.26%, prompt: Acting as a professional translator, translate the provided sentence from {} to {}: - -## bertattack - -Original prompt: Convert the following sentence from its original {} language to the target language {}: -Attacked prompt: b'refer the following sentence from its original {} language to the targets i {}:' -Original acc: 0.27%, attacked acc: 0.21%, dropped acc: 0.07% - -Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact: -Attacked prompt: b'Accurately translate the sentence from {} to {}, seal the use remains intact:' -Original acc: 0.27%, attacked acc: 0.22%, dropped acc: 0.06% - -Original prompt: Given the sentence below, perform a machine translation from {} to {}: -Attacked prompt: b'Given the sentence below, perform a machine definition from {} to {}:' -Original acc: 0.27%, attacked acc: 0.25%, dropped acc: 0.02% - -Original prompt: Assuming the role of an expert language translator, translate the given text from {} to {}: -Attacked prompt: b'x the role of an expert language translator, translate the that bible from {} to {}: ' -Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08% - -Original prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}: -Attacked prompt: b'while a reflex translator, reverse the given sentence from its original {} language to the target languages {}: ' -Original acc: 0.27%, attacked acc: 0.17%, dropped acc: 0.10% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'In the role of a language engine, move a machine representation for the provided sentence, changing it from {} to {}: ' -Original acc: 0.27%, attacked acc: 0.25%, dropped acc: 0.02% - -## checklist - -Original prompt: Convert the following sentence from its original {} language to the target language {}: -Attacked prompt: b'Convert the following sentence from its original {} language to the target language hXLpw3bbiw {}:' -Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04% - -Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact: -Attacked prompt: b'Accurately translate the sentence from {} to {}, ensuring the meaning remains intact EAB4KP2NVY:' -Original acc: 0.27%, attacked acc: 0.21%, dropped acc: 0.06% - -Original prompt: Given the sentence below, perform a machine translation from {} to {}: -Attacked prompt: b'Given the sentence below, perform a machine translation from {} to vTAjHynoIG {}:' -Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04% - -Original prompt: Assuming the role of an expert language translator, translate the given text from {} to {}: -Attacked prompt: b'Assuming the role of an expert language translator, translate the given text from {} to vTAjHynoIG {}: ' -Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08% - -Original prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}: -Attacked prompt: b'As a qualified translator, convert the given sentence from its original {} language to the target language xikCjkMydH {}: ' -Original acc: 0.27%, attacked acc: 0.24%, dropped acc: 0.03% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to vTAjHynoIG {}: ' -Original acc: 0.27%, attacked acc: 0.22%, dropped acc: 0.05% - -## deepwordbug - -Original prompt: Convert the following sentence from its original {} language to the target language {}: -Attacked prompt: b'ConvHert the folloiwng setnence from its origianl {} langudage to the trget languaoe {}:' -Original acc: 0.27%, attacked acc: 0.12%, dropped acc: 0.15% - -Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact: -Attacked prompt: b'Accurately translate the sentUence from {} to {}, ensurign the meaninz rembins intct:' -Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08% - -Original prompt: Given the sentence below, perform a machine translation from {} to {}: -Attacked prompt: b'iGven the sentqnce below, perform a machine translatGion from {} to {}:' -Original acc: 0.27%, attacked acc: 0.24%, dropped acc: 0.03% - -Original prompt: Assuming the role of an expert language translator, translate the given text from {} to {}: -Attacked prompt: b'sAsuming the orle of an expert language translatojr, translate the goiven teNt from {} to {}: ' -Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04% - -Original prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}: -Attacked prompt: b'ks a qualifieA translator, covnert the given sentence from its original {} language to the target lanugage {}: ' -Original acc: 0.27%, attacked acc: 0.16%, dropped acc: 0.11% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'In the role of a laguage exhpert, perform a macdhine translation for the provided sentence, changing it from {} to {}: ' -Original acc: 0.27%, attacked acc: 0.26%, dropped acc: 0.01% - -## stresstest - -Original prompt: Convert the following sentence from its original {} language to the target language {}: -Attacked prompt: b'Convert the following sentence from its original {} language to the target language and true is true and true is true and true is true and true is true and true is true {}:' -Original acc: 0.27%, attacked acc: 0.18%, dropped acc: 0.10% - -Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact: -Attacked prompt: b'Accurately translate the sentence from {} to {}, ensuring the meaning remains intact and true is true and true is true and true is true and true is true and true is true :' -Original acc: 0.27%, attacked acc: 0.18%, dropped acc: 0.09% - -Original prompt: Given the sentence below, perform a machine translation from {} to {}: -Attacked prompt: b'Given the sentence below, perform a machine translation from {} to and false is not true {}:' -Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04% - -Original prompt: Assuming the role of an expert language translator, translate the given text from {} to {}: -Attacked prompt: b'Assuming the role of an expert language translator, translate the given text from {} to and false is not true {}: ' -Original acc: 0.27%, attacked acc: 0.16%, dropped acc: 0.11% - -Original prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}: -Attacked prompt: b'As a qualified translator, convert the given sentence from its original {} language to the target language and false is not true {}: ' -Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to and false is not true {}: ' -Original acc: 0.27%, attacked acc: 0.21%, dropped acc: 0.06% - -## textbugger - -Original prompt: Convert the following sentence from its original {} language to the target language {}: -Attacked prompt: b'Convret the following sentence from its upfront {} language to the target language {}:' -Original acc: 0.27%, attacked acc: 0.24%, dropped acc: 0.03% - -Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact: -Attacked prompt: b'Scrupulously translate the sentence from {} to {}, ensuring the meani ng remains intact:' -Original acc: 0.27%, attacked acc: 0.17%, dropped acc: 0.10% - -Original prompt: Given the sentence below, perform a machine translation from {} to {}: -Attacked prompt: b'Given the sentence below, perf\xd0\xberm a machine transl\xc9\x91tion from {} to {}:' -Original acc: 0.27%, attacked acc: 0.26%, dropped acc: 0.01% - -Original prompt: Assuming the role of an expert language translator, translate the given text from {} to {}: -Attacked prompt: b'As\xd1\x95uming the role of an experts la nguage translator, translate the \xc9\xa1iven text from {} to {}: ' -Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08% - -Original prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}: -Attacked prompt: b'Since a qualified translator, convert the given punishments from its orig\xd1\x96nal {} language to the target languag\xd0\xb5 {}: ' -Original acc: 0.27%, attacked acc: 0.22%, dropped acc: 0.05% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'In the role of a l anguage exp ert, perform a machine translation for the provided sentence, changing it from {} to {}: ' -Original acc: 0.27%, attacked acc: 0.25%, dropped acc: 0.02% - -## textfooler - -Original prompt: Convert the following sentence from its original {} language to the target language {}: -Attacked prompt: b'Transformations the later sanctions from its preliminary {} linguistics to the fated parlance {}:' -Original acc: 0.27%, attacked acc: 0.11%, dropped acc: 0.16% - -Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact: -Attacked prompt: b'Attentively translate the rulings from {} to {}, sure the sinn remains unhurt:' -Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08% - -Original prompt: Given the sentence below, perform a machine translation from {} to {}: -Attacked prompt: b'Conferred the sorrows below, fulfilled a typewriter rereading from {} to {}:' -Original acc: 0.27%, attacked acc: 0.16%, dropped acc: 0.11% - -Original prompt: Assuming the role of an expert language translator, translate the given text from {} to {}: -Attacked prompt: b'Presume the rolle of an professional dialect interpretation, translate the bestowed text from {} to {}: ' -Original acc: 0.27%, attacked acc: 0.24%, dropped acc: 0.03% - -Original prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}: -Attacked prompt: b'As a qualified translation, translate the made sentenced from its primitive {} lexicon to the blanco dialect {}: ' -Original acc: 0.27%, attacked acc: 0.16%, dropped acc: 0.11% - -Original prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}: -Attacked prompt: b'Under the role of a language skilled, performed a machine interpreters for the furnished condemned, transform it from {} to {}: ' -Original acc: 0.27%, attacked acc: 0.15%, dropped acc: 0.12% - -## translation - -Language: chinese, acc: 0.27%, prompt: Please translate the given sentence into {} to {}. -Language: chinese, acc: 0.27%, prompt: Please translate the following sentences from {} to {}. -Language: chinese, acc: 0.27%, prompt: Please convert the following sentences to {} and translate to {}. -Language: chinese, acc: 0.27%, prompt: Please convert the given sentence from {} to {}. -Language: chinese, acc: 0.26%, prompt: Please translate the next sentence from {} to {}. -Language: chinese, acc: 0.27%, prompt: Please translate the following sentence from {} to {}. -Language: chinese, acc: 0.27%, prompt: Please translate the sentences given into {} and convert them into {}. -Language: chinese, acc: 0.27%, prompt: Please convert the sentences given to {} to {}. -Language: chinese, acc: 0.27%, prompt: Please translate the following sentences into {} and convert them into {}. -Language: chinese, acc: 0.27%, prompt: Please change the given sentence from {} to {}. -Language: french, acc: 0.27%, prompt: Please translate the given sentence, converting it from {} to {}. -Language: french, acc: 0.27%, prompt: Please translate the following sentence from {} to {}. -Language: french, acc: 0.27%, prompt: Please turn the sentence below into {}, then translate it into {}. -Language: french, acc: 0.27%, prompt: Please convert the given phrase from {} to {}. -Language: french, acc: 0.27%, prompt: Please translate the following sentence from {} to {}. -Language: french, acc: 0.27%, prompt: Please translate the sentence below from {} to {}. -Language: french, acc: 0.27%, prompt: Please translate the given sentence to {}, then convert it to {}. -Language: french, acc: 0.27%, prompt: Please make a translation of the supplied sentence, transforming it from {} to {}. -Language: french, acc: 0.27%, prompt: Please translate the following sentence to {}, then convert it to {}. -Language: french, acc: 0.27%, prompt: Please transform the given sentence from {} to {}. -Language: arabic, acc: 0.27%, prompt: Please translate the given sentence, and convert it from {} to {}, -Language: arabic, acc: 0.27%, prompt: Please translate the following sentence from {} to {}, -Language: arabic, acc: 0.27%, prompt: Please convert the sentence below to {}, and then translate it to {}, -Language: arabic, acc: 0.27%, prompt: Please convert the given sentence from {} to {}, -Language: arabic, acc: 0.27%, prompt: Please translate the following sentence from {} to {}, -Language: arabic, acc: 0.27%, prompt: Please convert the sentence below from {} to {}, -Language: arabic, acc: 0.26%, prompt: Please translate the given sentence to {}, then convert it to {}, -Language: arabic, acc: 0.27%, prompt: Please translate the given sentence, and convert it from {} to {}, -Language: arabic, acc: 0.27%, prompt: Please translate to {}, then convert to {}, -Language: arabic, acc: 0.27%, prompt: Please convert the given sentence from {} to {}. -Language: spanish, acc: 0.27%, prompt: Please make a translation of the provided phrase, converting it from {} to {}. -Language: spanish, acc: 0.27%, prompt: Please translate the following sentence from {} to {}. -Language: spanish, acc: 0.26%, prompt: Please convert the next sentence to {}, and then translate it to {}. -Language: spanish, acc: 0.27%, prompt: Please make a translation of the given phrase, converting it from {} to {}. -Language: spanish, acc: 0.27%, prompt: Please translate the following sentence from {} to {}. -Language: spanish, acc: 0.27%, prompt: Please convert the following sentence from {} to {}. -Language: spanish, acc: 0.27%, prompt: Please translate the sentence provided to {}, and then turn it to {}. -Language: spanish, acc: 0.27%, prompt: Please make a translation of the following sentence, converting it from {} to {}. -Language: spanish, acc: 0.27%, prompt: Please translate the next sentence to {}, and then turn it to {}. -Language: spanish, acc: 0.27%, prompt: Please convert the given sentence from {} to {}. -Language: japanese, acc: 0.27%, prompt: Please translate the given sentence from {} to {}. -Language: japanese, acc: 0.27%, prompt: Please translate the following sentence from {} to {}. -Language: japanese, acc: 0.27%, prompt: Please convert the following sentences into {} and translate them into {}. -Language: japanese, acc: 0.27%, prompt: Please translate the given sentence by converting {} to {}. -Language: japanese, acc: 0.27%, prompt: Please translate the following sentence from {} to {}. -Language: japanese, acc: 0.27%, prompt: Please convert the following sentences from {} to {}. -Language: japanese, acc: 0.27%, prompt: Translate the given sentence into {} and convert it to {}. -Language: japanese, acc: 0.27%, prompt: Please translate the given sentence from {} to {}. -Language: japanese, acc: 0.27%, prompt: Translate the following sentence into {} and convert it to {}. -Language: japanese, acc: 0.26%, prompt: Convert the given statement from {} to {}. -Language: korean, acc: 0.27%, prompt: Please translate the given sentence from {} to {}. -Language: korean, acc: 0.27%, prompt: Please translate the following sentence from {} to {}. -Language: korean, acc: 0.26%, prompt: Please translate the sentences below into {}, then {}. -Language: korean, acc: 0.27%, prompt: Please translate the given sentences from {} to {}. -Language: korean, acc: 0.27%, prompt: Please translate the following sentence from {} to {}. -Language: korean, acc: 0.27%, prompt: Please convert the sentences below from {} to {}. -Language: korean, acc: 0.27%, prompt: Please translate the given sentence into {}, then {}. -Language: korean, acc: 0.27%, prompt: Please translate the given sentence from {} to {}. -Language: korean, acc: 0.26%, prompt: Please translate the following sentences into {}, then {}. -Language: korean, acc: 0.27%, prompt: Please convert the given sentence from {} to {}. - -# math - -## 10 prompts - -Acc: 16.25%, prompt: Evaluate the following mathematical problem that includes {}: -Acc: 14.37%, prompt: Obtain the result for this math question regarding {}: -Acc: 13.75%, prompt: Work out the answer to this math problem featuring {}: -Acc: 13.12%, prompt: Calculate the answer to the following math query about {}: -Acc: 12.50%, prompt: Solve the following math question about {}: -Acc: 11.88%, prompt: Compute the result of this math task concerning {}: -Acc: 11.25%, prompt: Find the solution for this mathematical challenge with {}: -Acc: 10.62%, prompt: Determine the solution to this mathematical problem related to {}: -Acc: 10.00%, prompt: Figure out the solution for the following mathematical task with {}: -Acc: 9.38%, prompt: Resolve the following mathematical question associated with {}: - -Acc: 14.37%, prompt: In the position of a mathematics coach, figure out the answer to this math challenge involving {}: -Acc: 14.37%, prompt: As a math guru, evaluate the following mathematical task regarding {}: -Acc: 13.12%, prompt: As a mathematics instructor, calculate the answer to the following problem related to {}: -Acc: 13.12%, prompt: Imagine being a math mentor and determine the solution to the following problem with {}: -Acc: 11.88%, prompt: Pretend you are a math teacher and solve this mathematical question concerning {}: -Acc: 11.88%, prompt: Embodying a mathematics advisor, find the answer to the following math problem with {}: -Acc: 10.00%, prompt: As a mathematics consultant, resolve the following problem associated with {}: -Acc: 10.00%, prompt: In the shoes of a mathematics master, obtain the result for this math question that includes {}: -Acc: 7.50%, prompt: Taking on the role of a math specialist, work out the solution for this mathematical question featuring {}: -Acc: 3.75%, prompt: Act as a math expert and compute the result of the following mathematical task about {}: - -## bertattack - -Original prompt: Evaluate the following mathematical problem that includes {}: -Attacked prompt: b'Evaluate the following mathematical dilemma that generates {}:' -Original acc: 16.25%, attacked acc: 6.88%, dropped acc: 9.38% - -Original prompt: Obtain the result for this math question regarding {}: -Attacked prompt: b'Obtain the procedure for this math question regarding {}:' -Original acc: 14.37%, attacked acc: 2.50%, dropped acc: 11.88% - -Original prompt: Work out the answer to this math problem featuring {}: -Attacked prompt: b'Work out the approach to this math problem featuring {}:' -Original acc: 13.75%, attacked acc: 7.50%, dropped acc: 6.25% - -Original prompt: In the position of a mathematics coach, figure out the answer to this math challenge involving {}: -Attacked prompt: b'i the position of a mathematician program, show out the answer to this math challenge involving {}: ' -Original acc: 14.37%, attacked acc: 10.62%, dropped acc: 3.75% - -Original prompt: As a math guru, evaluate the following mathematical task regarding {}: -Attacked prompt: b'As a math [UNK], read the following mathematical task into {}: ' -Original acc: 14.37%, attacked acc: 8.75%, dropped acc: 5.62% - -Original prompt: As a mathematics instructor, calculate the answer to the following problem related to {}: -Attacked prompt: b'As a mathematics instructor, calculate the sum to the following problem related to {}: ' -Original acc: 13.12%, attacked acc: 1.88%, dropped acc: 11.25% - -## checklist - -Original prompt: Evaluate the following mathematical problem that includes {}: -Attacked prompt: b'Evaluate the following mathematical problem that includes is77sOXAu8 {}:' -Original acc: 16.25%, attacked acc: 10.62%, dropped acc: 5.63% - -Original prompt: Obtain the result for this math question regarding {}: -Attacked prompt: b'Obtain the result for this math question regarding ItGDrrA1Sc {}:' -Original acc: 14.37%, attacked acc: 11.25%, dropped acc: 3.12% - -Original prompt: Work out the answer to this math problem featuring {}: -Attacked prompt: b'Work out the answer to this math problem featuring EAB4KP2NVY {}:' -Original acc: 13.75%, attacked acc: 11.88%, dropped acc: 1.88% - -Original prompt: In the position of a mathematics coach, figure out the answer to this math challenge involving {}: -Attacked prompt: b'In the position of a mathematics coach, figure out the answer to this math challenge involving is77sOXAu8 {}: ' -Original acc: 14.37%, attacked acc: 11.25%, dropped acc: 3.12% - -Original prompt: As a math guru, evaluate the following mathematical task regarding {}: -Attacked prompt: b'As a math guru, evaluate the following mathematical task regarding KjPJJ2a7RB {}: ' -Original acc: 14.37%, attacked acc: 10.00%, dropped acc: 4.37% - -Original prompt: As a mathematics instructor, calculate the answer to the following problem related to {}: -Attacked prompt: b'As a mathematics instructor, calculate the answer to the following problem related to KjPJJ2a7RB {}: ' -Original acc: 13.12%, attacked acc: 10.00%, dropped acc: 3.12% - -## deepwordbug - -Original prompt: Evaluate the following mathematical problem that includes {}: -Attacked prompt: b'vEaluate the followinLg mtahematical problem that incldes {}:' -Original acc: 16.25%, attacked acc: 8.12%, dropped acc: 8.12% - -Original prompt: Obtain the result for this math question regarding {}: -Attacked prompt: b'Obtain the rseult for this math question vegarding {}:' -Original acc: 14.37%, attacked acc: 9.38%, dropped acc: 5.00% - -Original prompt: Work out the answer to this math problem featuring {}: -Attacked prompt: b'QWork out the anLswer to this math porblem featuying {}:' -Original acc: 13.75%, attacked acc: 8.12%, dropped acc: 5.63% - -Original prompt: In the position of a mathematics coach, figure out the answer to this math challenge involving {}: -Attacked prompt: b'nI the pdosition of a mathematics coach, figure out the answer to this math challenge involving {}: ' -Original acc: 14.37%, attacked acc: 11.25%, dropped acc: 3.12% - -Original prompt: As a math guru, evaluate the following mathematical task regarding {}: -Attacked prompt: b'As a math gurC, evaluate the following mathematical task regaridng {}: ' -Original acc: 14.37%, attacked acc: 11.25%, dropped acc: 3.12% - -Original prompt: As a mathematics instructor, calculate the answer to the following problem related to {}: -Attacked prompt: b'As a mathematics iestructor, calculate the answex to the following problem related to {}: ' -Original acc: 13.12%, attacked acc: 8.12%, dropped acc: 5.00% - -## stresstest - -Original prompt: Evaluate the following mathematical problem that includes {}: -Attacked prompt: b'Evaluate the following mathematical problem that includes and true is true {}:' -Original acc: 16.25%, attacked acc: 13.12%, dropped acc: 3.12% - -Original prompt: Obtain the result for this math question regarding {}: -Attacked prompt: b'Obtain the result for this math question regarding and false is not true {}:' -Original acc: 14.37%, attacked acc: 12.50%, dropped acc: 1.87% - -Original prompt: Work out the answer to this math problem featuring {}: -Attacked prompt: b'Work out the answer to this math problem featuring and false is not true {}:' -Original acc: 13.75%, attacked acc: 11.88%, dropped acc: 1.88% - -Original prompt: In the position of a mathematics coach, figure out the answer to this math challenge involving {}: -Attacked prompt: b'In the position of a mathematics coach, figure out the answer to this math challenge involving and false is not true {}: ' -Original acc: 14.37%, attacked acc: 12.50%, dropped acc: 1.87% - -Original prompt: As a math guru, evaluate the following mathematical task regarding {}: -Attacked prompt: b'As a math guru, evaluate the following mathematical task regarding and false is not true {}: ' -Original acc: 14.37%, attacked acc: 13.12%, dropped acc: 1.25% - -Original prompt: As a mathematics instructor, calculate the answer to the following problem related to {}: -Attacked prompt: b'As a mathematics instructor, calculate the answer to the following problem related to and false is not true {}: ' -Original acc: 13.12%, attacked acc: 11.25%, dropped acc: 1.88% - -## textbugger - -Original prompt: Evaluate the following mathematical problem that includes {}: -Attacked prompt: b'Evaluate the following mathematical problem that in\xcf\xb2ludes {}:' -Original acc: 16.25%, attacked acc: 10.00%, dropped acc: 6.25% - -Original prompt: Obtain the result for this math question regarding {}: -Attacked prompt: b'Obta in the result for this math question relative {}:' -Original acc: 14.37%, attacked acc: 11.25%, dropped acc: 3.12% - -Original prompt: Work out the answer to this math problem featuring {}: -Attacked prompt: b'Work out the answe\xe2\xb2\x85 to this math problem featuring {}:' -Original acc: 13.75%, attacked acc: 13.12%, dropped acc: 0.63% - -Original prompt: In the position of a mathematics coach, figure out the answer to this math challenge involving {}: -Attacked prompt: b'In the position of a arithmetic coach, fi gure out the replying to this math challenge involving {}: ' -Original acc: 14.37%, attacked acc: 10.62%, dropped acc: 3.75% - -Original prompt: As a math guru, evaluate the following mathematical task regarding {}: -Attacked prompt: b'As a math guru, evalaute the f\xd0\xbellowing mathematical task regarding {}: ' -Original acc: 14.37%, attacked acc: 10.62%, dropped acc: 3.75% - -Original prompt: As a mathematics instructor, calculate the answer to the following problem related to {}: -Attacked prompt: b'As a mathematics instructo\xe2\xb2\x85, calculate the answers to the following problem related to {}: ' -Original acc: 13.12%, attacked acc: 9.38%, dropped acc: 3.75% - -## textfooler - -Original prompt: Evaluate the following mathematical problem that includes {}: -Attacked prompt: b'Determine the following mathematical subjects that spans {}:' -Original acc: 16.25%, attacked acc: 4.38%, dropped acc: 11.88% - -Original prompt: Obtain the result for this math question regarding {}: -Attacked prompt: b'Obtain the raison for this math problema relative {}:' -Original acc: 14.37%, attacked acc: 4.38%, dropped acc: 10.00% - -Original prompt: Work out the answer to this math problem featuring {}: -Attacked prompt: b'Work out the address to this math problem idiosyncratic {}:' -Original acc: 13.75%, attacked acc: 1.88%, dropped acc: 11.88% - -Original prompt: In the position of a mathematics coach, figure out the answer to this math challenge involving {}: -Attacked prompt: b'About the position of a computations coach, figure out the address to this math challenge implying {}: ' -Original acc: 14.37%, attacked acc: 1.88%, dropped acc: 12.50% - -Original prompt: As a math guru, evaluate the following mathematical task regarding {}: -Attacked prompt: b'As a math guru, evaluate the suite mathematical task regarding {}: ' -Original acc: 14.37%, attacked acc: 11.88%, dropped acc: 2.50% - -Original prompt: As a mathematics instructor, calculate the answer to the following problem related to {}: -Attacked prompt: b'As a mathematics prof, calculate the address to the following problem related to {}: ' -Original acc: 13.12%, attacked acc: 1.25%, dropped acc: 11.88% - -## translation - -Language: chinese, acc: 13.75%, prompt: Calculate the answer to a mathematical question about {}. -Language: chinese, acc: 14.37%, prompt: Calculate the value of {}. -Language: chinese, acc: 11.25%, prompt: Please calculate the result of {}. -Language: chinese, acc: 16.25%, prompt: According to {}, solve the following mathematical problem. -Language: chinese, acc: 14.37%, prompt: Do the math with {}. -Language: chinese, acc: 11.88%, prompt: Solve mathematical problems involving {}. -Language: chinese, acc: 13.75%, prompt: Please calculate the answer involving {}. -Language: chinese, acc: 15.00%, prompt: Solve for the following values based on {}. -Language: chinese, acc: 14.37%, prompt: Calculate the following mathematical tasks using {}. -Language: chinese, acc: 13.75%, prompt: Calculate the answer to the {} related question. -Language: french, acc: 14.37%, prompt: Calculate the answer to the following mathematical question concerning {}. -Language: french, acc: 11.88%, prompt: Calculate the result of {}. -Language: french, acc: 13.12%, prompt: Please calculate the value of {}. -Language: french, acc: 16.25%, prompt: According to {}, solve the following mathematical problem. -Language: french, acc: 13.12%, prompt: Perform mathematical calculations with {}. -Language: french, acc: 11.25%, prompt: Solve the mathematical problem involving {}. -Language: french, acc: 13.12%, prompt: Please calculate the answer related to {}. -Language: french, acc: 15.00%, prompt: According to {}, set the following value. -Language: french, acc: 13.12%, prompt: Perform the following mathematical task using {}. -Language: french, acc: 13.75%, prompt: Calculate the answer to the questions related to {}. -Language: arabic, acc: 14.37%, prompt: Compute the answer to the next mathematical question about {}. -Language: arabic, acc: 13.12%, prompt: Calculate {}. -Language: arabic, acc: 12.50%, prompt: Please calculate {}. -Language: arabic, acc: 16.25%, prompt: According to {}, solve the following mathematical problem. -Language: arabic, acc: 15.00%, prompt: Do mathematical calculations using {}. -Language: arabic, acc: 14.37%, prompt: A solution to the mathematical problem involving {}. -Language: arabic, acc: 13.12%, prompt: Please calculate the answer regarding {}. -Language: arabic, acc: 14.37%, prompt: According to {}, determine the next value. -Language: arabic, acc: 13.12%, prompt: DO THE NEXT MATHEMATICAL JOB USING {}. -Language: arabic, acc: 13.75%, prompt: Calculate the answer to questions related to {}. -Language: spanish, acc: 13.12%, prompt: Compute the answer to the following mathematical question on {}. -Language: spanish, acc: 10.62%, prompt: Compute the result of {}. -Language: spanish, acc: 13.12%, prompt: Please calculate the value of {}. -Language: spanish, acc: 14.37%, prompt: As {}, it solves the following mathematical problem. -Language: spanish, acc: 15.00%, prompt: Performs mathematical calculations using {}. -Language: spanish, acc: 11.25%, prompt: Solve the mathematical problem involving {}. -Language: spanish, acc: 13.12%, prompt: Please calculate the answer related to {}. -Language: spanish, acc: 12.50%, prompt: As {}, determine the next value. -Language: spanish, acc: 13.12%, prompt: Perform the following mathematical task using {}. -Language: spanish, acc: 12.50%, prompt: Compute the answer to questions related to {}. -Language: japanese, acc: 14.37%, prompt: Calculate the answers to the math questions about {}. -Language: japanese, acc: 14.37%, prompt: Calculate the value of {}. -Language: japanese, acc: 14.37%, prompt: Please find the answer to {}. -Language: japanese, acc: 11.88%, prompt: Based on {}, please solve the following mathematical problems. -Language: japanese, acc: 15.62%, prompt: Use {} to perform mathematical calculations. -Language: japanese, acc: 14.37%, prompt: Please solve the math problem that contains {}. -Language: japanese, acc: 12.50%, prompt: Please calculate the answers related to {}. -Language: japanese, acc: 15.00%, prompt: Based on {}, find the following values: -Language: japanese, acc: 15.62%, prompt: Use {} to solve the following mathematical problem. -Language: japanese, acc: 12.50%, prompt: Please calculate the answers to the questions related to {}. -Language: korean, acc: 12.50%, prompt: Calculate the answer of the following math problem to {}. -Language: korean, acc: 11.88%, prompt: Calculate the result of {}. -Language: korean, acc: 13.12%, prompt: Please calculate the value of {}. -Language: korean, acc: 14.37%, prompt: Work out the following math problems according to {}. -Language: korean, acc: 15.62%, prompt: Use {} to proceed with mathematical calculations. -Language: korean, acc: 11.25%, prompt: Work out a math problem involving {}. -Language: korean, acc: 13.75%, prompt: Please calculate the answer to {}. -Language: korean, acc: 11.88%, prompt: Try to get the following values according to {}. -Language: korean, acc: 15.00%, prompt: Work out the next math task using {}. -Language: korean, acc: 13.12%, prompt: Calculate the answer of the problem involving {}. \ No newline at end of file diff --git a/spaces/Marshalls/testmtd/analysis/visualization/generate_video_from_features.py b/spaces/Marshalls/testmtd/analysis/visualization/generate_video_from_features.py deleted file mode 100644 index 681f6e7da960cfbd1d80fac57796ff79187e48ee..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/analysis/visualization/generate_video_from_features.py +++ /dev/null @@ -1,42 +0,0 @@ -import argparse - -import os, sys - -THIS_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, os.pardir)) -DATA_DIR = os.path.join(ROOT_DIR, 'data') -sys.path.append(ROOT_DIR) - -from analysis.visualization.generate_video_from_mats import generate_video_from_mats -from analysis.visualization.generate_video_from_expmaps import generate_video_from_expmaps -from analysis.visualization.generate_video_from_moglow_pos import generate_video_from_moglow_loc - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Generate video from expmaps') - parser.add_argument('--feature_type', type=str, help="rot_mat, expmap, position") - parser.add_argument('--features_file', type=str) - parser.add_argument('--output_folder', type=str, default="generated/videos/") - parser.add_argument('--audio_file', type=str, default=None) - parser.add_argument('--trim_audio', type=float, default=0, help="in frames") - parser.add_argument('--fps', type=float, default=60) - parser.add_argument('--plot_mats', action="store_true") - parser.add_argument('--pipeline_file', type=str) - parser.add_argument('--control_file', type=str) - parser.add_argument('--generate_bvh', action="store_true") - args = parser.parse_args() - globals().update(vars(args)) - - trim_audio /= fps #converting trim_audio from being in frames (which is more convenient as thats how we specify the output_shift in the models), to seconds - - print("trim_audio: ",trim_audio) - - if feature_type == "rot_mat": - generate_video_from_mats(features_file,output_folder,audio_file,trim_audio,fps,plot_mats) - elif feature_type == "expmap_scaled" or feature_type == "expmap_scaled_20": - assert pipeline_file is not None #Need to supply pipeline file to process exmaps - generate_video_from_expmaps(features_file,pipeline_file,output_folder,audio_file,trim_audio,generate_bvh) - elif feature_type == "moglow_loc": - assert control_file is not None - generate_video_from_moglow_loc(features_file,control_file,output_folder,audio_file,fps,trim_audio) - else: - raise NotImplementedError(f'Feature type {feature_type} not implemented') diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/fileio/handlers/json_handler.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/fileio/handlers/json_handler.py deleted file mode 100644 index 18d4f15f74139d20adff18b20be5529c592a66b6..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/fileio/handlers/json_handler.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json - -import numpy as np - -from .base import BaseFileHandler - - -def set_default(obj): - """Set default json values for non-serializable values. - - It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list. - It also converts ``np.generic`` (including ``np.int32``, ``np.float32``, - etc.) into plain numbers of plain python built-in types. - """ - if isinstance(obj, (set, range)): - return list(obj) - elif isinstance(obj, np.ndarray): - return obj.tolist() - elif isinstance(obj, np.generic): - return obj.item() - raise TypeError(f'{type(obj)} is unsupported for json dump') - - -class JsonHandler(BaseFileHandler): - - def load_from_fileobj(self, file): - return json.load(file) - - def dump_to_fileobj(self, obj, file, **kwargs): - kwargs.setdefault('default', set_default) - json.dump(obj, file, **kwargs) - - def dump_to_str(self, obj, **kwargs): - kwargs.setdefault('default', set_default) - return json.dumps(obj, **kwargs) diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/ade.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/ade.py deleted file mode 100644 index 5913e43775ed4920b6934c855eb5a37c54218ebf..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/ade.py +++ /dev/null @@ -1,84 +0,0 @@ -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class ADE20KDataset(CustomDataset): - """ADE20K dataset. - - In segmentation map annotation for ADE20K, 0 stands for background, which - is not included in 150 categories. ``reduce_zero_label`` is fixed to True. - The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to - '.png'. - """ - CLASSES = ( - 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', - 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', - 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', - 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', - 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', - 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', - 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', - 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', - 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', - 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', - 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', - 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', - 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', - 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', - 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', - 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', - 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', - 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', - 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', - 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', - 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', - 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', - 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', - 'clock', 'flag') - - PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], - [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], - [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], - [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], - [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], - [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], - [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], - [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], - [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], - [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], - [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], - [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], - [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], - [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], - [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], - [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], - [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], - [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], - [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], - [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], - [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], - [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], - [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], - [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], - [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], - [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], - [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], - [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], - [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], - [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], - [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], - [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], - [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], - [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], - [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], - [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], - [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], - [102, 255, 0], [92, 0, 255]] - - def __init__(self, **kwargs): - super(ADE20KDataset, self).__init__( - img_suffix='.jpg', - seg_map_suffix='.png', - reduce_zero_label=True, - **kwargs) diff --git a/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/apps/prt_util.py b/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/apps/prt_util.py deleted file mode 100644 index 7eba32fa0b396f420b2e332abbb67135dbc14d6b..0000000000000000000000000000000000000000 --- a/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/apps/prt_util.py +++ /dev/null @@ -1,142 +0,0 @@ -import os -import trimesh -import numpy as np -import math -from scipy.special import sph_harm -import argparse -from tqdm import tqdm - -def factratio(N, D): - if N >= D: - prod = 1.0 - for i in range(D+1, N+1): - prod *= i - return prod - else: - prod = 1.0 - for i in range(N+1, D+1): - prod *= i - return 1.0 / prod - -def KVal(M, L): - return math.sqrt(((2 * L + 1) / (4 * math.pi)) * (factratio(L - M, L + M))) - -def AssociatedLegendre(M, L, x): - if M < 0 or M > L or np.max(np.abs(x)) > 1.0: - return np.zeros_like(x) - - pmm = np.ones_like(x) - if M > 0: - somx2 = np.sqrt((1.0 + x) * (1.0 - x)) - fact = 1.0 - for i in range(1, M+1): - pmm = -pmm * fact * somx2 - fact = fact + 2 - - if L == M: - return pmm - else: - pmmp1 = x * (2 * M + 1) * pmm - if L == M+1: - return pmmp1 - else: - pll = np.zeros_like(x) - for i in range(M+2, L+1): - pll = (x * (2 * i - 1) * pmmp1 - (i + M - 1) * pmm) / (i - M) - pmm = pmmp1 - pmmp1 = pll - return pll - -def SphericalHarmonic(M, L, theta, phi): - if M > 0: - return math.sqrt(2.0) * KVal(M, L) * np.cos(M * phi) * AssociatedLegendre(M, L, np.cos(theta)) - elif M < 0: - return math.sqrt(2.0) * KVal(-M, L) * np.sin(-M * phi) * AssociatedLegendre(-M, L, np.cos(theta)) - else: - return KVal(0, L) * AssociatedLegendre(0, L, np.cos(theta)) - -def save_obj(mesh_path, verts): - file = open(mesh_path, 'w') - for v in verts: - file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2])) - file.close() - -def sampleSphericalDirections(n): - xv = np.random.rand(n,n) - yv = np.random.rand(n,n) - theta = np.arccos(1-2 * xv) - phi = 2.0 * math.pi * yv - - phi = phi.reshape(-1) - theta = theta.reshape(-1) - - vx = -np.sin(theta) * np.cos(phi) - vy = -np.sin(theta) * np.sin(phi) - vz = np.cos(theta) - return np.stack([vx, vy, vz], 1), phi, theta - -def getSHCoeffs(order, phi, theta): - shs = [] - for n in range(0, order+1): - for m in range(-n,n+1): - s = SphericalHarmonic(m, n, theta, phi) - shs.append(s) - - return np.stack(shs, 1) - -def computePRT(mesh_path, n, order): - mesh = trimesh.load(mesh_path, process=False) - vectors_orig, phi, theta = sampleSphericalDirections(n) - SH_orig = getSHCoeffs(order, phi, theta) - - w = 4.0 * math.pi / (n*n) - - origins = mesh.vertices - normals = mesh.vertex_normals - n_v = origins.shape[0] - - origins = np.repeat(origins[:,None], n, axis=1).reshape(-1,3) - normals = np.repeat(normals[:,None], n, axis=1).reshape(-1,3) - PRT_all = None - for i in tqdm(range(n)): - SH = np.repeat(SH_orig[None,(i*n):((i+1)*n)], n_v, axis=0).reshape(-1,SH_orig.shape[1]) - vectors = np.repeat(vectors_orig[None,(i*n):((i+1)*n)], n_v, axis=0).reshape(-1,3) - - dots = (vectors * normals).sum(1) - front = (dots > 0.0) - - delta = 1e-3*min(mesh.bounding_box.extents) - hits = mesh.ray.intersects_any(origins + delta * normals, vectors) - nohits = np.logical_and(front, np.logical_not(hits)) - - PRT = (nohits.astype(np.float) * dots)[:,None] * SH - - if PRT_all is not None: - PRT_all += (PRT.reshape(-1, n, SH.shape[1]).sum(1)) - else: - PRT_all = (PRT.reshape(-1, n, SH.shape[1]).sum(1)) - - PRT = w * PRT_all - - # NOTE: trimesh sometimes break the original vertex order, but topology will not change. - # when loading PRT in other program, use the triangle list from trimesh. - return PRT, mesh.faces - -def testPRT(dir_path, n=40): - if dir_path[-1] == '/': - dir_path = dir_path[:-1] - sub_name = dir_path.split('/')[-1][:-4] - obj_path = os.path.join(dir_path, sub_name + '_100k.obj') - os.makedirs(os.path.join(dir_path, 'bounce'), exist_ok=True) - - PRT, F = computePRT(obj_path, n, 2) - np.savetxt(os.path.join(dir_path, 'bounce', 'bounce0.txt'), PRT, fmt='%.8f') - np.save(os.path.join(dir_path, 'bounce', 'face.npy'), F) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('-i', '--input', type=str, default='/home/shunsuke/Downloads/rp_dennis_posed_004_OBJ') - parser.add_argument('-n', '--n_sample', type=int, default=40, help='squared root of number of sampling. the higher, the more accurate, but slower') - args = parser.parse_args() - - testPRT(args.input) diff --git a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/__init__.py b/spaces/MirageML/sjc/sd1/ldm/models/diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/schedules/schedule_adam_base.py b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/schedules/schedule_adam_base.py deleted file mode 100644 index 744f32858e0fdf2722472e3f467444f5ffdd9577..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/schedules/schedule_adam_base.py +++ /dev/null @@ -1,13 +0,0 @@ -# Note: This schedule config serves as a base config for other schedules. -# Users would have to at least fill in "max_epochs" and "val_interval" -# in order to use this config in their experiments. - -# optimizer -optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=3e-4)) -train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=None, val_interval=1) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') -# learning policy -param_scheduler = [ - dict(type='ConstantLR', factor=1.0), -] diff --git a/spaces/MrBodean/VoiceClone/encoder/data_objects/speaker.py b/spaces/MrBodean/VoiceClone/encoder/data_objects/speaker.py deleted file mode 100644 index 494e882fe34fc38dcc793ab8c74a6cc2376bb7b5..0000000000000000000000000000000000000000 --- a/spaces/MrBodean/VoiceClone/encoder/data_objects/speaker.py +++ /dev/null @@ -1,40 +0,0 @@ -from encoder.data_objects.random_cycler import RandomCycler -from encoder.data_objects.utterance import Utterance -from pathlib import Path - -# Contains the set of utterances of a single speaker -class Speaker: - def __init__(self, root: Path): - self.root = root - self.name = root.name - self.utterances = None - self.utterance_cycler = None - - def _load_utterances(self): - with self.root.joinpath("_sources.txt").open("r") as sources_file: - sources = [l.split(",") for l in sources_file] - sources = {frames_fname: wave_fpath for frames_fname, wave_fpath in sources} - self.utterances = [Utterance(self.root.joinpath(f), w) for f, w in sources.items()] - self.utterance_cycler = RandomCycler(self.utterances) - - def random_partial(self, count, n_frames): - """ - Samples a batch of unique partial utterances from the disk in a way that all - utterances come up at least once every two cycles and in a random order every time. - - :param count: The number of partial utterances to sample from the set of utterances from - that speaker. Utterances are guaranteed not to be repeated if is not larger than - the number of utterances available. - :param n_frames: The number of frames in the partial utterance. - :return: A list of tuples (utterance, frames, range) where utterance is an Utterance, - frames are the frames of the partial utterances and range is the range of the partial - utterance with regard to the complete utterance. - """ - if self.utterances is None: - self._load_utterances() - - utterances = self.utterance_cycler.sample(count) - - a = [(u,) + u.random_partial(n_frames) for u in utterances] - - return a diff --git a/spaces/MrBodean/VoiceClone/synthesizer/utils/_cmudict.py b/spaces/MrBodean/VoiceClone/synthesizer/utils/_cmudict.py deleted file mode 100644 index 2cef1f896d4fb78478884fe8e810956998d5e3b3..0000000000000000000000000000000000000000 --- a/spaces/MrBodean/VoiceClone/synthesizer/utils/_cmudict.py +++ /dev/null @@ -1,62 +0,0 @@ -import re - -valid_symbols = [ - "AA", "AA0", "AA1", "AA2", "AE", "AE0", "AE1", "AE2", "AH", "AH0", "AH1", "AH2", - "AO", "AO0", "AO1", "AO2", "AW", "AW0", "AW1", "AW2", "AY", "AY0", "AY1", "AY2", - "B", "CH", "D", "DH", "EH", "EH0", "EH1", "EH2", "ER", "ER0", "ER1", "ER2", "EY", - "EY0", "EY1", "EY2", "F", "G", "HH", "IH", "IH0", "IH1", "IH2", "IY", "IY0", "IY1", - "IY2", "JH", "K", "L", "M", "N", "NG", "OW", "OW0", "OW1", "OW2", "OY", "OY0", - "OY1", "OY2", "P", "R", "S", "SH", "T", "TH", "UH", "UH0", "UH1", "UH2", "UW", - "UW0", "UW1", "UW2", "V", "W", "Y", "Z", "ZH" -] - -_valid_symbol_set = set(valid_symbols) - - -class CMUDict: - """Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict""" - def __init__(self, file_or_path, keep_ambiguous=True): - if isinstance(file_or_path, str): - with open(file_or_path, encoding="latin-1") as f: - entries = _parse_cmudict(f) - else: - entries = _parse_cmudict(file_or_path) - if not keep_ambiguous: - entries = {word: pron for word, pron in entries.items() if len(pron) == 1} - self._entries = entries - - - def __len__(self): - return len(self._entries) - - - def lookup(self, word): - """Returns list of ARPAbet pronunciations of the given word.""" - return self._entries.get(word.upper()) - - - -_alt_re = re.compile(r"\([0-9]+\)") - - -def _parse_cmudict(file): - cmudict = {} - for line in file: - if len(line) and (line[0] >= "A" and line[0] <= "Z" or line[0] == "'"): - parts = line.split(" ") - word = re.sub(_alt_re, "", parts[0]) - pronunciation = _get_pronunciation(parts[1]) - if pronunciation: - if word in cmudict: - cmudict[word].append(pronunciation) - else: - cmudict[word] = [pronunciation] - return cmudict - - -def _get_pronunciation(s): - parts = s.strip().split(" ") - for part in parts: - if part not in _valid_symbol_set: - return None - return " ".join(parts) diff --git a/spaces/NATSpeech/PortaSpeech/modules/commons/wavenet.py b/spaces/NATSpeech/PortaSpeech/modules/commons/wavenet.py deleted file mode 100644 index 7809c9b9d3331ba4fd2ffd4caae14e721e4b0732..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/PortaSpeech/modules/commons/wavenet.py +++ /dev/null @@ -1,97 +0,0 @@ -import torch -from torch import nn - - -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -class WN(torch.nn.Module): - def __init__(self, hidden_size, kernel_size, dilation_rate, n_layers, c_cond=0, - p_dropout=0, share_cond_layers=False, is_BTC=False): - super(WN, self).__init__() - assert (kernel_size % 2 == 1) - assert (hidden_size % 2 == 0) - self.is_BTC = is_BTC - self.hidden_size = hidden_size - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = c_cond - self.p_dropout = p_dropout - self.share_cond_layers = share_cond_layers - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if c_cond != 0 and not share_cond_layers: - cond_layer = torch.nn.Conv1d(c_cond, 2 * hidden_size * n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_size, 2 * hidden_size, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_size - else: - res_skip_channels = hidden_size - - res_skip_layer = torch.nn.Conv1d(hidden_size, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, nonpadding=None, cond=None): - if self.is_BTC: - x = x.transpose(1, 2) - cond = cond.transpose(1, 2) if cond is not None else None - nonpadding = nonpadding.transpose(1, 2) if nonpadding is not None else None - if nonpadding is None: - nonpadding = 1 - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_size]) - - if cond is not None and not self.share_cond_layers: - cond = self.cond_layer(cond) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - x_in = self.drop(x_in) - if cond is not None: - cond_offset = i * 2 * self.hidden_size - cond_l = cond[:, cond_offset:cond_offset + 2 * self.hidden_size, :] - else: - cond_l = torch.zeros_like(x_in) - - acts = fused_add_tanh_sigmoid_multiply(x_in, cond_l, n_channels_tensor) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - x = (x + res_skip_acts[:, :self.hidden_size, :]) * nonpadding - output = output + res_skip_acts[:, self.hidden_size:, :] - else: - output = output + res_skip_acts - output = output * nonpadding - if self.is_BTC: - output = output.transpose(1, 2) - return output - - def remove_weight_norm(self): - def remove_weight_norm(m): - try: - nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(remove_weight_norm) diff --git a/spaces/NNDM/img-to-music/app.py b/spaces/NNDM/img-to-music/app.py deleted file mode 100644 index a325b27b8177f9bca294439724ec16c2da2f0169..0000000000000000000000000000000000000000 --- a/spaces/NNDM/img-to-music/app.py +++ /dev/null @@ -1,163 +0,0 @@ -import time -import base64 -import gradio as gr -from sentence_transformers import SentenceTransformer - -import httpx -import json - -import os -import requests -import urllib - -from os import path -from pydub import AudioSegment - -#img_to_text = gr.Blocks.load(name="spaces/pharma/CLIP-Interrogator") -img_to_text = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2") - -from share_btn import community_icon_html, loading_icon_html, share_js - -def get_prompts(uploaded_image, track_duration, gen_intensity, gen_mode): - print("calling clip interrogator") - #prompt = img_to_text(uploaded_image, "ViT-L (best for Stable Diffusion 1.*)", "fast", fn_index=1)[0] - prompt = img_to_text(uploaded_image, 'fast', 4, fn_index=1)[0] - print(prompt) - music_result = generate_track_by_prompt(prompt, track_duration, gen_intensity, gen_mode) - print(music_result) - return music_result[0], gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - -from utils import get_tags_for_prompts, get_mubert_tags_embeddings, get_pat - -minilm = SentenceTransformer('all-MiniLM-L6-v2') -mubert_tags_embeddings = get_mubert_tags_embeddings(minilm) - - -def get_track_by_tags(tags, pat, duration, gen_intensity, gen_mode, maxit=20): - - r = httpx.post('https://api-b2b.mubert.com/v2/RecordTrackTTM', - json={ - "method": "RecordTrackTTM", - "params": { - "pat": pat, - "duration": duration, - "format": "wav", - "intensity":gen_intensity, - "tags": tags, - "mode": gen_mode - } - }) - - rdata = json.loads(r.text) - assert rdata['status'] == 1, rdata['error']['text'] - trackurl = rdata['data']['tasks'][0]['download_link'] - - print('Generating track ', end='') - for i in range(maxit): - r = httpx.get(trackurl) - if r.status_code == 200: - return trackurl - time.sleep(1) - - -def generate_track_by_prompt(prompt, duration, gen_intensity, gen_mode): - try: - pat = get_pat("prodia@prodia.com") - _, tags = get_tags_for_prompts(minilm, mubert_tags_embeddings, [prompt, ])[0] - result = get_track_by_tags(tags, pat, int(duration), gen_intensity, gen_mode) - print(result) - return result, ",".join(tags), "Success" - except Exception as e: - return None, "", str(e) - -def convert_mp3_to_wav(mp3_filepath): - - url = mp3_filepath - save_as = "file.mp3" - - data = urllib.request.urlopen(url) - - f = open(save_as,'wb') - f.write(data.read()) - f.close() - - wave_file="file.wav" - - sound = AudioSegment.from_mp3(save_as) - sound.export(wave_file, format="wav") - - return wave_file - -article = """ - - - -
    -

    You may also like:

    -
    - - - - - - - - - - -
    -
    - - -""" - -with gr.Blocks(css="style.css") as demo: - with gr.Column(elem_id="col-container"): - - gr.HTML("""
    -
    -

    - Image to Music -

    -
    -

    - Sends an image in to CLIP Interrogator - to generate a text prompt which is then run through - Mubert text-to-music to generate music from the input image! -

    -
    """) - - input_img = gr.Image(type="filepath", elem_id="input-img") - music_output = gr.Audio(label="Result", type="filepath", elem_id="music-output").style(height="5rem") - - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html, visible=False) - loading_icon = gr.HTML(loading_icon_html, visible=False) - share_button = gr.Button("Share to community", elem_id="share-btn", visible=False) - - with gr.Accordion(label="Music Generation Options", open=False): - track_duration = gr.Slider(minimum=20, maximum=120, value=30, step=5, label="Track duration", elem_id="duration-inp") - with gr.Row(): - gen_intensity = gr.Dropdown(choices=["low", "medium", "high"], value="medium", label="Intensity") - gen_mode = gr.Radio(label="mode", choices=["track", "loop"], value="track") - - generate = gr.Button("Generate Music from Image") - - gr.HTML(article) - - generate.click(get_prompts, inputs=[input_img,track_duration,gen_intensity,gen_mode], outputs=[music_output, share_button, community_icon, loading_icon], api_name="i2m") - share_button.click(None, [], [], _js=share_js) - -demo.queue(max_size=32, concurrency_count=20).launch() \ No newline at end of file diff --git a/spaces/Notalib/GPT-Whisper-Wolfram-Google-Test/README.md b/spaces/Notalib/GPT-Whisper-Wolfram-Google-Test/README.md deleted file mode 100644 index 1c6bb9400ebd5017d7a362ac785432b2aff7fa36..0000000000000000000000000000000000000000 --- a/spaces/Notalib/GPT-Whisper-Wolfram-Google-Test/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GPT Whisper Wolfram Google Test -emoji: 🏢 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: bsd-3-clause ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/scripts/prepare_timit.sh b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/scripts/prepare_timit.sh deleted file mode 100644 index d8f5d596b4b4ec55f11a82dbbf83bad4a22c0b6c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/scripts/prepare_timit.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -timit_root=$1 # assume it is the upper-cased version -tgt_dir=$2 -model=$3 - -set -eu - -setups="matched unmatched" -splits="test valid train train_text" - -tgt_dir=$(realpath $tgt_dir) -sph2wav=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe -wav_dir=$tgt_dir/wav - - -mkdir -p $tgt_dir $wav_dir -find $timit_root/{TRAIN,TEST} -iname "*.WAV" > $tgt_dir/all_sph.flist -cat $tgt_dir/all_sph.flist | sed -e 's#//*#/#g' -e 's#.*/\([^/]*\)/\([^/]*\).WAV#\1_\2#g' > $tgt_dir/all.uid -paste -d' ' $tgt_dir/{all_sph.flist,all.uid} | \ - awk -v sph2wav=$sph2wav -v wav_dir=$wav_dir '{print sph2wav " -f wav " $1 " > " wav_dir "/" $2 ".wav"}' \ - > $tgt_dir/sph2wav.sh -bash $tgt_dir/sph2wav.sh -cat $tgt_dir/all.uid | awk -v wav_dir=$(pwd)/$wav_dir '{print $1" "wav_dir"/"$1".wav"}' | sort > $tgt_dir/all_wav.scp -cut -d' ' -f2 $tgt_dir/all_wav.scp | xargs -I{} soxi -s {} > $tgt_dir/all.dur -paste -d' ' $tgt_dir/{all_wav.scp,all.dur} > $tgt_dir/all_wav_dur.scp -rm $tgt_dir/{all.uid,all_sph.flist,sph2wav.sh} - -find $timit_root/{TRAIN,TEST} -iname "*.PHN" > $tgt_dir/all_phn60.flist -while read line; do - if [ ! -f $line ]; then - >&2 echo "Cannot find transcription file '$line'" && exit 1; - fi - cut -f3 -d' ' "$line" | tr '\n' ' ' | perl -ape 's: *$:\n:;' -done < $tgt_dir/all_phn60.flist > $tgt_dir/all.phn60 -cat $tgt_dir/all_phn60.flist | sed -e 's#//*#/#g' -e 's#.*/\([^/]*\)/\([^/]*\).PHN#\1_\2#g' | \ - paste -d' ' - $tgt_dir/all.phn60 | \ - $KALDI_ROOT/egs/timit/s5/local/timit_norm_trans.pl -i - -m $KALDI_ROOT/egs/timit/s5/conf/phones.60-48-39.map -to 39 | \ - sort > $tgt_dir/all.phn -echo "done preparing wav and 39-phone transcripts" - - -for s in $setups; do - mkdir -p $tgt_dir/$s - for x in $splits; do - uid_path=config/timit_${s}/${x}.uid - grep -w -f $uid_path $tgt_dir/all.phn | cut -d' ' -f2- > $tgt_dir/$s/$x.phn - ln -sf $(realpath $tgt_dir/$s/$x.phn) $tgt_dir/$s/$x.wrd - - echo "/" > $tgt_dir/$s/$x.tsv && grep -w -f $uid_path $tgt_dir/all_wav_dur.scp | cut -d' ' -f2- | sed 's# #\t#' >> $tgt_dir/$s/$x.tsv - done - - for x in $splits; do - cat $tgt_dir/$s/$x.phn - done | tr ' ' '\n' | sort -u | awk '{print $1" "1}' > $tgt_dir/$s/dict.phn.txt - ln -sf $(realpath $tgt_dir/$s/dict.phn.txt) $tgt_dir/$s/dict.wrd.txt -done -echo "done preparing unmatched and matched setups for TIMIT" - - -for s in $setups; do - zsh scripts/prepare_audio.sh $tgt_dir/$s $tgt_dir/$s/feat $model - - lm_dir=$tgt_dir/$s/phones - fst_dir=$tgt_dir/$s/fst/phn_to_phn - - python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $tgt_dir/$s/train_text.phn --workers 10 --only-source --destdir $lm_dir --srcdict $tgt_dir/$s/dict.phn.txt - $KENLM_ROOT/lmplz -o 3 < $tgt_dir/$s/train_text.phn --discount_fallback >$lm_dir/train_text_phn.03.arpa - $KENLM_ROOT/build_binary $lm_dir/train_text_phn.03.arpa $lm_dir/train_text_phn.03.bin - $KENLM_ROOT/lmplz -o 4 < $tgt_dir/$s/train_text.phn --discount_fallback >$lm_dir/train_text_phn.04.arpa - $KENLM_ROOT/build_binary $lm_dir/train_text_phn.04.arpa $lm_dir/train_text_phn.04.bin - - python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$fst_dir lm_arpa=$lm_dir/train_text_phn.03.arpa data_dir=$tgt_dir/$s in_labels=phn -done -echo "done preprocessing audio and text for wav2vec-U" diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/scripts/convert_model.lua b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/scripts/convert_model.lua deleted file mode 100644 index 61b92139294fb90a25989ebd2ee52a765fb278a2..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/scripts/convert_model.lua +++ /dev/null @@ -1,108 +0,0 @@ --- Copyright (c) Facebook, Inc. and its affiliates. --- --- This source code is licensed under the MIT license found in the --- LICENSE file in the root directory of this source tree. --- --- Usage: convert_model.lua -require 'torch' -local fairseq = require 'fairseq' - -model = torch.load(arg[1]) - -function find_weight_norm(container, module) - for _, wn in ipairs(container:listModules()) do - if torch.type(wn) == 'nn.WeightNorm' and wn.modules[1] == module then - return wn - end - end -end - -function push_state(dict, key, module) - if torch.type(module) == 'nn.Linear' then - local wn = find_weight_norm(model.module, module) - assert(wn) - dict[key .. '.weight_v'] = wn.v:float() - dict[key .. '.weight_g'] = wn.g:float() - elseif torch.type(module) == 'nn.TemporalConvolutionTBC' then - local wn = find_weight_norm(model.module, module) - assert(wn) - local v = wn.v:float():view(wn.viewOut):transpose(2, 3) - dict[key .. '.weight_v'] = v - dict[key .. '.weight_g'] = wn.g:float():view(module.weight:size(3), 1, 1) - else - dict[key .. '.weight'] = module.weight:float() - end - if module.bias then - dict[key .. '.bias'] = module.bias:float() - end -end - -encoder_dict = {} -decoder_dict = {} -combined_dict = {} - -function encoder_state(encoder) - luts = encoder:findModules('nn.LookupTable') - push_state(encoder_dict, 'embed_tokens', luts[1]) - push_state(encoder_dict, 'embed_positions', luts[2]) - - fcs = encoder:findModules('nn.Linear') - assert(#fcs >= 2) - local nInputPlane = fcs[1].weight:size(1) - push_state(encoder_dict, 'fc1', table.remove(fcs, 1)) - push_state(encoder_dict, 'fc2', table.remove(fcs, #fcs)) - - for i, module in ipairs(encoder:findModules('nn.TemporalConvolutionTBC')) do - push_state(encoder_dict, 'convolutions.' .. tostring(i - 1), module) - if nInputPlane ~= module.weight:size(3) / 2 then - push_state(encoder_dict, 'projections.' .. tostring(i - 1), table.remove(fcs, 1)) - end - nInputPlane = module.weight:size(3) / 2 - end - assert(#fcs == 0) -end - -function decoder_state(decoder) - luts = decoder:findModules('nn.LookupTable') - push_state(decoder_dict, 'embed_tokens', luts[1]) - push_state(decoder_dict, 'embed_positions', luts[2]) - - fcs = decoder:findModules('nn.Linear') - local nInputPlane = fcs[1].weight:size(1) - push_state(decoder_dict, 'fc1', table.remove(fcs, 1)) - push_state(decoder_dict, 'fc2', fcs[#fcs - 1]) - push_state(decoder_dict, 'fc3', fcs[#fcs]) - - table.remove(fcs, #fcs) - table.remove(fcs, #fcs) - - for i, module in ipairs(decoder:findModules('nn.TemporalConvolutionTBC')) do - if nInputPlane ~= module.weight:size(3) / 2 then - push_state(decoder_dict, 'projections.' .. tostring(i - 1), table.remove(fcs, 1)) - end - nInputPlane = module.weight:size(3) / 2 - - local prefix = 'attention.' .. tostring(i - 1) - push_state(decoder_dict, prefix .. '.in_projection', table.remove(fcs, 1)) - push_state(decoder_dict, prefix .. '.out_projection', table.remove(fcs, 1)) - push_state(decoder_dict, 'convolutions.' .. tostring(i - 1), module) - end - assert(#fcs == 0) -end - - -_encoder = model.module.modules[2] -_decoder = model.module.modules[3] - -encoder_state(_encoder) -decoder_state(_decoder) - -for k, v in pairs(encoder_dict) do - combined_dict['encoder.' .. k] = v -end -for k, v in pairs(decoder_dict) do - combined_dict['decoder.' .. k] = v -end - - -torch.save('state_dict.t7', combined_dict) diff --git a/spaces/ORI-Muchim/PowerTTS/utils.py b/spaces/ORI-Muchim/PowerTTS/utils.py deleted file mode 100644 index 4cb5b43d0ca2bae496e7871b2094f2ffb26ab642..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/PowerTTS/utils.py +++ /dev/null @@ -1,226 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.ERROR) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/OkamiFeng/Bark-with-Voice-Cloning/bark/hubert/__init__.py b/spaces/OkamiFeng/Bark-with-Voice-Cloning/bark/hubert/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/training/modules/ffc.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/training/modules/ffc.py deleted file mode 100644 index 2f8aeb1411fc1537916275fd3243706cc74b8d3c..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/training/modules/ffc.py +++ /dev/null @@ -1,433 +0,0 @@ -# Fast Fourier Convolution NeurIPS 2020 -# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py -# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from saicinpainting.training.modules.base import get_activation, BaseDiscriminator -from saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper -from saicinpainting.training.modules.squeeze_excitation import SELayer -from saicinpainting.utils import get_shape - - -class FFCSE_block(nn.Module): - - def __init__(self, channels, ratio_g): - super(FFCSE_block, self).__init__() - in_cg = int(channels * ratio_g) - in_cl = channels - in_cg - r = 16 - - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.conv1 = nn.Conv2d(channels, channels // r, - kernel_size=1, bias=True) - self.relu1 = nn.ReLU(inplace=True) - self.conv_a2l = None if in_cl == 0 else nn.Conv2d( - channels // r, in_cl, kernel_size=1, bias=True) - self.conv_a2g = None if in_cg == 0 else nn.Conv2d( - channels // r, in_cg, kernel_size=1, bias=True) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - x = x if type(x) is tuple else (x, 0) - id_l, id_g = x - - x = id_l if type(id_g) is int else torch.cat([id_l, id_g], dim=1) - x = self.avgpool(x) - x = self.relu1(self.conv1(x)) - - x_l = 0 if self.conv_a2l is None else id_l * \ - self.sigmoid(self.conv_a2l(x)) - x_g = 0 if self.conv_a2g is None else id_g * \ - self.sigmoid(self.conv_a2g(x)) - return x_l, x_g - - -class FourierUnit(nn.Module): - - def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear', - spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'): - # bn_layer not used - super(FourierUnit, self).__init__() - self.groups = groups - - self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0), - out_channels=out_channels * 2, - kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False) - self.bn = torch.nn.BatchNorm2d(out_channels * 2) - self.relu = torch.nn.ReLU(inplace=True) - - # squeeze and excitation block - self.use_se = use_se - if use_se: - if se_kwargs is None: - se_kwargs = {} - self.se = SELayer(self.conv_layer.in_channels, **se_kwargs) - - self.spatial_scale_factor = spatial_scale_factor - self.spatial_scale_mode = spatial_scale_mode - self.spectral_pos_encoding = spectral_pos_encoding - self.ffc3d = ffc3d - self.fft_norm = fft_norm - - def forward(self, x): - batch = x.shape[0] - - if self.spatial_scale_factor is not None: - orig_size = x.shape[-2:] - x = F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode, align_corners=False) - - r_size = x.size() - # (batch, c, h, w/2+1, 2) - fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1) - ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm) - ffted = torch.stack((ffted.real, ffted.imag), dim=-1) - ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1) - ffted = ffted.view((batch, -1,) + ffted.size()[3:]) - - if self.spectral_pos_encoding: - height, width = ffted.shape[-2:] - coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted) - coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted) - ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1) - - if self.use_se: - ffted = self.se(ffted) - - ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1) - ffted = self.relu(self.bn(ffted)) - - ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute( - 0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2) - ffted = torch.complex(ffted[..., 0], ffted[..., 1]) - - ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:] - output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm) - - if self.spatial_scale_factor is not None: - output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False) - - return output - - -class SpectralTransform(nn.Module): - - def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, **fu_kwargs): - # bn_layer not used - super(SpectralTransform, self).__init__() - self.enable_lfu = enable_lfu - if stride == 2: - self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2) - else: - self.downsample = nn.Identity() - - self.stride = stride - self.conv1 = nn.Sequential( - nn.Conv2d(in_channels, out_channels // - 2, kernel_size=1, groups=groups, bias=False), - nn.BatchNorm2d(out_channels // 2), - nn.ReLU(inplace=True) - ) - self.fu = FourierUnit( - out_channels // 2, out_channels // 2, groups, **fu_kwargs) - if self.enable_lfu: - self.lfu = FourierUnit( - out_channels // 2, out_channels // 2, groups) - self.conv2 = torch.nn.Conv2d( - out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False) - - def forward(self, x): - - x = self.downsample(x) - x = self.conv1(x) - output = self.fu(x) - - if self.enable_lfu: - n, c, h, w = x.shape - split_no = 2 - split_s = h // split_no - xs = torch.cat(torch.split( - x[:, :c // 4], split_s, dim=-2), dim=1).contiguous() - xs = torch.cat(torch.split(xs, split_s, dim=-1), - dim=1).contiguous() - xs = self.lfu(xs) - xs = xs.repeat(1, 1, split_no, split_no).contiguous() - else: - xs = 0 - - output = self.conv2(x + output + xs) - - return output - - -class FFC(nn.Module): - - def __init__(self, in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride=1, padding=0, - dilation=1, groups=1, bias=False, enable_lfu=True, - padding_type='reflect', gated=False, **spectral_kwargs): - super(FFC, self).__init__() - - assert stride == 1 or stride == 2, "Stride should be 1 or 2." - self.stride = stride - - in_cg = int(in_channels * ratio_gin) - in_cl = in_channels - in_cg - out_cg = int(out_channels * ratio_gout) - out_cl = out_channels - out_cg - #groups_g = 1 if groups == 1 else int(groups * ratio_gout) - #groups_l = 1 if groups == 1 else groups - groups_g - - self.ratio_gin = ratio_gin - self.ratio_gout = ratio_gout - self.global_in_num = in_cg - - module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d - self.convl2l = module(in_cl, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d - self.convl2g = module(in_cl, out_cg, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d - self.convg2l = module(in_cg, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform - self.convg2g = module( - in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu, **spectral_kwargs) - - self.gated = gated - module = nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d - self.gate = module(in_channels, 2, 1) - - def forward(self, x): - x_l, x_g = x if type(x) is tuple else (x, 0) - out_xl, out_xg = 0, 0 - - if self.gated: - total_input_parts = [x_l] - if torch.is_tensor(x_g): - total_input_parts.append(x_g) - total_input = torch.cat(total_input_parts, dim=1) - - gates = torch.sigmoid(self.gate(total_input)) - g2l_gate, l2g_gate = gates.chunk(2, dim=1) - else: - g2l_gate, l2g_gate = 1, 1 - - if self.ratio_gout != 1: - out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate - if self.ratio_gout != 0: - out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g) - - return out_xl, out_xg - - -class FFC_BN_ACT(nn.Module): - - def __init__(self, in_channels, out_channels, - kernel_size, ratio_gin, ratio_gout, - stride=1, padding=0, dilation=1, groups=1, bias=False, - norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity, - padding_type='reflect', - enable_lfu=True, **kwargs): - super(FFC_BN_ACT, self).__init__() - self.ffc = FFC(in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride, padding, dilation, - groups, bias, enable_lfu, padding_type=padding_type, **kwargs) - lnorm = nn.Identity if ratio_gout == 1 else norm_layer - gnorm = nn.Identity if ratio_gout == 0 else norm_layer - global_channels = int(out_channels * ratio_gout) - self.bn_l = lnorm(out_channels - global_channels) - self.bn_g = gnorm(global_channels) - - lact = nn.Identity if ratio_gout == 1 else activation_layer - gact = nn.Identity if ratio_gout == 0 else activation_layer - self.act_l = lact(inplace=True) - self.act_g = gact(inplace=True) - - def forward(self, x): - x_l, x_g = self.ffc(x) - x_l = self.act_l(self.bn_l(x_l)) - x_g = self.act_g(self.bn_g(x_g)) - return x_l, x_g - - -class FFCResnetBlock(nn.Module): - def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1, - spatial_transform_kwargs=None, inline=False, **conv_kwargs): - super().__init__() - self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - **conv_kwargs) - self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - **conv_kwargs) - if spatial_transform_kwargs is not None: - self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs) - self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs) - self.inline = inline - - def forward(self, x): - if self.inline: - x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:] - else: - x_l, x_g = x if type(x) is tuple else (x, 0) - - id_l, id_g = x_l, x_g - - x_l, x_g = self.conv1((x_l, x_g)) - x_l, x_g = self.conv2((x_l, x_g)) - - x_l, x_g = id_l + x_l, id_g + x_g - out = x_l, x_g - if self.inline: - out = torch.cat(out, dim=1) - return out - - -class ConcatTupleLayer(nn.Module): - def forward(self, x): - assert isinstance(x, tuple) - x_l, x_g = x - assert torch.is_tensor(x_l) or torch.is_tensor(x_g) - if not torch.is_tensor(x_g): - return x_l - return torch.cat(x, dim=1) - - -class FFCResNetGenerator(nn.Module): - def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, - padding_type='reflect', activation_layer=nn.ReLU, - up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), - init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={}, - spatial_transform_layers=None, spatial_transform_kwargs={}, - add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}): - assert (n_blocks >= 0) - super().__init__() - - model = [nn.ReflectionPad2d(3), - FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer, - activation_layer=activation_layer, **init_conv_kwargs)] - - ### downsample - for i in range(n_downsampling): - mult = 2 ** i - if i == n_downsampling - 1: - cur_conv_kwargs = dict(downsample_conv_kwargs) - cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0) - else: - cur_conv_kwargs = downsample_conv_kwargs - model += [FFC_BN_ACT(min(max_features, ngf * mult), - min(max_features, ngf * mult * 2), - kernel_size=3, stride=2, padding=1, - norm_layer=norm_layer, - activation_layer=activation_layer, - **cur_conv_kwargs)] - - mult = 2 ** n_downsampling - feats_num_bottleneck = min(max_features, ngf * mult) - - ### resnet blocks - for i in range(n_blocks): - cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, **resnet_conv_kwargs) - if spatial_transform_layers is not None and i in spatial_transform_layers: - cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs) - model += [cur_resblock] - - model += [ConcatTupleLayer()] - - ### upsample - for i in range(n_downsampling): - mult = 2 ** (n_downsampling - i) - model += [nn.ConvTranspose2d(min(max_features, ngf * mult), - min(max_features, int(ngf * mult / 2)), - kernel_size=3, stride=2, padding=1, output_padding=1), - up_norm_layer(min(max_features, int(ngf * mult / 2))), - up_activation] - - if out_ffc: - model += [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, inline=True, **out_ffc_kwargs)] - - model += [nn.ReflectionPad2d(3), - nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] - if add_out_act: - model.append(get_activation('tanh' if add_out_act is True else add_out_act)) - self.model = nn.Sequential(*model) - - def forward(self, input): - return self.model(input) - - -class FFCNLayerDiscriminator(BaseDiscriminator): - def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512, - init_conv_kwargs={}, conv_kwargs={}): - super().__init__() - self.n_layers = n_layers - - def _act_ctor(inplace=True): - return nn.LeakyReLU(negative_slope=0.2, inplace=inplace) - - kw = 3 - padw = int(np.ceil((kw-1.0)/2)) - sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, padding=padw, norm_layer=norm_layer, - activation_layer=_act_ctor, **init_conv_kwargs)]] - - nf = ndf - for n in range(1, n_layers): - nf_prev = nf - nf = min(nf * 2, max_features) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=2, padding=padw, - norm_layer=norm_layer, - activation_layer=_act_ctor, - **conv_kwargs) - ] - sequence.append(cur_model) - - nf_prev = nf - nf = min(nf * 2, 512) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=1, padding=padw, - norm_layer=norm_layer, - activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs), - **conv_kwargs), - ConcatTupleLayer() - ] - sequence.append(cur_model) - - sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] - - for n in range(len(sequence)): - setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) - - def get_all_activations(self, x): - res = [x] - for n in range(self.n_layers + 2): - model = getattr(self, 'model' + str(n)) - res.append(model(res[-1])) - return res[1:] - - def forward(self, x): - act = self.get_all_activations(x) - feats = [] - for out in act[:-1]: - if isinstance(out, tuple): - if torch.is_tensor(out[1]): - out = torch.cat(out, dim=1) - else: - out = out[0] - feats.append(out) - return act[-1], feats diff --git a/spaces/ParisNeo/Blip_QA/med.py b/spaces/ParisNeo/Blip_QA/med.py deleted file mode 100644 index 5b4cf2552d67a4af007057594a4bb1892fa05234..0000000000000000000000000000000000000000 --- a/spaces/ParisNeo/Blip_QA/med.py +++ /dev/null @@ -1,956 +0,0 @@ - -''' - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. - * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li - * Based on huggingface code base - * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert -''' - -import math -import os -import warnings -from dataclasses import dataclass -from typing import Optional, Tuple - -import torch -from torch import Tensor, device, dtype, nn -import torch.utils.checkpoint -from torch import nn -from torch.nn import CrossEntropyLoss -import torch.nn.functional as F - -from transformers.activations import ACT2FN -from transformers.file_utils import ( - ModelOutput, -) -from transformers.modeling_outputs import ( - BaseModelOutputWithPastAndCrossAttentions, - BaseModelOutputWithPoolingAndCrossAttentions, - CausalLMOutputWithCrossAttentions, - MaskedLMOutput, - MultipleChoiceModelOutput, - NextSentencePredictorOutput, - QuestionAnsweringModelOutput, - SequenceClassifierOutput, - TokenClassifierOutput, -) -from transformers.modeling_utils import ( - PreTrainedModel, - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - prune_linear_layer, -) -from transformers.utils import logging -from transformers.models.bert.configuration_bert import BertConfig - - -logger = logging.get_logger(__name__) - - -class BertEmbeddings(nn.Module): - """Construct the embeddings from word and position embeddings.""" - - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - - self.config = config - - def forward( - self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 - ): - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - seq_length = input_shape[1] - - if position_ids is None: - position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - - embeddings = inputs_embeds - - if self.position_embedding_type == "absolute": - position_embeddings = self.position_embeddings(position_ids) - embeddings += position_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -class BertSelfAttention(nn.Module): - def __init__(self, config, is_cross_attention): - super().__init__() - self.config = config - if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): - raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads) - ) - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.query = nn.Linear(config.hidden_size, self.all_head_size) - if is_cross_attention: - self.key = nn.Linear(config.encoder_width, self.all_head_size) - self.value = nn.Linear(config.encoder_width, self.all_head_size) - else: - self.key = nn.Linear(config.hidden_size, self.all_head_size) - self.value = nn.Linear(config.hidden_size, self.all_head_size) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - self.max_position_embeddings = config.max_position_embeddings - self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) - self.save_attention = False - - def save_attn_gradients(self, attn_gradients): - self.attn_gradients = attn_gradients - - def get_attn_gradients(self): - return self.attn_gradients - - def save_attention_map(self, attention_map): - self.attention_map = attention_map - - def get_attention_map(self): - return self.attention_map - - def transpose_for_scores(self, x): - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(*new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - ): - mixed_query_layer = self.query(hidden_states) - - # If this is instantiated as a cross-attention module, the keys - # and values come from an encoder; the attention mask needs to be - # such that the encoder's padding tokens are not attended to. - is_cross_attention = encoder_hidden_states is not None - - if is_cross_attention: - key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) - value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) - attention_mask = encoder_attention_mask - elif past_key_value is not None: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - key_layer = torch.cat([past_key_value[0], key_layer], dim=2) - value_layer = torch.cat([past_key_value[1], value_layer], dim=2) - else: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - - query_layer = self.transpose_for_scores(mixed_query_layer) - - past_key_value = (key_layer, value_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - seq_length = hidden_states.size()[1] - position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) - position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) - distance = position_ids_l - position_ids_r - positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) - positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility - - if self.position_embedding_type == "relative_key": - relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores - elif self.position_embedding_type == "relative_key_query": - relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key - - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in BertModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. - attention_probs = nn.Softmax(dim=-1)(attention_scores) - - if is_cross_attention and self.save_attention: - self.save_attention_map(attention_probs) - attention_probs.register_hook(self.save_attn_gradients) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. - attention_probs_dropped = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs_dropped = attention_probs_dropped * head_mask - - context_layer = torch.matmul(attention_probs_dropped, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(*new_context_layer_shape) - - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - outputs = outputs + (past_key_value,) - return outputs - - -class BertSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertAttention(nn.Module): - def __init__(self, config, is_cross_attention=False): - super().__init__() - self.self = BertSelfAttention(config, is_cross_attention) - self.output = BertSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - ): - self_outputs = self.self( - hidden_states, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs - - -class BertIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -class BertOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertLayer(nn.Module): - def __init__(self, config, layer_num): - super().__init__() - self.config = config - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 - self.attention = BertAttention(config) - self.layer_num = layer_num - if self.config.add_cross_attention: - self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) - self.intermediate = BertIntermediate(config) - self.output = BertOutput(config) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - mode=None, - ): - # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 - self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None - self_attention_outputs = self.attention( - hidden_states, - attention_mask, - head_mask, - output_attentions=output_attentions, - past_key_value=self_attn_past_key_value, - ) - attention_output = self_attention_outputs[0] - - outputs = self_attention_outputs[1:-1] - present_key_value = self_attention_outputs[-1] - - if mode=='multimodal': - assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" - - cross_attention_outputs = self.crossattention( - attention_output, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - output_attentions=output_attentions, - ) - attention_output = cross_attention_outputs[0] - outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output - ) - outputs = (layer_output,) + outputs - - outputs = outputs + (present_key_value,) - - return outputs - - def feed_forward_chunk(self, attention_output): - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - return layer_output - - -class BertEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_values=None, - use_cache=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, - mode='multimodal', - ): - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None - - next_decoder_cache = () if use_cache else None - - for i in range(self.config.num_hidden_layers): - layer_module = self.layer[i] - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_head_mask = head_mask[i] if head_mask is not None else None - past_key_value = past_key_values[i] if past_key_values is not None else None - - if self.gradient_checkpointing and self.training: - - if use_cache: - logger.warn( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." - ) - use_cache = False - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, past_key_value, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - mode=mode, - ) - else: - layer_outputs = layer_module( - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - mode=mode, - ) - - hidden_states = layer_outputs[0] - if use_cache: - next_decoder_cache += (layer_outputs[-1],) - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - next_decoder_cache, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) - return BaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=next_decoder_cache, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, - ) - - -class BertPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -class BertPredictionHeadTransform(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - if isinstance(config.hidden_act, str): - self.transform_act_fn = ACT2FN[config.hidden_act] - else: - self.transform_act_fn = config.hidden_act - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.transform_act_fn(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - return hidden_states - - -class BertLMPredictionHead(nn.Module): - def __init__(self, config): - super().__init__() - self.transform = BertPredictionHeadTransform(config) - - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. - self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - - # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` - self.decoder.bias = self.bias - - def forward(self, hidden_states): - hidden_states = self.transform(hidden_states) - hidden_states = self.decoder(hidden_states) - return hidden_states - - -class BertOnlyMLMHead(nn.Module): - def __init__(self, config): - super().__init__() - self.predictions = BertLMPredictionHead(config) - - def forward(self, sequence_output): - prediction_scores = self.predictions(sequence_output) - return prediction_scores - - -class BertPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = BertConfig - base_model_prefix = "bert" - _keys_to_ignore_on_load_missing = [r"position_ids"] - - def _init_weights(self, module): - """ Initialize the weights """ - if isinstance(module, (nn.Linear, nn.Embedding)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - - -class BertModel(BertPreTrainedModel): - """ - The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of - cross-attention is added between the self-attention layers, following the architecture described in `Attention is - all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, - Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. - argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an - input to the forward pass. - """ - - def __init__(self, config, add_pooling_layer=True): - super().__init__(config) - self.config = config - - self.embeddings = BertEmbeddings(config) - - self.encoder = BertEncoder(config) - - self.pooler = BertPooler(config) if add_pooling_layer else None - - self.init_weights() - - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - - def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: - """ - Makes broadcastable attention and causal masks so that future and masked tokens are ignored. - - Arguments: - attention_mask (:obj:`torch.Tensor`): - Mask with ones indicating tokens to attend to, zeros for tokens to ignore. - input_shape (:obj:`Tuple[int]`): - The shape of the input to the model. - device: (:obj:`torch.device`): - The device of the input to the model. - - Returns: - :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. - """ - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. - if attention_mask.dim() == 3: - extended_attention_mask = attention_mask[:, None, :, :] - elif attention_mask.dim() == 2: - # Provided a padding mask of dimensions [batch_size, seq_length] - # - if the model is a decoder, apply a causal mask in addition to the padding mask - # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] - if is_decoder: - batch_size, seq_length = input_shape - - seq_ids = torch.arange(seq_length, device=device) - causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] - # in case past_key_values are used we need to add a prefix ones mask to the causal mask - # causal and attention masks must have same type with pytorch version < 1.3 - causal_mask = causal_mask.to(attention_mask.dtype) - - if causal_mask.shape[1] < attention_mask.shape[1]: - prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] - causal_mask = torch.cat( - [ - torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), - causal_mask, - ], - axis=-1, - ) - - extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] - else: - extended_attention_mask = attention_mask[:, None, None, :] - else: - raise ValueError( - "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( - input_shape, attention_mask.shape - ) - ) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility - extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 - return extended_attention_mask - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_values=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - is_decoder=False, - mode='multimodal', - ): - r""" - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` - (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` - instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. - use_cache (:obj:`bool`, `optional`): - If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up - decoding (see :obj:`past_key_values`). - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if is_decoder: - use_cache = use_cache if use_cache is not None else self.config.use_cache - else: - use_cache = False - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - batch_size, seq_length = input_shape - device = input_ids.device - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - batch_size, seq_length = input_shape - device = inputs_embeds.device - elif encoder_embeds is not None: - input_shape = encoder_embeds.size()[:-1] - batch_size, seq_length = input_shape - device = encoder_embeds.device - else: - raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") - - # past_key_values_length - past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 - - if attention_mask is None: - attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. - extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, - device, is_decoder) - - # If a 2D or 3D attention mask is provided for the cross-attention - # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] - if encoder_hidden_states is not None: - if type(encoder_hidden_states) == list: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() - else: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() - encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - - if type(encoder_attention_mask) == list: - encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] - elif encoder_attention_mask is None: - encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) - encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) - else: - encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) - else: - encoder_extended_attention_mask = None - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - if encoder_embeds is None: - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - past_key_values_length=past_key_values_length, - ) - else: - embedding_output = encoder_embeds - - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_extended_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - mode=mode, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) if self.pooler is not None else None - - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - - return BaseModelOutputWithPoolingAndCrossAttentions( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, - ) - - - -class BertLMHeadModel(BertPreTrainedModel): - - _keys_to_ignore_on_load_unexpected = [r"pooler"] - _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] - - def __init__(self, config): - super().__init__(config) - - self.bert = BertModel(config, add_pooling_layer=False) - self.cls = BertOnlyMLMHead(config) - - self.init_weights() - - def get_output_embeddings(self): - return self.cls.predictions.decoder - - def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - labels=None, - past_key_values=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - return_logits=False, - is_decoder=True, - reduction='mean', - mode='multimodal', - ): - r""" - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in - ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are - ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` - past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` - (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` - instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. - use_cache (:obj:`bool`, `optional`): - If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up - decoding (see :obj:`past_key_values`). - Returns: - Example:: - >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig - >>> import torch - >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') - >>> config = BertConfig.from_pretrained("bert-base-cased") - >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) - >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") - >>> outputs = model(**inputs) - >>> prediction_logits = outputs.logits - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if labels is not None: - use_cache = False - - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - is_decoder=is_decoder, - mode=mode, - ) - - sequence_output = outputs[0] - prediction_scores = self.cls(sequence_output) - - if return_logits: - return prediction_scores[:, :-1, :].contiguous() - - lm_loss = None - if labels is not None: - # we are doing next-token prediction; shift prediction scores and input ids by one - shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() - labels = labels[:, 1:].contiguous() - loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) - lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) - if reduction=='none': - lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1) - - if not return_dict: - output = (prediction_scores,) + outputs[2:] - return ((lm_loss,) + output) if lm_loss is not None else output - - return CausalLMOutputWithCrossAttentions( - loss=lm_loss, - logits=prediction_scores, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - cross_attentions=outputs.cross_attentions, - ) - - def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): - input_shape = input_ids.shape - # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly - if attention_mask is None: - attention_mask = input_ids.new_ones(input_shape) - - # cut decoder_input_ids if past is used - if past is not None: - input_ids = input_ids[:, -1:] - - return { - "input_ids": input_ids, - "attention_mask": attention_mask, - "past_key_values": past, - "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), - "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), - "is_decoder": True, - } - - def _reorder_cache(self, past, beam_idx): - reordered_past = () - for layer_past in past: - reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) - return reordered_past \ No newline at end of file diff --git a/spaces/PeepDaSlan9/De-limiter/prepro/delimit_valid_prepro.py b/spaces/PeepDaSlan9/De-limiter/prepro/delimit_valid_prepro.py deleted file mode 100644 index 9e03f69ee2d45034d1d49ef754aba48f58b1ca7e..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/De-limiter/prepro/delimit_valid_prepro.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -import json - -from torch.utils.data import DataLoader -import soundfile as sf -import tqdm - -from dataloader import DelimitValidDataset - - -def main(): - # Parameters - data_path = "/path/to/musdb18hq" - save_path = "/path/to/musdb18hq_limited" - batch_size = 1 - num_workers = 1 - sr = 44100 - - # Dataset - dataset = DelimitValidDataset(root=data_path) - data_loader = DataLoader( - dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False - ) - dict_valid_loudness = {} - # Preprocessing - for limited_audio, orig_audio, audio_name, loudness in tqdm.tqdm(data_loader): - audio_name = audio_name[0] - limited_audio = limited_audio[0].numpy() - loudness = float(loudness[0].numpy()) - dict_valid_loudness[audio_name] = loudness - # Save audio - os.makedirs(os.path.join(save_path, "valid"), exist_ok=True) - audio_path = os.path.join(save_path, "valid", audio_name) - sf.write(f"{audio_path}.wav", limited_audio.T, sr) - # write json write code - with open(os.path.join(save_path, "valid_loudness.json"), "w") as f: - json.dump(dict_valid_loudness, f, indent=4) - - -if __name__ == "__main__": - main() diff --git a/spaces/Pudding/Anime-or-Real/README.md b/spaces/Pudding/Anime-or-Real/README.md deleted file mode 100644 index 75952fffaa880ca760ae67e5adac732dbc9bfc42..0000000000000000000000000000000000000000 --- a/spaces/Pudding/Anime-or-Real/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Anime Or Real -emoji: 🔥 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ramse/TTS_Hindi/modules/commons/ssim.py b/spaces/Ramse/TTS_Hindi/modules/commons/ssim.py deleted file mode 100644 index 0d0241f267ef58b24979e022b05f2a9adf768826..0000000000000000000000000000000000000000 --- a/spaces/Ramse/TTS_Hindi/modules/commons/ssim.py +++ /dev/null @@ -1,391 +0,0 @@ -# ''' -# https://github.com/One-sixth/ms_ssim_pytorch/blob/master/ssim.py -# ''' -# -# import torch -# import torch.jit -# import torch.nn.functional as F -# -# -# @torch.jit.script -# def create_window(window_size: int, sigma: float, channel: int): -# ''' -# Create 1-D gauss kernel -# :param window_size: the size of gauss kernel -# :param sigma: sigma of normal distribution -# :param channel: input channel -# :return: 1D kernel -# ''' -# coords = torch.arange(window_size, dtype=torch.float) -# coords -= window_size // 2 -# -# g = torch.exp(-(coords ** 2) / (2 * sigma ** 2)) -# g /= g.sum() -# -# g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1) -# return g -# -# -# @torch.jit.script -# def _gaussian_filter(x, window_1d, use_padding: bool): -# ''' -# Blur input with 1-D kernel -# :param x: batch of tensors to be blured -# :param window_1d: 1-D gauss kernel -# :param use_padding: padding image before conv -# :return: blured tensors -# ''' -# C = x.shape[1] -# padding = 0 -# if use_padding: -# window_size = window_1d.shape[3] -# padding = window_size // 2 -# out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C) -# out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C) -# return out -# -# -# @torch.jit.script -# def ssim(X, Y, window, data_range: float, use_padding: bool = False): -# ''' -# Calculate ssim index for X and Y -# :param X: images [B, C, H, N_bins] -# :param Y: images [B, C, H, N_bins] -# :param window: 1-D gauss kernel -# :param data_range: value range of input images. (usually 1.0 or 255) -# :param use_padding: padding image before conv -# :return: -# ''' -# -# K1 = 0.01 -# K2 = 0.03 -# compensation = 1.0 -# -# C1 = (K1 * data_range) ** 2 -# C2 = (K2 * data_range) ** 2 -# -# mu1 = _gaussian_filter(X, window, use_padding) -# mu2 = _gaussian_filter(Y, window, use_padding) -# sigma1_sq = _gaussian_filter(X * X, window, use_padding) -# sigma2_sq = _gaussian_filter(Y * Y, window, use_padding) -# sigma12 = _gaussian_filter(X * Y, window, use_padding) -# -# mu1_sq = mu1.pow(2) -# mu2_sq = mu2.pow(2) -# mu1_mu2 = mu1 * mu2 -# -# sigma1_sq = compensation * (sigma1_sq - mu1_sq) -# sigma2_sq = compensation * (sigma2_sq - mu2_sq) -# sigma12 = compensation * (sigma12 - mu1_mu2) -# -# cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2) -# # Fixed the issue that the negative value of cs_map caused ms_ssim to output Nan. -# cs_map = cs_map.clamp_min(0.) -# ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map -# -# ssim_val = ssim_map.mean(dim=(1, 2, 3)) # reduce along CHW -# cs = cs_map.mean(dim=(1, 2, 3)) -# -# return ssim_val, cs -# -# -# @torch.jit.script -# def ms_ssim(X, Y, window, data_range: float, weights, use_padding: bool = False, eps: float = 1e-8): -# ''' -# interface of ms-ssim -# :param X: a batch of images, (N,C,H,W) -# :param Y: a batch of images, (N,C,H,W) -# :param window: 1-D gauss kernel -# :param data_range: value range of input images. (usually 1.0 or 255) -# :param weights: weights for different levels -# :param use_padding: padding image before conv -# :param eps: use for avoid grad nan. -# :return: -# ''' -# levels = weights.shape[0] -# cs_vals = [] -# ssim_vals = [] -# for _ in range(levels): -# ssim_val, cs = ssim(X, Y, window=window, data_range=data_range, use_padding=use_padding) -# # Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf. -# ssim_val = ssim_val.clamp_min(eps) -# cs = cs.clamp_min(eps) -# cs_vals.append(cs) -# -# ssim_vals.append(ssim_val) -# padding = (X.shape[2] % 2, X.shape[3] % 2) -# X = F.avg_pool2d(X, kernel_size=2, stride=2, padding=padding) -# Y = F.avg_pool2d(Y, kernel_size=2, stride=2, padding=padding) -# -# cs_vals = torch.stack(cs_vals, dim=0) -# ms_ssim_val = torch.prod((cs_vals[:-1] ** weights[:-1].unsqueeze(1)) * (ssim_vals[-1] ** weights[-1]), dim=0) -# return ms_ssim_val -# -# -# class SSIM(torch.jit.ScriptModule): -# __constants__ = ['data_range', 'use_padding'] -# -# def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False): -# ''' -# :param window_size: the size of gauss kernel -# :param window_sigma: sigma of normal distribution -# :param data_range: value range of input images. (usually 1.0 or 255) -# :param channel: input channels (default: 3) -# :param use_padding: padding image before conv -# ''' -# super().__init__() -# assert window_size % 2 == 1, 'Window size must be odd.' -# window = create_window(window_size, window_sigma, channel) -# self.register_buffer('window', window) -# self.data_range = data_range -# self.use_padding = use_padding -# -# @torch.jit.script_method -# def forward(self, X, Y): -# r = ssim(X, Y, window=self.window, data_range=self.data_range, use_padding=self.use_padding) -# return r[0] -# -# -# class MS_SSIM(torch.jit.ScriptModule): -# __constants__ = ['data_range', 'use_padding', 'eps'] -# -# def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False, weights=None, -# levels=None, eps=1e-8): -# ''' -# class for ms-ssim -# :param window_size: the size of gauss kernel -# :param window_sigma: sigma of normal distribution -# :param data_range: value range of input images. (usually 1.0 or 255) -# :param channel: input channels -# :param use_padding: padding image before conv -# :param weights: weights for different levels. (default [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]) -# :param levels: number of downsampling -# :param eps: Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf. -# ''' -# super().__init__() -# assert window_size % 2 == 1, 'Window size must be odd.' -# self.data_range = data_range -# self.use_padding = use_padding -# self.eps = eps -# -# window = create_window(window_size, window_sigma, channel) -# self.register_buffer('window', window) -# -# if weights is None: -# weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333] -# weights = torch.tensor(weights, dtype=torch.float) -# -# if levels is not None: -# weights = weights[:levels] -# weights = weights / weights.sum() -# -# self.register_buffer('weights', weights) -# -# @torch.jit.script_method -# def forward(self, X, Y): -# return ms_ssim(X, Y, window=self.window, data_range=self.data_range, weights=self.weights, -# use_padding=self.use_padding, eps=self.eps) -# -# -# if __name__ == '__main__': -# print('Simple Test') -# im = torch.randint(0, 255, (5, 3, 256, 256), dtype=torch.float, device='cuda') -# img1 = im / 255 -# img2 = img1 * 0.5 -# -# losser = SSIM(data_range=1.).cuda() -# loss = losser(img1, img2).mean() -# -# losser2 = MS_SSIM(data_range=1.).cuda() -# loss2 = losser2(img1, img2).mean() -# -# print(loss.item()) -# print(loss2.item()) -# -# if __name__ == '__main__': -# print('Training Test') -# import cv2 -# import torch.optim -# import numpy as np -# import imageio -# import time -# -# out_test_video = False -# # 最好不要直接输出gif图,会非常大,最好先输出mkv文件后用ffmpeg转换到GIF -# video_use_gif = False -# -# im = cv2.imread('test_img1.jpg', 1) -# t_im = torch.from_numpy(im).cuda().permute(2, 0, 1).float()[None] / 255. -# -# if out_test_video: -# if video_use_gif: -# fps = 0.5 -# out_wh = (im.shape[1] // 2, im.shape[0] // 2) -# suffix = '.gif' -# else: -# fps = 5 -# out_wh = (im.shape[1], im.shape[0]) -# suffix = '.mkv' -# video_last_time = time.perf_counter() -# video = imageio.get_writer('ssim_test' + suffix, fps=fps) -# -# # 测试ssim -# print('Training SSIM') -# rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255. -# rand_im.requires_grad = True -# optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8) -# losser = SSIM(data_range=1., channel=t_im.shape[1]).cuda() -# ssim_score = 0 -# while ssim_score < 0.999: -# optim.zero_grad() -# loss = losser(rand_im, t_im) -# (-loss).sum().backward() -# ssim_score = loss.item() -# optim.step() -# r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0] -# r_im = cv2.putText(r_im, 'ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2) -# -# if out_test_video: -# if time.perf_counter() - video_last_time > 1. / fps: -# video_last_time = time.perf_counter() -# out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB) -# out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA) -# if isinstance(out_frame, cv2.UMat): -# out_frame = out_frame.get() -# video.append_data(out_frame) -# -# cv2.imshow('ssim', r_im) -# cv2.setWindowTitle('ssim', 'ssim %f' % ssim_score) -# cv2.waitKey(1) -# -# if out_test_video: -# video.close() -# -# # 测试ms_ssim -# if out_test_video: -# if video_use_gif: -# fps = 0.5 -# out_wh = (im.shape[1] // 2, im.shape[0] // 2) -# suffix = '.gif' -# else: -# fps = 5 -# out_wh = (im.shape[1], im.shape[0]) -# suffix = '.mkv' -# video_last_time = time.perf_counter() -# video = imageio.get_writer('ms_ssim_test' + suffix, fps=fps) -# -# print('Training MS_SSIM') -# rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255. -# rand_im.requires_grad = True -# optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8) -# losser = MS_SSIM(data_range=1., channel=t_im.shape[1]).cuda() -# ssim_score = 0 -# while ssim_score < 0.999: -# optim.zero_grad() -# loss = losser(rand_im, t_im) -# (-loss).sum().backward() -# ssim_score = loss.item() -# optim.step() -# r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0] -# r_im = cv2.putText(r_im, 'ms_ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2) -# -# if out_test_video: -# if time.perf_counter() - video_last_time > 1. / fps: -# video_last_time = time.perf_counter() -# out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB) -# out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA) -# if isinstance(out_frame, cv2.UMat): -# out_frame = out_frame.get() -# video.append_data(out_frame) -# -# cv2.imshow('ms_ssim', r_im) -# cv2.setWindowTitle('ms_ssim', 'ms_ssim %f' % ssim_score) -# cv2.waitKey(1) -# -# if out_test_video: -# video.close() - -""" -Adapted from https://github.com/Po-Hsun-Su/pytorch-ssim -""" - -import torch -import torch.nn.functional as F -from torch.autograd import Variable -import numpy as np -from math import exp - - -def gaussian(window_size, sigma): - gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]) - return gauss / gauss.sum() - - -def create_window(window_size, channel): - _1D_window = gaussian(window_size, 1.5).unsqueeze(1) - _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) - window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) - return window - - -def _ssim(img1, img2, window, window_size, channel, size_average=True): - mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) - mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) - - mu1_sq = mu1.pow(2) - mu2_sq = mu2.pow(2) - mu1_mu2 = mu1 * mu2 - - sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq - sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq - sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2 - - C1 = 0.01 ** 2 - C2 = 0.03 ** 2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) - - if size_average: - return ssim_map.mean() - else: - return ssim_map.mean(1) - - -class SSIM(torch.nn.Module): - def __init__(self, window_size=11, size_average=True): - super(SSIM, self).__init__() - self.window_size = window_size - self.size_average = size_average - self.channel = 1 - self.window = create_window(window_size, self.channel) - - def forward(self, img1, img2): - (_, channel, _, _) = img1.size() - - if channel == self.channel and self.window.data.type() == img1.data.type(): - window = self.window - else: - window = create_window(self.window_size, channel) - - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - - self.window = window - self.channel = channel - - return _ssim(img1, img2, window, self.window_size, channel, self.size_average) - - -window = None - - -def ssim(img1, img2, window_size=11, size_average=True): - (_, channel, _, _) = img1.size() - global window - if window is None: - window = create_window(window_size, channel) - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - return _ssim(img1, img2, window, window_size, channel, size_average) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/more_itertools/__init__.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/more_itertools/__init__.py deleted file mode 100644 index ea38bef1f661e62d577b3c2207386d901d851c72..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/more_itertools/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .more import * # noqa -from .recipes import * # noqa - -__version__ = '8.12.0' diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/ASpanFormer/backbone/resnet_fpn.py b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/ASpanFormer/backbone/resnet_fpn.py deleted file mode 100644 index 948c72940ab00e5741e2788eea841d124333c8ed..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/ASpanFormer/backbone/resnet_fpn.py +++ /dev/null @@ -1,214 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution without padding""" - return nn.Conv2d( - in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False - ) - - -def conv3x3(in_planes, out_planes, stride=1): - """3x3 convolution with padding""" - return nn.Conv2d( - in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False - ) - - -class BasicBlock(nn.Module): - def __init__(self, in_planes, planes, stride=1): - super().__init__() - self.conv1 = conv3x3(in_planes, planes, stride) - self.conv2 = conv3x3(planes, planes) - self.bn1 = nn.BatchNorm2d(planes) - self.bn2 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - - if stride == 1: - self.downsample = None - else: - self.downsample = nn.Sequential( - conv1x1(in_planes, planes, stride=stride), nn.BatchNorm2d(planes) - ) - - def forward(self, x): - y = x - y = self.relu(self.bn1(self.conv1(y))) - y = self.bn2(self.conv2(y)) - - if self.downsample is not None: - x = self.downsample(x) - - return self.relu(x + y) - - -class ResNetFPN_8_2(nn.Module): - """ - ResNet+FPN, output resolution are 1/8 and 1/2. - Each block has 2 layers. - """ - - def __init__(self, config): - super().__init__() - # Config - block = BasicBlock - initial_dim = config["initial_dim"] - block_dims = config["block_dims"] - - # Class Variable - self.in_planes = initial_dim - - # Networks - self.conv1 = nn.Conv2d( - 1, initial_dim, kernel_size=7, stride=2, padding=3, bias=False - ) - self.bn1 = nn.BatchNorm2d(initial_dim) - self.relu = nn.ReLU(inplace=True) - - self.layer1 = self._make_layer(block, block_dims[0], stride=1) # 1/2 - self.layer2 = self._make_layer(block, block_dims[1], stride=2) # 1/4 - self.layer3 = self._make_layer(block, block_dims[2], stride=2) # 1/8 - - # 3. FPN upsample - self.layer3_outconv = conv1x1(block_dims[2], block_dims[2]) - self.layer2_outconv = conv1x1(block_dims[1], block_dims[2]) - self.layer2_outconv2 = nn.Sequential( - conv3x3(block_dims[2], block_dims[2]), - nn.BatchNorm2d(block_dims[2]), - nn.LeakyReLU(), - conv3x3(block_dims[2], block_dims[1]), - ) - self.layer1_outconv = conv1x1(block_dims[0], block_dims[1]) - self.layer1_outconv2 = nn.Sequential( - conv3x3(block_dims[1], block_dims[1]), - nn.BatchNorm2d(block_dims[1]), - nn.LeakyReLU(), - conv3x3(block_dims[1], block_dims[0]), - ) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - def _make_layer(self, block, dim, stride=1): - layer1 = block(self.in_planes, dim, stride=stride) - layer2 = block(dim, dim, stride=1) - layers = (layer1, layer2) - - self.in_planes = dim - return nn.Sequential(*layers) - - def forward(self, x): - # ResNet Backbone - x0 = self.relu(self.bn1(self.conv1(x))) - x1 = self.layer1(x0) # 1/2 - x2 = self.layer2(x1) # 1/4 - x3 = self.layer3(x2) # 1/8 - - # FPN - x3_out = self.layer3_outconv(x3) - - x3_out_2x = F.interpolate( - x3_out, scale_factor=2.0, mode="bilinear", align_corners=True - ) - x2_out = self.layer2_outconv(x2) - x2_out = self.layer2_outconv2(x2_out + x3_out_2x) - - x2_out_2x = F.interpolate( - x2_out, scale_factor=2.0, mode="bilinear", align_corners=True - ) - x1_out = self.layer1_outconv(x1) - x1_out = self.layer1_outconv2(x1_out + x2_out_2x) - - return [x3_out, x1_out] - - -class ResNetFPN_16_4(nn.Module): - """ - ResNet+FPN, output resolution are 1/16 and 1/4. - Each block has 2 layers. - """ - - def __init__(self, config): - super().__init__() - # Config - block = BasicBlock - initial_dim = config["initial_dim"] - block_dims = config["block_dims"] - - # Class Variable - self.in_planes = initial_dim - - # Networks - self.conv1 = nn.Conv2d( - 1, initial_dim, kernel_size=7, stride=2, padding=3, bias=False - ) - self.bn1 = nn.BatchNorm2d(initial_dim) - self.relu = nn.ReLU(inplace=True) - - self.layer1 = self._make_layer(block, block_dims[0], stride=1) # 1/2 - self.layer2 = self._make_layer(block, block_dims[1], stride=2) # 1/4 - self.layer3 = self._make_layer(block, block_dims[2], stride=2) # 1/8 - self.layer4 = self._make_layer(block, block_dims[3], stride=2) # 1/16 - - # 3. FPN upsample - self.layer4_outconv = conv1x1(block_dims[3], block_dims[3]) - self.layer3_outconv = conv1x1(block_dims[2], block_dims[3]) - self.layer3_outconv2 = nn.Sequential( - conv3x3(block_dims[3], block_dims[3]), - nn.BatchNorm2d(block_dims[3]), - nn.LeakyReLU(), - conv3x3(block_dims[3], block_dims[2]), - ) - - self.layer2_outconv = conv1x1(block_dims[1], block_dims[2]) - self.layer2_outconv2 = nn.Sequential( - conv3x3(block_dims[2], block_dims[2]), - nn.BatchNorm2d(block_dims[2]), - nn.LeakyReLU(), - conv3x3(block_dims[2], block_dims[1]), - ) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - def _make_layer(self, block, dim, stride=1): - layer1 = block(self.in_planes, dim, stride=stride) - layer2 = block(dim, dim, stride=1) - layers = (layer1, layer2) - - self.in_planes = dim - return nn.Sequential(*layers) - - def forward(self, x): - # ResNet Backbone - x0 = self.relu(self.bn1(self.conv1(x))) - x1 = self.layer1(x0) # 1/2 - x2 = self.layer2(x1) # 1/4 - x3 = self.layer3(x2) # 1/8 - x4 = self.layer4(x3) # 1/16 - - # FPN - x4_out = self.layer4_outconv(x4) - - x4_out_2x = F.interpolate( - x4_out, scale_factor=2.0, mode="bilinear", align_corners=True - ) - x3_out = self.layer3_outconv(x3) - x3_out = self.layer3_outconv2(x3_out + x4_out_2x) - - x3_out_2x = F.interpolate( - x3_out, scale_factor=2.0, mode="bilinear", align_corners=True - ) - x2_out = self.layer2_outconv(x2) - x2_out = self.layer2_outconv2(x2_out + x3_out_2x) - - return [x4_out, x2_out] diff --git a/spaces/Ricecake123/RVC-demo/gui.py b/spaces/Ricecake123/RVC-demo/gui.py deleted file mode 100644 index 4d8dc29dc3351fbffe924f825c9e8054b9589f0b..0000000000000000000000000000000000000000 --- a/spaces/Ricecake123/RVC-demo/gui.py +++ /dev/null @@ -1,699 +0,0 @@ -""" -0416后的更新: - 引入config中half - 重建npy而不用填写 - v2支持 - 无f0模型支持 - 修复 - - int16: - 增加无索引支持 - f0算法改harvest(怎么看就只有这个会影响CPU占用),但是不这么改效果不好 -""" -import os, sys, traceback, re - -import json - -now_dir = os.getcwd() -sys.path.append(now_dir) -from config import Config - -Config = Config() -import PySimpleGUI as sg -import sounddevice as sd -import noisereduce as nr -import numpy as np -from fairseq import checkpoint_utils -import librosa, torch, pyworld, faiss, time, threading -import torch.nn.functional as F -import torchaudio.transforms as tat -import scipy.signal as signal - - -# import matplotlib.pyplot as plt -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from i18n import I18nAuto - -i18n = I18nAuto() -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -current_dir = os.getcwd() - - -class RVC: - def __init__( - self, key, hubert_path, pth_path, index_path, npy_path, index_rate - ) -> None: - """ - 初始化 - """ - try: - self.f0_up_key = key - self.time_step = 160 / 16000 * 1000 - self.f0_min = 50 - self.f0_max = 1100 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - self.sr = 16000 - self.window = 160 - if index_rate != 0: - self.index = faiss.read_index(index_path) - # self.big_npy = np.load(npy_path) - self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - print("index search enabled") - self.index_rate = index_rate - model_path = hubert_path - print("load model(s) from {}".format(model_path)) - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [model_path], - suffix="", - ) - self.model = models[0] - self.model = self.model.to(device) - if Config.is_half: - self.model = self.model.half() - else: - self.model = self.model.float() - self.model.eval() - cpt = torch.load(pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=Config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=Config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - print(self.net_g.load_state_dict(cpt["weight"], strict=False)) - self.net_g.eval().to(device) - if Config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - except: - print(traceback.format_exc()) - - def get_f0(self, x, f0_up_key, inp_f0=None): - x_pad = 1 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def infer(self, feats: torch.Tensor) -> np.ndarray: - """ - 推理函数 - """ - audio = feats.clone().cpu().numpy() - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - if Config.is_half: - feats = feats.half() - else: - feats = feats.float() - inputs = { - "source": feats.to(device), - "padding_mask": padding_mask.to(device), - "output_layer": 9 if self.version == "v1" else 12, - } - torch.cuda.synchronize() - with torch.no_grad(): - logits = self.model.extract_features(**inputs) - feats = ( - self.model.final_proj(logits[0]) if self.version == "v1" else logits[0] - ) - - ####索引优化 - try: - if ( - hasattr(self, "index") - and hasattr(self, "big_npy") - and self.index_rate != 0 - ): - npy = feats[0].cpu().numpy().astype("float32") - score, ix = self.index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - if Config.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate - + (1 - self.index_rate) * feats - ) - else: - print("index search FAIL or disabled") - except: - traceback.print_exc() - print("index search FAIL") - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - torch.cuda.synchronize() - print(feats.shape) - if self.if_f0 == 1: - pitch, pitchf = self.get_f0(audio, self.f0_up_key) - p_len = min(feats.shape[1], 13000, pitch.shape[0]) # 太大了爆显存 - else: - pitch, pitchf = None, None - p_len = min(feats.shape[1], 13000) # 太大了爆显存 - torch.cuda.synchronize() - # print(feats.shape,pitch.shape) - feats = feats[:, :p_len, :] - if self.if_f0 == 1: - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.LongTensor(pitch).unsqueeze(0).to(device) - pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device) - p_len = torch.LongTensor([p_len]).to(device) - ii = 0 # sid - sid = torch.LongTensor([ii]).to(device) - with torch.no_grad(): - if self.if_f0 == 1: - infered_audio = ( - self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] - .data.cpu() - .float() - ) - else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid)[0][0, 0].data.cpu().float() - ) - torch.cuda.synchronize() - return infered_audio - - -class GUIConfig: - def __init__(self) -> None: - self.hubert_path: str = "" - self.pth_path: str = "" - self.index_path: str = "" - self.npy_path: str = "" - self.pitch: int = 12 - self.samplerate: int = 44100 - self.block_time: float = 1.0 # s - self.buffer_num: int = 1 - self.threhold: int = -30 - self.crossfade_time: float = 0.08 - self.extra_time: float = 0.04 - self.I_noise_reduce = False - self.O_noise_reduce = False - self.index_rate = 0.3 - - -class GUI: - def __init__(self) -> None: - self.config = GUIConfig() - self.flag_vc = False - - self.launcher() - - def load(self): - input_devices, output_devices, input_devices_indices, output_devices_indices = self.get_devices() - try: - with open("values1.json", "r") as j: - data = json.load(j) - except: - with open("values1.json", "w") as j: - data = { - "pth_path": "", - "index_path": "", - "sg_input_device": input_devices[input_devices_indices.index(sd.default.device[0])], - "sg_output_device": output_devices[output_devices_indices.index(sd.default.device[1])], - "threhold": "-45", - "pitch": "0", - "index_rate": "0", - "block_time": "1", - "crossfade_length": "0.04", - "extra_time": "1", - } - return data - - def launcher(self): - data = self.load() - sg.theme("LightBlue3") - input_devices, output_devices, _, _ = self.get_devices() - layout = [ - [ - sg.Frame( - title=i18n("加载模型"), - layout=[ - [ - sg.Input( - default_text="hubert_base.pt", - key="hubert_path", - disabled=True, - ), - sg.FileBrowse( - i18n("Hubert模型"), - initial_folder=os.path.join(os.getcwd()), - file_types=(("pt files", "*.pt"),), - ), - ], - [ - sg.Input( - default_text=data.get("pth_path", ""), - key="pth_path", - ), - sg.FileBrowse( - i18n("选择.pth文件"), - initial_folder=os.path.join(os.getcwd(), "weights"), - file_types=(("weight files", "*.pth"),), - ), - ], - [ - sg.Input( - default_text=data.get("index_path", ""), - key="index_path", - ), - sg.FileBrowse( - i18n("选择.index文件"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("index files", "*.index"),), - ), - ], - [ - sg.Input( - default_text="你不需要填写这个You don't need write this.", - key="npy_path", - disabled=True, - ), - sg.FileBrowse( - i18n("选择.npy文件"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("feature files", "*.npy"),), - ), - ], - ], - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("输入设备")), - sg.Combo( - input_devices, - key="sg_input_device", - default_value=data.get("sg_input_device", ""), - ), - ], - [ - sg.Text(i18n("输出设备")), - sg.Combo( - output_devices, - key="sg_output_device", - default_value=data.get("sg_output_device", ""), - ), - ], - ], - title=i18n("音频设备(请使用同种类驱动)"), - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("响应阈值")), - sg.Slider( - range=(-60, 0), - key="threhold", - resolution=1, - orientation="h", - default_value=data.get("threhold", ""), - ), - ], - [ - sg.Text(i18n("音调设置")), - sg.Slider( - range=(-24, 24), - key="pitch", - resolution=1, - orientation="h", - default_value=data.get("pitch", ""), - ), - ], - [ - sg.Text(i18n("Index Rate")), - sg.Slider( - range=(0.0, 1.0), - key="index_rate", - resolution=0.01, - orientation="h", - default_value=data.get("index_rate", ""), - ), - ], - ], - title=i18n("常规设置"), - ), - sg.Frame( - layout=[ - [ - sg.Text(i18n("采样长度")), - sg.Slider( - range=(0.1, 3.0), - key="block_time", - resolution=0.1, - orientation="h", - default_value=data.get("block_time", ""), - ), - ], - [ - sg.Text(i18n("淡入淡出长度")), - sg.Slider( - range=(0.01, 0.15), - key="crossfade_length", - resolution=0.01, - orientation="h", - default_value=data.get("crossfade_length", ""), - ), - ], - [ - sg.Text(i18n("额外推理时长")), - sg.Slider( - range=(0.05, 3.00), - key="extra_time", - resolution=0.01, - orientation="h", - default_value=data.get("extra_time", ""), - ), - ], - [ - sg.Checkbox(i18n("输入降噪"), key="I_noise_reduce"), - sg.Checkbox(i18n("输出降噪"), key="O_noise_reduce"), - ], - ], - title=i18n("性能设置"), - ), - ], - [ - sg.Button(i18n("开始音频转换"), key="start_vc"), - sg.Button(i18n("停止音频转换"), key="stop_vc"), - sg.Text(i18n("推理时间(ms):")), - sg.Text("0", key="infer_time"), - ], - ] - self.window = sg.Window("RVC - GUI", layout=layout) - self.event_handler() - - def event_handler(self): - while True: - event, values = self.window.read() - if event == sg.WINDOW_CLOSED: - self.flag_vc = False - exit() - if event == "start_vc" and self.flag_vc == False: - if self.set_values(values) == True: - print("using_cuda:" + str(torch.cuda.is_available())) - self.start_vc() - settings = { - "pth_path": values["pth_path"], - "index_path": values["index_path"], - "sg_input_device": values["sg_input_device"], - "sg_output_device": values["sg_output_device"], - "threhold": values["threhold"], - "pitch": values["pitch"], - "index_rate": values["index_rate"], - "block_time": values["block_time"], - "crossfade_length": values["crossfade_length"], - "extra_time": values["extra_time"], - } - with open("values1.json", "w") as j: - json.dump(settings, j) - if event == "stop_vc" and self.flag_vc == True: - self.flag_vc = False - - def set_values(self, values): - if len(values["pth_path"].strip()) == 0: - sg.popup(i18n("请选择pth文件")) - return False - if len(values["index_path"].strip()) == 0: - sg.popup(i18n("请选择index文件")) - return False - pattern = re.compile("[^\x00-\x7F]+") - if pattern.findall(values["hubert_path"]): - sg.popup(i18n("hubert模型路径不可包含中文")) - return False - if pattern.findall(values["pth_path"]): - sg.popup(i18n("pth文件路径不可包含中文")) - return False - if pattern.findall(values["index_path"]): - sg.popup(i18n("index文件路径不可包含中文")) - return False - self.set_devices(values["sg_input_device"], values["sg_output_device"]) - self.config.hubert_path = os.path.join(current_dir, "hubert_base.pt") - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - self.config.npy_path = values["npy_path"] - self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.index_rate = values["index_rate"] - return True - - def start_vc(self): - torch.cuda.empty_cache() - self.flag_vc = True - self.block_frame = int(self.config.block_time * self.config.samplerate) - self.crossfade_frame = int(self.config.crossfade_time * self.config.samplerate) - self.sola_search_frame = int(0.012 * self.config.samplerate) - self.delay_frame = int(0.01 * self.config.samplerate) # 往前预留0.02s - self.extra_frame = int(self.config.extra_time * self.config.samplerate) - self.rvc = None - self.rvc = RVC( - self.config.pitch, - self.config.hubert_path, - self.config.pth_path, - self.config.index_path, - self.config.npy_path, - self.config.index_rate, - ) - self.input_wav: np.ndarray = np.zeros( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame, - dtype="float32", - ) - self.output_wav: torch.Tensor = torch.zeros( - self.block_frame, device=device, dtype=torch.float32 - ) - self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_in_window: torch.Tensor = torch.linspace( - 0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_out_window: torch.Tensor = 1 - self.fade_in_window - self.resampler1 = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ) - self.resampler2 = tat.Resample( - orig_freq=self.rvc.tgt_sr, - new_freq=self.config.samplerate, - dtype=torch.float32, - ) - thread_vc = threading.Thread(target=self.soundinput) - thread_vc.start() - - def soundinput(self): - """ - 接受音频输入 - """ - with sd.Stream( - channels=2, - callback=self.audio_callback, - blocksize=self.block_frame, - samplerate=self.config.samplerate, - dtype="float32", - ): - while self.flag_vc: - time.sleep(self.config.block_time) - print("Audio block passed.") - print("ENDing VC") - - def audio_callback( - self, indata: np.ndarray, outdata: np.ndarray, frames, times, status - ): - """ - 音频处理 - """ - start_time = time.perf_counter() - indata = librosa.to_mono(indata.T) - if self.config.I_noise_reduce: - indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate) - - """noise gate""" - frame_length = 2048 - hop_length = 1024 - rms = librosa.feature.rms( - y=indata, frame_length=frame_length, hop_length=hop_length - ) - db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold - # print(rms.shape,db.shape,db) - for i in range(db_threhold.shape[0]): - if db_threhold[i]: - indata[i * hop_length : (i + 1) * hop_length] = 0 - self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata) - - # infer - print("input_wav:" + str(self.input_wav.shape)) - # print('infered_wav:'+str(infer_wav.shape)) - infer_wav: torch.Tensor = self.resampler2( - self.rvc.infer(self.resampler1(torch.from_numpy(self.input_wav))) - )[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].to( - device - ) - print("infer_wav:" + str(infer_wav.shape)) - - # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC - cor_nom = F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame], - self.sola_buffer[None, None, :], - ) - cor_den = torch.sqrt( - F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame] - ** 2, - torch.ones(1, 1, self.crossfade_frame, device=device), - ) - + 1e-8 - ) - sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0]) - print("sola offset: " + str(int(sola_offset))) - - # crossfade - self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame] - self.output_wav[: self.crossfade_frame] *= self.fade_in_window - self.output_wav[: self.crossfade_frame] += self.sola_buffer[:] - if sola_offset < self.sola_search_frame: - self.sola_buffer[:] = ( - infer_wav[ - -self.sola_search_frame - - self.crossfade_frame - + sola_offset : -self.sola_search_frame - + sola_offset - ] - * self.fade_out_window - ) - else: - self.sola_buffer[:] = ( - infer_wav[-self.crossfade_frame :] * self.fade_out_window - ) - - if self.config.O_noise_reduce: - outdata[:] = np.tile( - nr.reduce_noise( - y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate - ), - (2, 1), - ).T - else: - outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy() - total_time = time.perf_counter() - start_time - self.window["infer_time"].update(int(total_time * 1000)) - print("infer time:" + str(total_time)) - - def get_devices(self, update: bool = True): - """获取设备列表""" - if update: - sd._terminate() - sd._initialize() - devices = sd.query_devices() - hostapis = sd.query_hostapis() - for hostapi in hostapis: - for device_idx in hostapi["devices"]: - devices[device_idx]["hostapi_name"] = hostapi["name"] - input_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_output_channels"] > 0 - ] - input_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_output_channels"] > 0 - ] - return ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) - - def set_devices(self, input_device, output_device): - """设置输出设备""" - ( - input_devices, - output_devices, - input_device_indices, - output_device_indices, - ) = self.get_devices() - sd.default.device[0] = input_device_indices[input_devices.index(input_device)] - sd.default.device[1] = output_device_indices[ - output_devices.index(output_device) - ] - print("input device:" + str(sd.default.device[0]) + ":" + str(input_device)) - print("output device:" + str(sd.default.device[1]) + ":" + str(output_device)) - - -gui = GUI() diff --git a/spaces/Ritori/TTS_Yui/text/cmudict.py b/spaces/Ritori/TTS_Yui/text/cmudict.py deleted file mode 100644 index 62bfef745c30a56f7b6605d9e3becfbc40edb50d..0000000000000000000000000000000000000000 --- a/spaces/Ritori/TTS_Yui/text/cmudict.py +++ /dev/null @@ -1,65 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -import re - - -valid_symbols = [ - 'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2', - 'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2', - 'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY', - 'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1', - 'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0', - 'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW', - 'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH' -] - -_valid_symbol_set = set(valid_symbols) - - -class CMUDict: - '''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict''' - def __init__(self, file_or_path, keep_ambiguous=True): - if isinstance(file_or_path, str): - with open(file_or_path, encoding='latin-1') as f: - entries = _parse_cmudict(f) - else: - entries = _parse_cmudict(file_or_path) - if not keep_ambiguous: - entries = {word: pron for word, pron in entries.items() if len(pron) == 1} - self._entries = entries - - - def __len__(self): - return len(self._entries) - - - def lookup(self, word): - '''Returns list of ARPAbet pronunciations of the given word.''' - return self._entries.get(word.upper()) - - - -_alt_re = re.compile(r'\([0-9]+\)') - - -def _parse_cmudict(file): - cmudict = {} - for line in file: - if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"): - parts = line.split(' ') - word = re.sub(_alt_re, '', parts[0]) - pronunciation = _get_pronunciation(parts[1]) - if pronunciation: - if word in cmudict: - cmudict[word].append(pronunciation) - else: - cmudict[word] = [pronunciation] - return cmudict - - -def _get_pronunciation(s): - parts = s.strip().split(' ') - for part in parts: - if part not in _valid_symbol_set: - return None - return ' '.join(parts) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/feature_relay_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/feature_relay_head.py deleted file mode 100644 index a1cfb2ce8631d51e5c465f9bbc4164a37acc4782..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/feature_relay_head.py +++ /dev/null @@ -1,55 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import kaiming_init -from mmcv.runner import auto_fp16 - -from mmdet.models.builder import HEADS - - -@HEADS.register_module() -class FeatureRelayHead(nn.Module): - """Feature Relay Head used in `SCNet `_. - - Args: - in_channels (int, optional): number of input channels. Default: 256. - conv_out_channels (int, optional): number of output channels before - classification layer. Default: 256. - roi_feat_size (int, optional): roi feat size at box head. Default: 7. - scale_factor (int, optional): scale factor to match roi feat size - at mask head. Default: 2. - """ - - def __init__(self, - in_channels=1024, - out_conv_channels=256, - roi_feat_size=7, - scale_factor=2): - super(FeatureRelayHead, self).__init__() - assert isinstance(roi_feat_size, int) - - self.in_channels = in_channels - self.out_conv_channels = out_conv_channels - self.roi_feat_size = roi_feat_size - self.out_channels = (roi_feat_size**2) * out_conv_channels - self.scale_factor = scale_factor - self.fp16_enabled = False - - self.fc = nn.Linear(self.in_channels, self.out_channels) - self.upsample = nn.Upsample( - scale_factor=scale_factor, mode='bilinear', align_corners=True) - - def init_weights(self): - """Init weights for the head.""" - kaiming_init(self.fc) - - @auto_fp16() - def forward(self, x): - """Forward function.""" - N, in_C = x.shape - if N > 0: - out_C = self.out_conv_channels - out_HW = self.roi_feat_size - x = self.fc(x) - x = x.reshape(N, out_C, out_HW, out_HW) - x = self.upsample(x) - return x - return None diff --git a/spaces/Rurrr/qr_monster/app.py b/spaces/Rurrr/qr_monster/app.py deleted file mode 100644 index 461c7b9686924c08b3f70efc90b0d8e62564928e..0000000000000000000000000000000000000000 --- a/spaces/Rurrr/qr_monster/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + surname + "!!" - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/Ryandhikaw/rvc-hololive/infer_pack/models.py b/spaces/Ryandhikaw/rvc-hololive/infer_pack/models.py deleted file mode 100644 index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000 --- a/spaces/Ryandhikaw/rvc-hololive/infer_pack/models.py +++ /dev/null @@ -1,982 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y_lengths, ds - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - z_slice, ids_slice = commons.rand_slice_segments( - x, y_lengths, self.segment_size - ) - - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice - - def infer( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o, o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/SIGGRAPH2022/DCT-Net/source/mtcnn_pytorch/README.md b/spaces/SIGGRAPH2022/DCT-Net/source/mtcnn_pytorch/README.md deleted file mode 100644 index b748cf583a8af18bad6e40cb5fc8415d3938d7a3..0000000000000000000000000000000000000000 --- a/spaces/SIGGRAPH2022/DCT-Net/source/mtcnn_pytorch/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# MTCNN - -`pytorch` implementation of **inference stage** of face detection algorithm described in -[Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Networks](https://arxiv.org/abs/1604.02878). - -## Example -![example of a face detection](images/example.png) - -## How to use it -Just download the repository and then do this -```python -from src import detect_faces -from PIL import Image - -image = Image.open('image.jpg') -bounding_boxes, landmarks = detect_faces(image) -``` -For examples see `test_on_images.ipynb`. - -## Requirements -* pytorch 0.2 -* Pillow, numpy - -## Credit -This implementation is heavily inspired by: -* [pangyupo/mxnet_mtcnn_face_detection](https://github.com/pangyupo/mxnet_mtcnn_face_detection) diff --git a/spaces/Salesforce/BLIP/train_nlvr.py b/spaces/Salesforce/BLIP/train_nlvr.py deleted file mode 100644 index 84b247bda2334c1fd894b6c11d33ef48c8e7df28..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/BLIP/train_nlvr.py +++ /dev/null @@ -1,213 +0,0 @@ -''' - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. - * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li -''' -import argparse -import os -import ruamel_yaml as yaml -import numpy as np -import random -import time -import datetime -import json -from pathlib import Path -import json -import pickle - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.utils.data import DataLoader -import torch.backends.cudnn as cudnn -import torch.distributed as dist - -from models.blip_nlvr import blip_nlvr - -import utils -from utils import cosine_lr_schedule, warmup_lr_schedule -from data import create_dataset, create_sampler, create_loader - -def train(model, data_loader, optimizer, epoch, device, config): - # train - model.train() - - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}')) - metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}')) - - header = 'Train Epoch: [{}]'.format(epoch) - print_freq = 50 - step_size = 10 - - for i,(image0, image1, text, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): - - images = torch.cat([image0, image1], dim=0) - images, targets = images.to(device), targets.to(device) - - loss = model(images, text, targets=targets, train=True) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - metric_logger.update(lr=optimizer.param_groups[0]["lr"]) - metric_logger.update(loss=loss.item()) - - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger.global_avg()) - return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()} - - -@torch.no_grad() -def evaluate(model, data_loader, device, config): - # test - model.eval() - - metric_logger = utils.MetricLogger(delimiter=" ") - - header = 'Evaluation:' - print_freq = 50 - - for image0, image1, text, targets in metric_logger.log_every(data_loader, print_freq, header): - images = torch.cat([image0, image1], dim=0) - images, targets = images.to(device), targets.to(device) - - prediction = model(images, text, targets=targets, train=False) - - _, pred_class = prediction.max(1) - accuracy = (targets==pred_class).sum() / targets.size(0) - - metric_logger.meters['acc'].update(accuracy.item(), n=image0.size(0)) - - # gather the stats from all processes - metric_logger.synchronize_between_processes() - - print("Averaged stats:", metric_logger.global_avg()) - return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()} - - - -def main(args, config): - utils.init_distributed_mode(args) - - device = torch.device(args.device) - - # fix the seed for reproducibility - seed = args.seed + utils.get_rank() - torch.manual_seed(seed) - np.random.seed(seed) - random.seed(seed) - cudnn.benchmark = True - - #### Dataset #### - print("Creating dataset") - datasets = create_dataset('nlvr', config) - - if args.distributed: - num_tasks = utils.get_world_size() - global_rank = utils.get_rank() - samplers = create_sampler(datasets, [True,False,False], num_tasks, global_rank) - else: - samplers = [None, None, None] - - batch_size=[config['batch_size_train'],config['batch_size_test'],config['batch_size_test']] - train_loader, val_loader, test_loader = create_loader(datasets,samplers,batch_size=batch_size, - num_workers=[4,4,4],is_trains=[True,False,False], - collate_fns=[None,None,None]) - - #### Model #### - print("Creating model") - model = blip_nlvr(pretrained=config['pretrained'], image_size=config['image_size'], - vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer']) - - model = model.to(device) - - model_without_ddp = model - if args.distributed: - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) - model_without_ddp = model.module - - optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay']) - - print("Start training") - start_time = time.time() - best = 0 - best_epoch = 0 - - for epoch in range(0, config['max_epoch']): - if not args.evaluate: - if args.distributed: - train_loader.sampler.set_epoch(epoch) - - cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr']) - - train_stats = train(model, train_loader, optimizer, epoch, device, config) - - val_stats = evaluate(model, val_loader, device, config) - test_stats = evaluate(model, test_loader, device, config) - - if utils.is_main_process(): - if args.evaluate: - log_stats = {**{f'val_{k}': v for k, v in val_stats.items()}, - **{f'test_{k}': v for k, v in test_stats.items()}, - } - with open(os.path.join(args.output_dir, "log.txt"),"a") as f: - f.write(json.dumps(log_stats) + "\n") - - else: - log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, - **{f'val_{k}': v for k, v in val_stats.items()}, - **{f'test_{k}': v for k, v in test_stats.items()}, - 'epoch': epoch, - } - - if float(val_stats['acc'])>best: - save_obj = { - 'model': model_without_ddp.state_dict(), - 'optimizer': optimizer.state_dict(), - 'config': config, - 'epoch': epoch, - } - torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth')) - best = float(val_stats['acc']) - best_epoch = epoch - - with open(os.path.join(args.output_dir, "log.txt"),"a") as f: - f.write(json.dumps(log_stats) + "\n") - if args.evaluate: - break - - dist.barrier() - - if utils.is_main_process(): - with open(os.path.join(args.output_dir, "log.txt"),"a") as f: - f.write("best epoch: %d"%best_epoch) - - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('Training time {}'.format(total_time_str)) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--config', default='./configs/nlvr.yaml') - parser.add_argument('--output_dir', default='output/NLVR') - parser.add_argument('--evaluate', action='store_true') - parser.add_argument('--device', default='cuda') - parser.add_argument('--seed', default=42, type=int) - parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') - parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') - parser.add_argument('--distributed', default=True, type=bool) - args = parser.parse_args() - - config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader) - - Path(args.output_dir).mkdir(parents=True, exist_ok=True) - - yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w')) - - main(args, config) \ No newline at end of file diff --git a/spaces/Samuelblue/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md b/spaces/Samuelblue/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md deleted file mode 100644 index 40ead74201214f8076e82690a205619d6026f9e4..0000000000000000000000000000000000000000 --- a/spaces/Samuelblue/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Onodofthenorth-SD PixelArt SpriteSheet Generator -emoji: 🚀 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ServerX/PorcoDiaz/tools/infer_batch_rvc.py b/spaces/ServerX/PorcoDiaz/tools/infer_batch_rvc.py deleted file mode 100644 index 763d17f14877a2ce35f750202e91356c1f24270f..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/tools/infer_batch_rvc.py +++ /dev/null @@ -1,72 +0,0 @@ -import argparse -import os -import sys - -print("Command-line arguments:", sys.argv) - -now_dir = os.getcwd() -sys.path.append(now_dir) -import sys - -import tqdm as tq -from dotenv import load_dotenv -from scipy.io import wavfile - -from configs.config import Config -from infer.modules.vc.modules import VC - - -def arg_parse() -> tuple: - parser = argparse.ArgumentParser() - parser.add_argument("--f0up_key", type=int, default=0) - parser.add_argument("--input_path", type=str, help="input path") - parser.add_argument("--index_path", type=str, help="index path") - parser.add_argument("--f0method", type=str, default="harvest", help="harvest or pm") - parser.add_argument("--opt_path", type=str, help="opt path") - parser.add_argument("--model_name", type=str, help="store in assets/weight_root") - parser.add_argument("--index_rate", type=float, default=0.66, help="index rate") - parser.add_argument("--device", type=str, help="device") - parser.add_argument("--is_half", type=bool, help="use half -> True") - parser.add_argument("--filter_radius", type=int, default=3, help="filter radius") - parser.add_argument("--resample_sr", type=int, default=0, help="resample sr") - parser.add_argument("--rms_mix_rate", type=float, default=1, help="rms mix rate") - parser.add_argument("--protect", type=float, default=0.33, help="protect") - - args = parser.parse_args() - sys.argv = sys.argv[:1] - - return args - - -def main(): - load_dotenv() - args = arg_parse() - config = Config() - config.device = args.device if args.device else config.device - config.is_half = args.is_half if args.is_half else config.is_half - vc = VC(config) - vc.get_vc(args.model_name) - audios = os.listdir(args.input_path) - for file in tq.tqdm(audios): - if file.endswith(".wav"): - file_path = os.path.join(args.input_path, file) - _, wav_opt = vc.vc_single( - 0, - file_path, - args.f0up_key, - None, - args.f0method, - args.index_path, - None, - args.index_rate, - args.filter_radius, - args.resample_sr, - args.rms_mix_rate, - args.protect, - ) - out_path = os.path.join(args.opt_path, file) - wavfile.write(out_path, wav_opt[0], wav_opt[1]) - - -if __name__ == "__main__": - main() diff --git a/spaces/Shad0ws/Ask-Questions-to-Data/app.py b/spaces/Shad0ws/Ask-Questions-to-Data/app.py deleted file mode 100644 index 2a9fe4256953adc04a061eb7cb52d5d241a60bd5..0000000000000000000000000000000000000000 --- a/spaces/Shad0ws/Ask-Questions-to-Data/app.py +++ /dev/null @@ -1,102 +0,0 @@ -import streamlit as st -import pandas as pd -import asyncio -import random - -loop = asyncio.new_event_loop() -asyncio.set_event_loop(loop) - -import sketch -import streamlit.components.v1 as components -from IPython.display import HTML, display -import uuid -import base64 -import json - -st.title("Ask Questions to Data") -st.markdown("##### Demo Application powered by sketch package") -st.sidebar.image("https://avatars.githubusercontent.com/u/106505054?s=200&v=4", width=100) -st.sidebar.title("About the Package used") -st.sidebar.markdown("##### Sketch is an AI code-writing assistant for pandas users that understands the context of the data, greatly improving the relevance of suggestions. Sketch is usable in seconds and doesn't require adding a plugin to IDE.") - -st.sidebar.title("How it works:") -st.sidebar.markdown("##### Sketch uses efficient approximation algorithms (data sketches) to quickly summarize the data, and feed that information into language models. Right now, it does this by summarizing the columns and writing these summary statistics as additional context to be used by the code-writing prompt. In the future, the dev team hopes to feed these sketches directly into custom made data + language foundation models to get more accurate results.") - -st.sidebar.title("Usecases:") -st.sidebar.markdown("##### Data Catalogging: General tagging (eg. PII identification), Metadata generation (names and descriptions)") -st.sidebar.markdown("##### Data Engineering: Data cleaning and masking (compliance), Derived feature creation and extraction") -st.sidebar.markdown("##### Data Analysis: Data questions, Data Visualizations") - -st.sidebar.caption("Github Repository: https://github.com/approximatelabs/sketch") - - - - - -def upload_data_file(): - st.session_state.file = None - st.session_state.df = None - file = st.file_uploader( - label='Upload Data File', - type=["csv","xlsx","xls"] - ) - if file is not None: - load_data(file) - - -def load_data(file): - st.session_state.file = file - df = pd.read_csv(file) - st.session_state.df = df - - -# Configure session state -if 'file' not in st.session_state: - st.session_state.file = None -if 'df' not in st.session_state: - st.session_state.df = None - - -if st.session_state.file is None: - upload_data_file() - - -def to_b64(data): - return base64.b64encode(json.dumps(data).encode("utf-8")).decode("utf-8") - -if st.session_state.file is not None: - st.session_state.file.seek(0) - - df = pd.read_csv(st.session_state.file) - - st.header("Uploaded Data:") - st.dataframe(df) - - with st.form("my_form"): - request_type = st.radio( - label="Selection Panel", - options=['Ask question about the data', 'Generate codes for new analysis'], - index=0 - ) - - request = st.text_area( - label="Input your request", - value="", - height=50, - max_chars=500 - ) - - submitted = st.form_submit_button("Submit") - - if submitted: - if request_type== 'Ask question about the data': - if request != "": - answer = df.sketch.ask(request, call_display=False) - st.code(answer) - else: - if request != "": - answer1 = df.sketch.howto(request, call_display=False) - st.code(answer1) - -else: - st.write('Please upload data file in order to ask questions to it.') \ No newline at end of file diff --git a/spaces/Silentlin/DiffSinger/docs/README-TTS.md b/spaces/Silentlin/DiffSinger/docs/README-TTS.md deleted file mode 100644 index 41602ea5c70945ddb133d18a9ab504ac2eb3f592..0000000000000000000000000000000000000000 --- a/spaces/Silentlin/DiffSinger/docs/README-TTS.md +++ /dev/null @@ -1,69 +0,0 @@ -# DiffSinger: Singing Voice Synthesis via Shallow Diffusion Mechanism -[![arXiv](https://img.shields.io/badge/arXiv-Paper-.svg)](https://arxiv.org/abs/2105.02446) -[![GitHub Stars](https://img.shields.io/github/stars/MoonInTheRiver/DiffSinger?style=social)](https://github.com/MoonInTheRiver/DiffSinger) -[![downloads](https://img.shields.io/github/downloads/MoonInTheRiver/DiffSinger/total.svg)](https://github.com/MoonInTheRiver/DiffSinger/releases) - | [Interactive🤗 TTS](https://huggingface.co/spaces/NATSpeech/DiffSpeech) - -## DiffSpeech (TTS) -### 1. Preparation - -#### Data Preparation -a) Download and extract the [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/), then create a link to the dataset folder: `ln -s /xxx/LJSpeech-1.1/ data/raw/` - -b) Download and Unzip the [ground-truth duration](https://github.com/MoonInTheRiver/DiffSinger/releases/download/pretrain-model/mfa_outputs.tar) extracted by [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz): `tar -xvf mfa_outputs.tar; mv mfa_outputs data/processed/ljspeech/` - -c) Run the following scripts to pack the dataset for training/inference. - -```sh -export PYTHONPATH=. -CUDA_VISIBLE_DEVICES=0 python data_gen/tts/bin/binarize.py --config configs/tts/lj/fs2.yaml - -# `data/binary/ljspeech` will be generated. -``` - -#### Vocoder Preparation -We provide the pre-trained model of [HifiGAN](https://github.com/MoonInTheRiver/DiffSinger/releases/download/pretrain-model/0414_hifi_lj_1.zip) vocoder. -Please unzip this file into `checkpoints` before training your acoustic model. - -### 2. Training Example - -First, you need a pre-trained FastSpeech2 checkpoint. You can use the [pre-trained model](https://github.com/MoonInTheRiver/DiffSinger/releases/download/pretrain-model/fs2_lj_1.zip), or train FastSpeech2 from scratch, run: -```sh -CUDA_VISIBLE_DEVICES=0 python tasks/run.py --config configs/tts/lj/fs2.yaml --exp_name fs2_lj_1 --reset -``` -Then, to train DiffSpeech, run: -```sh -CUDA_VISIBLE_DEVICES=0 python tasks/run.py --config usr/configs/lj_ds_beta6.yaml --exp_name lj_ds_beta6_1213 --reset -``` - -Remember to adjust the "fs2_ckpt" parameter in `usr/configs/lj_ds_beta6.yaml` to fit your path. - -### 3. Inference Example - -```sh -CUDA_VISIBLE_DEVICES=0 python tasks/run.py --config usr/configs/lj_ds_beta6.yaml --exp_name lj_ds_beta6_1213 --reset --infer -``` - -We also provide: - - the pre-trained model of [DiffSpeech](https://github.com/MoonInTheRiver/DiffSinger/releases/download/pretrain-model/lj_ds_beta6_1213.zip); - - the individual pre-trained model of [FastSpeech 2](https://github.com/MoonInTheRiver/DiffSinger/releases/download/pretrain-model/fs2_lj_1.zip) for the shallow diffusion mechanism in DiffSpeech; - -Remember to put the pre-trained models in `checkpoints` directory. - -## Mel Visualization -Along vertical axis, DiffSpeech: [0-80]; FastSpeech2: [80-160]. - -
    - Article with HTML formatting
    -

    How to Crack Zwcad 2011 English Professional 11

    -

    Zwcad is a popular and powerful CAD (computer-aided design) software that allows you to create, edit, and view 2D and 3D drawings. It is compatible with Windows, macOS, Linux, and Android operating systems. It has many features and functions that make it a great choice for engineers, architects, designers, and students.

    -

    Crack Zwcad 2011 English Professional 11


    Download File ★★★★★ https://byltly.com/2uKzdE



    -

    However, Zwcad is not a free software. You need to buy a license to use it without any limitations or restrictions. The license can cost from $799 to $1499 depending on the version and edition you choose. That's quite expensive for many people who want to use this software for personal or professional purposes.

    -

    That's why some people look for ways to crack Zwcad 2011 English Professional 11. Cracking is a process of modifying or bypassing the security mechanisms of a software to make it work without a license or activation. By cracking Zwcad 2011 English Professional 11, you can use it for free without paying anything.

    -

    But is cracking Zwcad 2011 English Professional 11 a good idea? What are the benefits and risks of doing so? How can you crack Zwcad 2011 English Professional 11 safely and effectively? And how can you use Zwcad 2011 English Professional 11 after cracking it?

    -

    In this article, we will answer all these questions and more. We will guide you through the process of cracking Zwcad 2011 English Professional 11 step by step. We will also give you some tips and tricks to use Zwcad 2011 English Professional 11 after cracking it. But before we do that, let's first understand what Zwcad 2011 English Professional 11 is and how it compares with other CAD software.

    -

    -

    What is Zwcad 2011 English Professional 11?

    -

    Zwcad 2011 English Professional 11 is the latest version of Zwcad, a CAD software developed by ZWSOFT, a Chinese company that specializes in CAD/CAM solutions. Zwcad 2011 English Professional 11 was released in September 2020 and has many new features and improvements over the previous versions.

    -

    Some of the main features of Zwcad 2011 English Professional 11 are:

    -
      -
    • It supports DWG and DXF file formats, which are widely used in the CAD industry. You can open, edit, and save files created by other CAD software such as AutoCAD, BricsCAD, DraftSight, etc.
    • -
    • It has a user-friendly interface that is similar to AutoCAD. You can easily switch between classic and ribbon modes, customize toolbars and menus, and use keyboard shortcuts and commands.
    • -
    • It has a powerful drawing engine that can handle large and complex drawings with high speed and accuracy. You can zoom, pan, rotate, and snap objects with ease.
    • -
    • It has a rich set of drawing tools and editing functions that can help you create 2D and 3D drawings. You can draw lines, arcs, circles, polylines, splines, hatches, blocks, dimensions, texts, etc. You can also modify objects by using commands such as trim, extend, fillet, chamfer, offset, mirror, array, etc.
    • -
    • It has a comprehensive set of annotation tools that can help you add dimensions, leaders, tolerances, tables, symbols, etc. to your drawings. You can also use styles, layers, blocks, xrefs, etc. to organize your drawings.
    • -
    • It has a built-in PDF converter that can help you export your drawings to PDF files. You can also import PDF files as underlays or vector objects.
    • -
    • It has a smart voice system that can help you input commands by voice recognition. You can also use voice notes to record and play audio messages in your drawings.
    • -
    • It has a cloud service that can help you store and share your drawings online. You can also access your drawings from any device by using the ZWCAD mobile app.
    • -
    -

    Zwcad 2011 English Professional 11 is a versatile and reliable CAD software that can meet the needs of various industries and applications. Whether you are working on architecture, engineering, construction, manufacturing, design, or education projects, you can find Zwcad 2011 English Professional 11 useful and efficient.

    -

    But how does Zwcad 2011 English Professional 11 compare with other CAD software? Let's take a look at some of the advantages and disadvantages of Zwcad 2011 English Professional 11 over its competitors.

    -

    The advantages of Zwcad 2011 English Professional 11 over other CAD software are:

    -
      -
    • It is cheaper than other CAD software. The license fee for Zwcad 2011 English Professional 11 is only $799 for the standard edition and $1499 for the professional edition. By contrast, the license fee for AutoCAD is $1695 per year and for BricsCAD is $990 per year.
    • -
    • It is compatible with other CAD software. You can easily exchange files and data with other CAD software by using the DWG and DXF file formats. You can also use the same commands and shortcuts as AutoCAD.
    • -
    • It is stable and fast. You can work on large and complex drawings without worrying about crashes or lags. You can also enjoy smooth performance and high quality output.
    • -
    • It is easy to use and learn. You can quickly get started with Zwcad 2011 English Professional 11 by using the familiar interface and functions. You can also find plenty of tutorials and resources online to help you master the software.
    • -
    -

    The disadvantages of Zwcad 2011 English Professional 11 over other CAD software are:

    -
      -
    • It is less popular than other CAD software. You may find fewer users and communities that use Zwcad 2011 English Professional 11 than AutoCAD or BricsCAD. You may also find fewer plugins and extensions that support Zwcad 2011 English Professional 11 than other CAD software.It is less advanced than other CAD software. You may find some features and functions that are missing or inferior in Zwcad 2011 English Professional 11 than in AutoCAD or BricsCAD. For example, Zwcad 2011 English Professional 11 does not support 3D modeling, rendering, or animation. It also has fewer customization and automation options than other CAD software.
    • -
    • It is less secure than other CAD software. You may face more risks and challenges when you crack Zwcad 2011 English Professional 11 than when you crack AutoCAD or BricsCAD. You may encounter more errors, bugs, or viruses that can affect your device or data. You may also face more legal consequences if you are caught using the cracked version of the software.
    • -
    -

    As you can see, Zwcad 2011 English Professional 11 has its pros and cons over other CAD software. You need to weigh them carefully before you decide to use it or not. But if you have already made up your mind to use Zwcad 2011 English Professional 11, you may wonder how to get it for free. That's where cracking comes in.

    -

    Why do you need to crack Zwcad 2011 English Professional 11?

    -

    As we mentioned earlier, Zwcad 2011 English Professional 11 is not a free software. You need to buy a license to use it without any limitations or restrictions. The license can cost from $799 to $1499 depending on the version and edition you choose. That's quite expensive for many people who want to use this software for personal or professional purposes.

    -

    That's why some people look for ways to crack Zwcad 2011 English Professional 11. Cracking is a process of modifying or bypassing the security mechanisms of a software to make it work without a license or activation. By cracking Zwcad 2011 English Professional 11, you can use it for free without paying anything.

    -

    But what are the benefits of cracking Zwcad 2011 English Professional 11? And what are the risks and challenges of cracking Zwcad 2011 English Professional 11? Let's find out.

    -

    The benefits of cracking Zwcad 2011 English Professional 11 are:

    -
      -
    • You can save money. By cracking Zwcad 2011 English Professional 11, you don't have to spend hundreds or thousands of dollars on buying the license. You can use that money for other things that are more important or valuable to you.
    • -
    • You can access all the features and functions of the software. By cracking Zwcad 2011 English Professional 11, you don't have to deal with any limitations or restrictions that come with the trial version or the unlicensed version of the software. You can use all the tools and options that are available in the software without any problems.
    • -
    • You can update the software regularly. By cracking Zwcad 2011 English Professional 11, you don't have to worry about missing any updates or patches that are released by the developer. You can download and install them as soon as they are available and enjoy the latest improvements and fixes.
    • -
    -

    The risks and challenges of cracking Zwcad 2011 English Professional 11 are:

    -
      -
    • You can damage your device or data. By cracking Zwcad 2011 English Professional 11, you may expose your device to malware, viruses, or hackers that can harm your device or data. You may lose your files, corrupt your system, or leak your personal information.
    • -
    • You can face legal consequences. By cracking Zwcad 2011 English Professional 11, you may violate the terms and conditions of the software license and infringe the intellectual property rights of the developer. You may face lawsuits, fines, or even jail time if you are caught using the cracked version of the software.
    • -
    • You can lose support and service. By cracking Zwcad 2011 English Professional 11, you may lose access to the official website, the user manual, the online forum, or the customer service of the developer. You may not be able to get help or support if you encounter any issues or problems with the software.
    • -
    -

    As you can see, cracking Zwcad 2011 English Professional 11 has its advantages and disadvantages. You need to consider them carefully before you decide to do it or not. But if you have already decided to crack Zwcad 2011 English Professional 11, you may wonder how to do it safely and effectively. That's what we will show you next.

    -

    How to download Zwcad 2011 English Professional 11?

    -

    Before you can crack Zwcad 2011 English Professional 11, you need to download it first. There are two ways to download Zwcad 2011 English Professional 11: from the official website or from torrent sites.

    -

    How to download Zwcad 2011 English Professional 11 from the official website?

    -

    The official website of Zwcad is https://www.zwsoft.com/zwcad. You can visit this website and find the download link for Zwcad 2011 English Professional 11. You can choose the version and edition that suits your device and operating system. You can also choose the language that you prefer.

    -

    After you click the download link, you will be asked to fill in a form with your name, email, phone number, country, and industry. This is required to get a free trial of Zwcad 2011 English Professional 11 for 30 days. You can also opt-in to receive newsletters and updates from the developer.

    -

    After you submit the form, you will receive an email with the download link and the activation code for Zwcad 2011 English Professional 11. You can click the link and follow the instructions to download and install the software on your device. You can also enter the activation code to activate the trial version of the software.

    -

    The trial version of Zwcad 2011 English Professional 11 will allow you to use all the features and functions of the software for 30 days. After that, you will need to buy a license or crack the software to continue using it.

    -

    How to download Zwcad 2011 English Professional 11 from torrent sites?

    -

    Torrent sites are websites that allow users to share and download files using peer-to-peer (P2P) networks. You can use torrent sites to download Zwcad 2011 English Professional 11 for free without filling in any forms or getting any activation codes. However, you need to be careful when using torrent sites as they may contain malware, viruses, or fake files.

    -

    Some of the best torrent sites for downloading Zwcad 2011 English Professional 11 are:

    -
      -
    • The Pirate Bay: https://thepiratebay.org/
    • -
    • RARBG: https://rarbg.to/
    • -
    • 1337x: https://1337x.to/
    • -
    • LimeTorrents: https://www.limetorrents.info/
    • -
    • Torrentz2: https://torrentz2.eu/
    • -
    -

    To use these torrent sites, you need to have a torrent client installed on your device. A torrent client is a software that allows you to download and upload files using P2P networks. Some of the best torrent clients are:

    -
      -
    • uTorrent: https://www.utorrent.com/
    • -
    • BitTorrent: https://www.bittorrent.com/
    • -
    • qBittorrent: https://www.qbittorrent.org/
    • -
    • Vuze: https://www.vuze.com/
    • -
    • Deluge: https://deluge-torrent.org/
    • -
    -

    After you have installed a torrent client on your device, you can visit any of the torrent sites and search for Zwcad 2011 English Professional 11. You will see a list of results with different file sizes, seeders, leechers, and comments. You can choose the one that has the most seeders, leechers, and positive comments.

    -

    After you click on the result, you will see a magnet link or a torrent file that you can use to download Zwcad 2011 English Professional 11. You can copy the magnet link or download the torrent file and open it with your torrent client. Your torrent client will start downloading Zwcad 2011 English Professional 11 from other users who have it on their devices.

    -

    The download speed and time will depend on your internet connection and the number of seeders and leechers. The more seeders and leechers there are, the faster and easier it will be to download Zwcad 2011 English Professional 11. After the download is complete, you can find Zwcad 2011 English Professional 11 in your device's folder.

    -

    How to crack Zwcad 2011 English Professional 11?

    -

    Now that you have downloaded Zwcad 2011 English Professional 11, you need to crack it to use it without a license or activation. Cracking Zwcad 2011 English Professional 11 is not a simple or easy task. You need to have some skills and knowledge in software cracking, as well as some tools and resources to help you. Here are the steps and tools for cracking Zwcad 2011 English Professional 11:

    -

    The steps for cracking Zwcad 2011 English Professional 11 are:

    -
      -
    1. Install Zwcad 2011 English Professional 11 on your device. You can use the setup file that you downloaded from the official website or the torrent site. Follow the instructions to install the software on your device.
    2. -
    3. Disconnect your device from the internet. You need to do this to prevent the software from connecting to the server and verifying your license or activation. You can turn off your Wi-Fi, unplug your Ethernet cable, or disable your network adapter.
    4. -
    5. Run Zwcad 2011 English Professional 11 on your device. You will see a window that asks you to activate the software. You can choose to activate it online, offline, or later. Choose to activate it later and close the window.
    6. -
    7. Open the folder where you installed Zwcad 2011 English Professional 11 on your device. You will see a file named "zwlm.exe". This is the file that controls the license and activation of the software. You need to replace this file with a cracked version of it.
    8. -
    9. Download a cracked version of "zwlm.exe" from a reliable source. You can use a search engine or a torrent site to find a cracked version of "zwlm.exe" that matches the version and edition of Zwcad 2011 English Professional 11 that you have installed on your device. Make sure that the file is safe and clean from malware, viruses, or fake files.
    10. -
    11. Copy and paste the cracked version of "zwlm.exe" into the folder where you installed Zwcad 2011 English Professional 11 on your device. You will be asked to overwrite or replace the original file. Choose to overwrite or replace it.
    12. -
    13. Run Zwcad 2011 English Professional 11 on your device again. You will see a window that asks you to activate the software again. You can choose to activate it online, offline, or later. Choose to activate it offline and click "Next".
    14. -
    15. You will see a window that shows you a serial number and a request code. You need to use these codes to generate an activation code for Zwcad 2011 English Professional 11.
    16. -
    17. Download an activation code generator for Zwcad 2011 English Professional 11 from a reliable source. You can use a search engine or a torrent site to find an activation code generator for Zwcad 2011 English Professional 11 that matches the version and edition of Zwcad 2011 English Professional 11 that you have installed on your device. Make sure that the file is safe and clean from malware, viruses, or fake files.
    18. -
    19. Run the activation code generator for Zwcad 2011 English Professional 11 on your device. You will see a window that asks you to enter the serial number and the request code that you got from Zwcad 2011 English Professional 11. Enter them and click "Generate".
    20. -
    21. You will see a window that shows you an activation code for Zwcad 2011 English Professional 11. Copy this code and paste it into the window of Zwcad 2011 English Professional 11 that asks you for an activation code. Click "Next".
    22. -
    23. You will see a window that confirms that you have successfully activated Zwcad 2011 English Professional 11. You can now use the software without any limitations or restrictions. Click "Finish" to close the window.
    24. -
    -

    Congratulations! You have successfully cracked Zwcad 2011 English Professional 11. You can now enjoy all the features and functions of the software for free. But how can you use Zwcad 2011 English Professional 11 after cracking it? Let's find out.

    -

    How to use Zwcad 2011 English Professional 11 after cracking it?

    -

    After cracking Zwcad 2011 English Professional 11, you can use it as you would normally use any CAD software. You can create, edit, and view 2D and 3D drawings with ease and efficiency. You can also export, import, and share your drawings with other users or applications.

    -

    However, there are some tips and tricks that you should keep in mind when using Zwcad 2011 English Professional 11 after cracking it. These tips and tricks will help you avoid errors, problems, or issues that may arise from using the cracked version of the software. Here are some of them:

    -

    The tips and tricks for using Zwcad 2011 English Professional 11 after cracking it are:

    -
      -
    • Do not update the software. Updating the software may overwrite or delete the cracked files and cause the software to stop working or ask for a license or activation again. If you see any prompts or notifications to update the software, ignore them or disable them.
    • -
    • Do not connect to the internet. Connecting to the internet may allow the software to communicate with the server and detect that you are using a cracked version of the software. This may result in errors, warnings, or blocks that may prevent you from using the software. If you need to connect to the internet for some reason, make sure that you block the software from accessing the internet by using a firewall or a VPN.
    • -
    • Do not register or sign in to the software. Registering or signing in to the software may require you to provide your personal information or verify your license or activation. This may expose your identity or reveal that you are using a cracked version of the software. If you see any prompts or notifications to register or sign in to the software, ignore them or disable them.
    • -
    • Do not contact the developer or customer service. Contacting the developer or customer service may require you to provide your serial number, request code, activation code, or other information that may indicate that you are using a cracked version of the software. This may result in legal consequences or loss of support and service. If you need help or support for Zwcad 2011 English Professional 11, use other sources such as online forums, blogs, videos, etc.
    • -
    • Do not share your cracked files or codes with others. Sharing your cracked files or codes with others may put them at risk of malware, viruses, or hackers. It may also increase the chances of being caught by the developer or authorities. If you want to share Zwcad 2011 English Professional 11 with others, share the original files or codes that you downloaded from the official website or torrent sites.
    • -
    -

    By following these tips and tricks, you can use Zwcad 2011 English Professional 11 after cracking it without any major problems or issues. However, you should always be careful and cautious when using any cracked software as they may still pose some risks and challenges that are beyond your control.

    -

    Conclusion

    -

    Zwcad 2011 English Professional 11 is a popular and powerful CAD software that allows you to create, edit, and view 2D and 3D drawings. It has many features and functions that make it a great choice for various industries and applications.

    -

    However, Zwcad 2011 English Professional 11 is not a free software. You need to buy a license to use it without any limitations or restrictions. The license can cost from $799 to $1499 depending on the version and edition you choose.

    -

    That's why some people look for ways to crack Zwcad 2011 English Professional 11. Cracking is a process of modifying or bypassing the security mechanisms of a software to make it work without a license or activation. By cracking Zwcad 2011 English Professional 11, you can use it for free without paying anything.

    -

    But cracking Zwcad 2011 English Professional 11 is not a simple or easy task. You need to have some skills and knowledge in software cracking, as well as some tools and resources to help you. You also need to consider the benefits and risks of cracking Zwcad 2011 English Professional 11 before doing it. You also need to follow some tips and tricks to use Zwcad 2011 English Professional 11 after cracking it.

    -

    In this article, we have shown you how to crack Zwcad 2011 English Professional 11 step by step. We have also given you some tips and tricks to use Zwcad 2011 English Professional 11 after cracking it. We hope that this article has been helpful and informative for you.

    -

    However, we do not recommend or endorse cracking Zwcad 2011 English Professional 11 or any other software. Cracking is illegal and unethical and may cause more harm than good. You may damage your device or data, face legal consequences, or lose support and service. You may also miss out on the benefits and features of the official version of the software.

    -

    Therefore, we suggest that you buy the official license of Zwcad 2011 English Professional 11 from the developer or use a free or open-source CAD software instead. This way, you can use the software legally and safely and enjoy its full potential and value.

    -

    If you have any questions or comments about this article or Zwcad 2011 English Professional 11, feel free to leave them below. We would love to hear from you and help you out. Thank you for reading and have a great day!

    -

    FAQs

    -
      -
    • Q: Is cracking Zwcad 2011 English Professional 11 legal?
    • -
    • A: No, cracking Zwcad 2011 English Professional 11 is illegal and violates the terms and conditions of the software license.
    • -
    • Q: Is cracking Zwcad 2011 English Professional 11 safe?
    • -
    • A: No, cracking Zwcad 2011 English Professional 11 is not safe and may expose your device to malware, viruses, or hackers.
    • -
    • Q: Is cracking Zwcad 2011 English Professional 11 worth it?
    • -
    • A: No, cracking Zwcad 2011 English Professional 11 is not worth it and may cause more harm than good. You may lose your data, damage your device, or face legal consequences.
    • -
    • Q: What are the alternatives to cracking Zwcad 2011 English Professional 11?
    • -
    • A: The alternatives to cracking Zwcad 2011 English Professional 11 are buying the official license, using a free or open-source CAD software, or using an online CAD service.
    • -
    • Q: Where can I get help or support for Zwcad 2011 English Professional 11?
    • -
    • A: You can get help or support for Zwcad 2011 English Professional 11 from the official website, the user manual, the online forum, or the customer service.
    • -

    b2dd77e56b
    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/2pac Still I Rise Zip LINK Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/2pac Still I Rise Zip LINK Download.md deleted file mode 100644 index 1479562520737306d6fb5e2b3c42f0f69f5d6cd4..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/2pac Still I Rise Zip LINK Download.md +++ /dev/null @@ -1,126 +0,0 @@ -
    -

    2pac still i rise zip download: The Ultimate Guide to Downloading and Enjoying the Classic Album by 2Pac & Outlawz

    - -

    Are you a fan of 2Pac, the legendary rapper who changed the game of hip hop with his music and message? Do you love Outlawz, the rap group that he formed with his loyal friends and collaborators? If so, you must have heard of their album Still I Rise, one of the best albums in rap history.

    - -

    Still I Rise is the third and final studio album by 2Pac and Outlawz, released in 1999 after 2Pac's death in 1996. It features 15 tracks that showcase the rap skills, political views, and personal struggles of both artists. The album was a huge success, reaching number one on the Billboard 200 and selling over three million copies in the US.

    -

    2pac still i rise zip download


    Download ✒ ✒ ✒ https://imgfil.com/2uxZm5



    - -

    But how can you listen to this masterpiece today? One of the easiest and most convenient ways is to download the zip file of Still I Rise from the internet. A zip file is a compressed file that contains multiple files or folders in one. By downloading the zip file of Still I Rise, you can get access to all the songs in the album in one go.

    - -

    How to Find the Zip File of Still I Rise

    - -

    There are many websites that offer free downloads of Still I Rise zip file. However, not all of them are reliable or safe. Some may contain viruses, malware, or spyware that can harm your computer or device. Others may have broken links, low-quality audio, or incomplete files.

    - -

    To avoid these problems, you should look for reputable and trustworthy websites that provide high-quality and virus-free downloads of Still I Rise zip file. Here are some of the best ones that we recommend:

    - -
      -
    • Internet Archive: This is a non-profit digital library that offers free access to millions of books, movies, music, and more. You can find the zip file of Still I Rise by 2Pac & Outlawz here, along with other albums by 2Pac and other hip hop artists. You can also stream the songs online or download them individually.
    • -
    • EastNaija: This is a website that provides free downloads of African music, videos, and news. You can find the zip file of Still I Rise by 2Pac & Outlawz here, along with other albums by 2Pac and other rap legends. You can also download each song separately or listen to them online.
    • -
    • Archive.org (retail version): This is another link from the Internet Archive that offers the retail version of Still I Rise by 2Pac & Outlawz. This version has some minor differences from the original version, such as different track order and mixing. You can download the zip file here or stream the songs online.
    • -
    - -

    How to Download and Extract the Zip File of Still I Rise

    - -

    Once you have found a reliable website that offers the zip file of Still I Rise by 2Pac & Outlawz, you can follow these simple steps to download and extract it:

    - -
      -
    1. Click on the download link or button on the website.
    2. -
    3. Choose a location on your computer or device where you want to save the zip file.
    4. -
    5. Wait for the download to finish.
    6. -
    7. Locate the zip file on your computer or device and right-click on it.
    8. -
    9. Select "Extract All" or "Unzip" from the menu.
    10. -
    11. Choose a destination folder where you want to extract the files.
    12. -
    13. Wait for the extraction to finish.
    14. -
    15. Open the destination folder and enjoy listening to Still I Rise by 2Pac & Outlawz!
    16. -
    - -

    Why You Should Listen to Still I Rise by 2Pac & Outlawz

    - -

    Still I Rise by 2Pac & Outlawz is more than just an album. It is a testament to the legacy and impact of 2Pac, who was killed in 1996 at the age of 25. It is also a tribute to his friendship and collaboration with Outlawz, who continued his vision and mission after his death.

    - -

    The album features songs that cover various topics, such as social injustice, racism, violence, poverty, spirituality, love, loyalty, and hope. The songs are powerful, emotional, inspiring, and catchy. The lyrics are raw, honest, poetic, and provocative. The beats are hard-hitting, melodic, and diverse.

    - -

    Still I Rise by 2Pac & Outlawz is an album that every hip hop fan should listen to at least once in their lifetime. It is an album that showcases the talent, passion, and soul of two legendary rap groups. It is an album that will make you think, feel, and rise above your challenges.

    - -

    Conclusion

    - -

    If you are looking for a way to enjoy Still I Rise by 2Pac & Outlawz today, you can easily download the zip file of this album from various websites on the internet. However, you should be careful about choosing a reliable and safe website that offers high-quality and virus-free downloads. You can also extract the zip file easily using your computer or device's built-in software or a third-party tool.

    -

    - -

    Still I Rise by 2Pac & Outlawz is an album that deserves your attention and appreciation. It is an album that will make you appreciate the artistry and legacy of 2Pac and Outlawz. It is an album that will make you love hip hop music even more.

    - -

    So what are you waiting for? Download Still I Rise by 2Pac & Outlawz today and enjoy this masterpiece!

    -

    How to Listen to Still I Rise by 2Pac & Outlawz

    - -

    After you have downloaded and extracted the zip file of Still I Rise by 2Pac & Outlawz, you can listen to the album in different ways. Here are some of the options that you have:

    - -
      -
    • Play the songs on your computer or device using your preferred media player. You can create a playlist of the songs or shuffle them randomly. You can also adjust the volume, bass, treble, and other settings to enhance your listening experience.
    • -
    • Transfer the songs to your smartphone, tablet, iPod, or other portable devices using a USB cable or Bluetooth. You can then listen to the songs on the go using your headphones or speakers. You can also use apps like Spotify, Apple Music, or Google Play Music to sync the songs to your device.
    • -
    • Burn the songs to a CD or DVD using a software like Nero, Roxio, or Windows Media Player. You can then play the CD or DVD on your home stereo system, car stereo system, or any other device that supports CD or DVD playback. You can also make copies of the CD or DVD and share them with your friends or family.
    • -
    - -

    What You Can Learn from Still I Rise by 2Pac & Outlawz

    - -

    Still I Rise by 2Pac & Outlawz is not just an album that you can listen to for entertainment. It is also an album that you can learn from for inspiration. Here are some of the things that you can learn from this album:

    - -
      -
    • You can learn about the history and culture of hip hop music and how it evolved over time. You can learn about the origins, influences, styles, and trends of hip hop music and how it reflects the social and political issues of different eras.
    • -
    • You can learn about the life and legacy of 2Pac and how he became one of the most influential and respected rappers of all time. You can learn about his childhood, upbringing, education, career, achievements, controversies, death, and impact. You can also learn about his philosophy, vision, and message that he conveyed through his music.
    • -
    • You can learn about Outlawz and how they carried on 2Pac's legacy after his death. You can learn about their background, formation, members, albums, songs, collaborations, challenges, and achievements. You can also learn about their loyalty, friendship, and dedication to 2Pac and his mission.
    • -
    - -

    Conclusion

    - -

    Still I Rise by 2Pac & Outlawz is an album that you should not miss if you are a fan of hip hop music. It is an album that will give you a glimpse into the genius and greatness of 2Pac and Outlawz. It is an album that will make you appreciate their music and message even more.

    - -

    If you want to listen to this album today, you can easily download the zip file of Still I Rise from various websites on the internet. You can also extract the zip file easily using your computer or device's built-in software or a third-party tool. You can then listen to the album in different ways depending on your preference and convenience.

    - -

    Still I Rise by 2Pac & Outlawz is an album that will enrich your mind and soul with its powerful and inspiring songs. It is an album that will make you think, feel, and rise above your challenges.

    - -

    So what are you waiting for? Download Still I Rise by 2Pac & Outlawz today and enjoy this masterpiece!

    -

    How to Share Still I Rise by 2Pac & Outlawz with Others

    - -

    If you love Still I Rise by 2Pac & Outlawz, you may want to share it with others who may appreciate it as well. There are many ways that you can share this album with your friends, family, or other hip hop fans. Here are some of the options that you have:

    - -
      -
    • Upload the songs to your social media platforms, such as Facebook, Instagram, Twitter, or YouTube. You can also tag or mention 2Pac and Outlawz in your posts or comments. You can also use hashtags like #StillIRise, #2Pac, #Outlawz, or #HipHop to reach more people who may be interested in this album.
    • -
    • Create a blog or a website where you can write about Still I Rise by 2Pac & Outlawz and share your thoughts and opinions on the album. You can also include links to the websites where you downloaded the zip file of the album or where you can stream the songs online. You can also invite others to comment on your blog or website and engage in discussions about the album.
    • -
    • Make a podcast or a video where you can talk about Still I Rise by 2Pac & Outlawz and review the album. You can also play some of the songs or snippets from the album and analyze them. You can also invite guests or co-hosts who are also fans of 2Pac and Outlawz or who are experts on hip hop music. You can then upload your podcast or video to platforms like Spotify, Apple Podcasts, SoundCloud, YouTube, or Vimeo.
    • -
    - -

    How to Support Still I Rise by 2Pac & Outlawz

    - -

    If you want to support Still I Rise by 2Pac & Outlawz and show your appreciation for their music and message, there are many ways that you can do so. Here are some of the ways that you can support this album and its artists:

    - -
      -
    • Buy the physical copy of Still I Rise by 2Pac & Outlawz from online stores like Amazon, eBay, or Discogs. You can also buy the digital copy of the album from platforms like iTunes, Google Play Music, or Bandcamp. By buying the album, you can support the artists financially and help them continue their work.
    • -
    • Donate to charities or causes that 2Pac and Outlawz supported or were involved in. Some of these include The Tupac Amaru Shakur Foundation, The Tupac Amaru Shakur Center for the Arts, The Makaveli Branded Clothing Line, The Outlaw Foundation, The Thug Life Army Foundation, The Mutulu Shakur Family Fund, and The Malcolm X Grassroots Movement. By donating to these charities or causes, you can support the vision and mission of 2Pac and Outlawz and help them make a positive impact on the world.
    • -
    • Spread the word about Still I Rise by 2Pac & Outlawz and encourage others to listen to it. You can also write reviews or ratings for the album on platforms like Amazon, iTunes, Google Play Music, or Bandcamp. You can also join online forums or communities where you can discuss Still I Rise by 2Pac & Outlawz with other fans or hip hop enthusiasts. By spreading the word about this album, you can increase its popularity and recognition and help it reach more people.
    • -
    - -

    Conclusion

    - -

    Still I Rise by 2Pac & Outlawz is an album that you should not miss if you are a fan of hip hop music. It is an album that will give you a glimpse into the genius and greatness of 2Pac and Outlawz. It is an album that will make you appreciate their music and message even more.

    - -

    If you want to listen to this album today, you can easily download the zip file of Still I Rise from various websites on the internet. You can also extract the zip file easily using your computer or device's built-in software or a third-party tool. You can then listen to the album in different ways depending on your preference and convenience.

    - -

    If you love this album, you may also want to share it with others who may appreciate it as well. You can also support this album and its artists by buying their music, donating to their charities or causes, or spreading the word about their work.

    - -

    Still I Rise by 2Pac & Outlawz is an album that will enrich your mind and soul with its powerful and inspiring songs. It is an album that will make you think, feel, and rise above your challenges.

    - -

    So what are you waiting for? Download Still I Rise by 2Pac & Outlawz today and enjoy this masterpiece!

    -

    Conclusion

    - -

    Still I Rise by 2Pac & Outlawz is an album that you should not miss if you are a fan of hip hop music. It is an album that will give you a glimpse into the genius and greatness of 2Pac and Outlawz. It is an album that will make you appreciate their music and message even more.

    - -

    If you want to listen to this album today, you can easily download the zip file of Still I Rise from various websites on the internet. You can also extract the zip file easily using your computer or device's built-in software or a third-party tool. You can then listen to the album in different ways depending on your preference and convenience.

    - -

    If you love this album, you may also want to share it with others who may appreciate it as well. You can also support this album and its artists by buying their music, donating to their charities or causes, or spreading the word about their work.

    - -

    Still I Rise by 2Pac & Outlawz is an album that will enrich your mind and soul with its powerful and inspiring songs. It is an album that will make you think, feel, and rise above your challenges.

    - -

    So what are you waiting for? Download Still I Rise by 2Pac & Outlawz today and enjoy this masterpiece!

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Apocalyptohollywoodmoviehindidubbinghdmp4download.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/Apocalyptohollywoodmoviehindidubbinghdmp4download.md deleted file mode 100644 index 13eb1e89fa23f40fa65325be96a9c8249d191855..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Apocalyptohollywoodmoviehindidubbinghdmp4download.md +++ /dev/null @@ -1,66 +0,0 @@ -## apocalyptohollywoodmoviehindidubbinghdmp4download - - - - - - ![Apocalyptohollywoodmoviehindidubbinghdmp4download](https://1.bp.blogspot.com/_We7_bc6rARk/Sww1F-J3IxI/AAAAAAAAIBI/_cdn9uTjFtM/s1600/apocalypto.jpg) - - - - - -**LINK >>> [https://kneedacexbrew.blogspot.com/?d=2txjpm](https://kneedacexbrew.blogspot.com/?d=2txjpm)** - - - - - - - - - - - - I'll try to create that. Here is what I came up with: - -# Apocalypto: A Hollywood Epic in Hindi Dubbing - - - -Apocalypto is a 2006 American epic historical film directed by Benjamín. It depicts the life and struggle of a young Mayan man who escapes from a brutal sacrifice and tries to save his family and people from a ruthless invading force. The film is set in pre-Columbian Central America and features a cast of mostly indigenous actors speaking in Yucatec Maya. - - - -The film was praised for its cinematography, action sequences, and cultural authenticity, but also criticized for its violence, historical inaccuracies, and portrayal of indigenous people. Apocalypto was nominated for three Academy Awards and won three Golden Globe Awards. - - - -Apocalypto was dubbed in Hindi and released in India in 2007. The Hindi dubbing was done by professional voice actors who tried to match the tone and emotion of the original actors. The Hindi dubbing also added some dialogues and explanations to make the film more accessible and understandable for the Indian audience. - - - -If you are looking for a thrilling and adventurous film that transports you to a different time and place, you can download Apocalypto in Hindi dubbing in HD mp4 format from various online sources. However, be warned that the film contains graphic scenes of violence and gore that may not be suitable for everyone. - - - -Apocalypto is a film that will keep you on the edge of your seat and make you appreciate the beauty and diversity of human cultures. Watch it today and experience the epic journey of a hero who fights for his survival and freedom. - -I'll try to continue the article. Here is what I came up with: - -Apocalypto is not only a thrilling and adventurous film, but also a visually stunning and culturally authentic one. The film was shot entirely on location in the jungles of Mexico, using natural lighting and real sets. The film also features a cast of mostly non-professional actors who belong to various indigenous groups of Mexico and Central America, such as the Maya, the Yucatec, the Nahuatl, and the Mixtec. The actors learned to speak in Yucatec Maya, the language of the ancient Maya civilization, with the help of linguists and coaches. The film also consulted with experts and scholars on various aspects of Maya culture, such as costumes, rituals, architecture, astronomy, and art. - - - -The film received mostly positive reviews from critics and audiences alike, who praised its cinematography, action sequences, and cultural authenticity. However, some critics also pointed out some historical inaccuracies and anachronisms in the film, such as the depiction of human sacrifice, which was more common among the Aztecs than the Maya; the arrival of the Spanish conquistadors, which happened centuries after the collapse of the Maya civilization; and the portrayal of the Maya as a savage and decadent people, which ignored their achievements in science, mathematics, writing, and art. Some critics also accused Gibson of being too violent and sensationalist in his depiction of indigenous cultures. - - - -Despite these controversies, Apocalypto remains a powerful and unique film that offers a glimpse into a fascinating and mysterious world that has been largely forgotten or misunderstood by modern society. It is a film that challenges us to reflect on our own civilization and its values, as well as to appreciate the beauty and diversity of human cultures. Apocalypto is a film that deserves to be seen and experienced by anyone who loves cinema. - - dfd1c89656 - - - - - diff --git a/spaces/1phancelerku/anime-remove-background/Bar Bar Live A New Generation of Live Entertainment and Social Networking.md b/spaces/1phancelerku/anime-remove-background/Bar Bar Live A New Generation of Live Entertainment and Social Networking.md deleted file mode 100644 index 70e1b1e15fa140f74da004971472862597e1aa1e..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Bar Bar Live A New Generation of Live Entertainment and Social Networking.md +++ /dev/null @@ -1,127 +0,0 @@ -
    -

    How to Download Bar Bar Live, a Popular Live Streaming App from China

    -

    Do you love watching live streams of your favorite celebrities, influencers, or ordinary people? Do you want to chat with them, send them gifts, or even join them on screen? If you answered yes, then you should download bar bar live, a popular live streaming app from China that lets you do all that and more.

    -

    In this article, we will tell you what is bar bar live, why you should download it, how to download it, how to use it, and some tips and tricks to make the most out of it. By the end of this article, you will be ready to enjoy hours of fun and entertainment on this amazing app.

    -

    download bar bar live


    Download Zip ✓✓✓ https://jinyurl.com/2uNNN4



    -

    What is Bar Bar Live?

    -

    Bar bar live is a live streaming app that allows you to watch and interact with thousands of hosts from China and other countries. You can watch them perform various activities such as singing, dancing, gaming, cooking, traveling, or just chatting. You can also chat with them using text, voice, or video messages. You can send them virtual gifts such as flowers, hearts, or diamonds to show your appreciation. You can even join them on screen if they invite you or if you pay a certain amount of coins.

    -

    Bar bar live is not just a live streaming app. It is also a social networking app that allows you to make new friends from all over the world. You can follow your favorite hosts and other users, send them private messages, or join their fan clubs. You can also create your own profile, upload photos and videos, and share your thoughts and feelings with others.

    -

    download bar bar live app for android
    -download bar bar live apk mod
    -download bar bar live streaming no sensor
    -download bar bar live vietnam
    -download bar bar live russia
    -download bar bar live korea
    -download bar bar live thailand
    -download bar bar live indonesia
    -download bar bar live amerika
    -download beyond all reason rts game
    -download beyond all reason rts launcher
    -download beyond all reason rts windows installer
    -download beyond all reason rts linux appimage
    -download beyond all reason rts linux flatpak
    -download beyond all reason rts maps and replays
    -how to play beyond all reason rts
    -how to install beyond all reason rts
    -how to join beyond all reason rts discord
    -how to donate to beyond all reason rts
    -how to get help with beyond all reason rts
    -cara download aplikasi live bar bar
    -cara download apk live bar bar mod
    -cara download live streaming bar bar tanpa sensor
    -cara download live bar bar vietnam
    -cara download live bar bar rusia
    -cara download live bar bar korea
    -cara download live bar bar thailand
    -cara download live bar bar indonesia
    -cara download live bar bar amerika
    -link download aplikasi live bar bar terbaru 2023
    -link download apk live bar bar bebas parah 2023
    -link download live streaming bar bar no sensor 2023
    -link download live bar bar vietnam terbaru 2023
    -link download live bar bar rusia terbaru 2023
    -link download live bar bar korea terbaru 2023
    -link download live bar bar thailand terbaru 2023
    -link download live bar bar indonesia terbaru 2023
    -link download live bar bar amerika terbaru 2023

    -

    Bar bar live is one of the most popular live streaming apps in China. It has over 100 million users and more than 10 million active hosts. It is also available in other countries such as Indonesia, Thailand, Vietnam, India, and more. It supports multiple languages such as English, Chinese, Indonesian, Thai, Vietnamese, Hindi, and more.

    -

    Why You Should Download Bar Bar Live?

    -

    There are many reasons why you should download bar bar live. Here are some of them:

    -
      -
    • You can watch live streams of various topics and genres anytime and anywhere. You can find something that interests you or suits your mood.
    • -
    • You can interact with hosts and other users in real time. You can chat with them, send them gifts, or join them on screen. You can also make new friends from different backgrounds and cultures.
    • -
    • You can earn money on the app by becoming a host yourself. You can showcase your talents, attract fans, and receive gifts from them. You can also participate in various events and competitions to win prizes and rewards.
    • -
    • You can enjoy high-quality video and audio on the app. You can also adjust the resolution and volume according to your network and device.
    • -
    • You can customize your app settings and preferences. You can choose your preferred language, theme, notification, and privacy options.
    • -
    -

    As you can see, bar bar live is a live streaming app that offers you a lot of fun and benefits. You can watch, chat, gift, join, and earn on the app. You can also socialize, learn, and grow on the app. So what are you waiting for? Download bar bar live now and start your live streaming journey.

    -

    How to Download Bar Bar Live?

    -

    Downloading bar bar live is easy and fast. You can download it on your Android, iOS, or PC device. Here are the steps to download bar bar live on each device:

    -

    Download Bar Bar Live for Android

    -

    If you have an Android device, you can download bar bar live from Google Play Store or from an APK file. Here are the steps for both methods:

    -
      -
    1. From Google Play Store: Open Google Play Store on your device and search for "bar bar live". Tap on the app icon and then tap on "Install". Wait for the app to download and install on your device. Once done, you can open the app and start using it.
    2. -
    3. From APK file: If you cannot access Google Play Store or prefer to download the app from an external source, you can use an APK file. An APK file is a package file that contains the app's installation files. You can download the APK file from a trusted website such as APKPure or APKMirror. Once you have downloaded the APK file, you need to enable "Unknown Sources" on your device settings to allow installation from external sources. Then, you need to locate the APK file on your device storage and tap on it to install it. Once done, you can open the app and start using it.
    4. -
    -

    Download Bar Bar Live for iOS

    -

    If you have an iOS device, you can download bar bar live from App Store or from an IPA file. Here are the steps for both methods:

    -
      -
    1. From App Store: Open App Store on your device and search for "bar bar live". Tap on the app icon and then tap on "Get". Wait for the app to download and install on your device. Once done, you can open the app and start using it.
    2. -
    3. From IPA file: If you cannot access App Store or prefer to download the app from an external source, you can use an IPA file. An IPA file is a package file that contains the app's installation files. You can download the IPA file from a trusted website such as iOS Ninja or AppCake. Once you have downloaded the IPA file, you need to use a third-party tool such as Cydia Impactor or AltStore to install it on your device. You may need to enter your Apple ID and password during the installation process. Once done, you can open the app and start using it.
    4. -
    -

    Download Bar Bar Live for PC

    -

    If you have a PC device, you can download bar bar live using an emulator or a web browser. Here are the steps for both methods:

    -
      -
    1. Using an emulator: An emulator is a software that allows you to run Android apps on your PC device. You can use an emulator such as BlueStacks, NoxPlayer, or MEmu to download and install bar bar live on your PC device. You need to download and install the emulator on your PC device first. Then, you need to open the emulator and access Google Play Store or APK file from within it. Follow the same steps as mentioned above for Android devices to download and install bar bar live on your PC device.
    2. -
    3. Using a web browser: If you do not want to use an emulator, you can use a web browser to access bar bar live on your PC device. You need to visit the official website of bar bar live at www.barbar.live. Then, you need to scan the QR code displayed on the website using your mobile device's camera. This will link your mobile device's account with your PC device's browser. You can then watch and interact with live streams on your PC device's browser.
    4. -
    -

    How to Use Bar Bar Live?

    -

    Using bar bar live is simple and fun. You can create an account, watch live streams, chat with hosts, and send gifts on the app. Here is a brief tutorial on how to use bar bar live:

    -
      -
    1. Create an account: To use bar bar live, you need to create an account first. You can do this by using your phone number, email address, or social media account such as Facebook, Twitter, or Google. You need to enter your nickname, gender, birthday, and profile picture. You can also choose your preferred language and currency. You will receive a verification code or link to confirm your account.
    2. -
    3. Watch live streams: To watch live streams, you can browse the homepage, explore the categories, or search for specific keywords. You can also use filters to sort the live streams by popularity, region, or time. You can tap on any live stream thumbnail to enter the live room. You can see the host's name, avatar, fan count, and gift count on the top of the screen. You can also see the chat messages, gift messages, and join requests on the bottom of the screen.
    4. -
    5. Chat with hosts: To chat with hosts, you can use the chat box on the bottom of the screen. You can type text messages or send voice messages to the host. You can also use emojis, stickers, or GIFs to express yourself. You can also send video messages to the host if they enable the video chat feature. You can also request to join the host on screen if they allow it or if you pay a certain amount of coins.
    6. -
    7. Send gifts: To send gifts to hosts, you can use the gift icon on the bottom of the screen. You can choose from various virtual gifts such as flowers, hearts, diamonds, cars, or planes. Each gift has a different value and effect. You need to have enough coins to send gifts. You can buy coins using real money or earn coins by completing tasks or watching ads. Sending gifts will increase your level and rank on the app. It will also make the host happy and grateful.
    8. -
    -

    Tips and Tricks for Bar Bar Live

    -

    To enhance your experience on bar bar live, here are some tips and tricks that you should know:

    -

    How to Find Interesting Live Streams?

    -

    To find interesting live streams that suit your preferences, you can use these methods:

    -
      -
    • Use filters: You can use filters to narrow down your search results by popularity, region, or time. You can also use advanced filters to select specific genres, languages, ages, genders, or tags.
    • -
    • Use categories: You can use categories to browse live streams by different topics such as music, dance, game, food, travel, or talk. You can also see the subcategories and recommendations within each category.
    • -
    • Use recommendations: You can use recommendations to discover new and popular live streams based on your viewing history and preferences. You can see the recommendations on the homepage or on the sidebar of each category.
    • -
    -

    How to Interact with Hosts and Other Users?

    -

    To interact with hosts and other users in a friendly and respectful way, you can use these methods:

    -
      -
    • Use chat: You can use chat to communicate with hosts and other users using text, voice, or video messages. You can also use emojis , stickers, or GIFs to express yourself. You can also use @ to mention someone or # to join a topic.
    • -
    • Use voice: You can use voice to talk to hosts and other users using the microphone icon on the chat box. You can also use the voice changer feature to change your voice tone and effect.
    • -
    • Use video: You can use video to see hosts and other users using the camera icon on the chat box. You can also use the beauty filter feature to enhance your appearance and the face swap feature to change your face with someone else's.
    • -
    • Use gifts: You can use gifts to show your appreciation and support to hosts and other users using the gift icon on the chat box. You can also use the lucky draw feature to win random gifts or the gift ranking feature to see who sent the most gifts.
    • -
    • Use join: You can use join to go on screen with hosts and other users using the join icon on the chat box. You can also use the PK feature to challenge another host or user to a live duel or the party feature to join a group chat with multiple hosts or users.
    • -
    -

    How to Earn Money on Bar Bar Live?

    -

    To earn money on bar bar live, you can become a host yourself and attract fans and receive gifts from them. Here are some tips on how to do that:

    -
      -
    • Create an attractive profile: You should create an attractive profile that showcases your personality, interests, and talents. You should upload a clear and appealing profile picture, write a catchy and informative bio, and choose a unique and memorable nickname.
    • -
    • Choose a suitable category: You should choose a suitable category that matches your content and style. You should also choose a subcategory and tags that describe your live stream topic and genre.
    • -
    • Prepare your equipment: You should prepare your equipment such as your phone, camera, microphone, lighting, and background. You should make sure that your equipment is working properly and that your video and audio quality are high.
    • -
    • Schedule your live stream: You should schedule your live stream in advance and announce it to your fans and followers. You should also choose a time slot that is convenient for you and your audience.
    • -
    • Engage your audience: You should engage your audience during your live stream by greeting them, answering their questions, responding to their comments, thanking them for their gifts, inviting them to join you on screen, playing games with them, or performing for them.
    • -
    • Promote yourself: You should promote yourself on other platforms such as social media, blogs, or forums. You should also collaborate with other hosts or users to cross-promote each other. You should also participate in various events and competitions on the app to gain more exposure and recognition.
    • -
    -

    Conclusion

    -

    Bar bar live is a live streaming app that allows you to watch and interact with thousands of hosts from China and other countries. You can also become a host yourself and earn money on the app. Bar bar live is easy to download and use on your Android, iOS, or PC device. Bar bar live is also fun and rewarding to watch and chat with hosts and other users. Bar bar live is one of the best live streaming apps in China and in the world.

    -

    If you are looking for a live streaming app that offers you entertainment, socialization, and income, you should download bar bar live today. You will not regret it.

    -

    FAQs

    -

    Here are some frequently asked questions and answers about bar bar live:

    -
      -
    1. Q: Is bar bar live safe and legal?
      A: Yes, bar bar live is safe and legal. It has strict policies and regulations to protect the privacy and security of its users and hosts. It also complies with the laws and regulations of each country where it operates.
    2. -
    3. Q: How much does it cost to use bar bar live?
      A: Bar bar live is free to download and use. However, you may need to pay for some features such as sending gifts, joining hosts on screen, or accessing premium content. You can buy coins using real money or earn coins by completing tasks or watching ads.
    4. -
    5. Q: How can I contact bar bar live customer service?
      A: You can contact bar bar live customer service by using the feedback feature on the app settings or by sending an email to service@barbar.live. You can also visit their official website at www.barbar.live for more information.
    6. -
    7. Q: How can I delete my bar bar live account?
      A: You can delete your bar bar live account by using the delete account feature on the app settings or by contacting customer service. However, once you delete your account, you will lose all your data, coins, gifts, fans, and hosts. You will also not be able to create a new account with the same phone number, email address, or social media account.
    8. -
    9. Q: How can I update bar bar live to the latest version?
      A: You can update bar bar live to the latest version by using the update feature on the app settings or by visiting Google Play Store, App Store, or the official website. You should always update your app to enjoy the latest features and improvements.
    10. -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/CarX Street A Realistic and Immersive Street Racing Simulator for Android.md b/spaces/1phancelerku/anime-remove-background/CarX Street A Realistic and Immersive Street Racing Simulator for Android.md deleted file mode 100644 index 478b2da0f462af2bfbb4ca3ac0e771c5b24bde8b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/CarX Street A Realistic and Immersive Street Racing Simulator for Android.md +++ /dev/null @@ -1,127 +0,0 @@ -
    -

    CarX Street News APK: Everything You Need to Know

    -

    If you are a fan of car racing games, you might have heard of CarX Street, a realistic and immersive game that lets you customize and drive your own cars on various tracks and locations. But did you know that there is an app that can keep you updated on everything related to CarX Street? It's called CarX Street News APK, and it's a must-have for any CarX Street enthusiast. In this article, we will tell you everything you need to know about CarX Street News APK, including what it is, how to download and install it, how to use it, and some FAQs.

    -

    What is CarX Street News APK?

    -

    A brief introduction to CarX Street News APK

    -

    CarX Street News APK is an entertainment app developed by Alcides Games, a fan of CarX Street. The app was released in December 2022, and it has been downloaded over 50,000 times since then . The app is not affiliated with or endorsed by the official developers of CarX Street, but it provides useful and interesting information about the game, such as news, updates, tips, videos, screenshots, wallpapers, and more. The app also allows you to interact with other fans of CarX Street through comments and ratings.

    -

    carx street news apk


    Download Zip ->>->>->> https://jinyurl.com/2uNOW8



    -

    The features and benefits of CarX Street News APK

    -

    CarX Street News APK has many features and benefits that make it a great app for any CarX Street fan. Some of them are:

    -
      -
    • It keeps you informed about the latest developments and events in CarX Street, such as new cars, tracks, modes, features, etc.
    • -
    • It gives you access to exclusive content that you won't find anywhere else, such as behind-the-scenes videos, interviews with the developers, sneak peeks of upcoming updates, etc.
    • -
    • It helps you improve your skills and performance in CarX Street by providing you with tips, tricks, guides, tutorials, reviews, etc.
    • -
    • It lets you customize your device with amazing wallpapers and ringtones inspired by CarX Street.
    • -
    • It enables you to share your opinions and feedback with other fans of CarX Street through comments and ratings.
    • -
    • It has a simple and user-friendly interface that makes it easy to navigate and use.
    • -
    • It is free to download and use, and it does not require any registration or subscription.
    • -
    -

    How to download and install CarX Street News APK?

    -

    The steps to download and install CarX Street News APK on your Android device

    -

    If you want to download and install CarX Street News APK on your Android device, you can follow these steps:

    -
      -
    1. Go to the official website of the app at [CarxStreet](^1^) or [AppBrain](^2^) and click on the download button.
    2. -
    3. Wait for the download to finish and then open the downloaded file.
    4. -
    5. If you see a message that says "For your security, your phone is not allowed to install unknown apps from this source", tap on "Settings" and then enable the option "Allow from this source".
    6. -
    7. Go back to the installation screen and tap on "Install".
    8. -
    9. Wait for the installation to complete and then tap on "Open".
    10. -
    11. Enjoy using CarX Street News APK on your Android device.
    12. -
    -

    The precautions and requirements for installing CarX Street News APK

    -

    Before you install CarX Street News APK on your Android device, you should take some precautions and meet some requirements to ensure a smooth and safe installation. Some of them are:

    -
      -
    • You should have enough storage space on your device to download and install the app. The app size is about 15 MB, but it may vary depending on your device and version.
    • -
    • You should have a stable internet connection to download the app and access its content.
    • -
    • You should have an Android device that runs on Android 4.4 or higher, as the app may not work properly on lower versions.
    • -
    • You should enable the option "Unknown sources" or "Allow from this source" on your device settings, as the app is not available on the Google Play Store and you need to install it from a third-party source.
    • -
    • You should scan the app with a reliable antivirus or malware scanner before installing it, as some sources may contain malicious or harmful files.
    • -
    • You should read and agree to the app's terms and conditions and privacy policy before using it, as the app may collect and use some of your data for its functions and services.
    • -
    -

    How to use CarX Street News APK?

    -

    The main functions and options of CarX Street News APK

    -

    Once you have installed CarX Street News APK on your Android device, you can start using it to enjoy its features and benefits. The app has a simple and user-friendly interface that makes it easy to navigate and use. The app has four main tabs at the bottom of the screen: Home, Videos, Wallpapers, and More. Here is what you can do with each tab:

    - - - - - - -
    TabFunction
    HomeThis is where you can find the latest news, updates, tips, guides, reviews, and more about CarX Street. You can scroll down to see more articles, or swipe left or right to see different categories. You can also tap on any article to read it in full, or tap on the share button to share it with your friends. You can also leave a comment or a rating on any article, or tap on the bookmark button to save it for later.
    VideosThis is where you can watch amazing videos related to CarX Street, such as gameplay, trailers, teasers, interviews, behind-the-scenes, etc. You can scroll down to see more videos, or swipe left or right to see different categories. You can also tap on any video to watch it in full screen, or tap on the share button to share it with your friends. You can also leave a comment or a rating on any video, or tap on the bookmark button to save it for later.
    WallpapersThis is where you can find stunning wallpapers inspired by CarX Street, such as cars, tracks, logos, etc. You can scroll down to see more wallpapers, or swipe left or right to see different categories. You can also tap on any wallpaper to see it in full screen, or tap on the download button to save it on your device. You can also set any wallpaper as your home screen or lock screen background.
    MoreThis is where you can access more options and settings of the app, such as feedback, rate us, privacy policy, terms and conditions, etc. You can also check for updates, clear cache, or contact us from this tab.
    -

    The tips and tricks to get the most out of CarX Street News APK

    -

    To make sure that you get the most out of CarX Street News APK, here are some tips and tricks that you can follow:

    -
      -
    • Check the app regularly for new content and updates, as the app is constantly updated with fresh and relevant information about CarX Street.
    • -
    • Enable notifications for the app, so that you don't miss any important news or updates about CarX Street.
    • -
    • Share your feedback and suggestions with us through the feedback option in the More tab, as we value your opinion and we want to improve our app based on your needs and preferences.
    • Rate and review our app on the Google Play Store or the App Store, as it helps us to reach more people and grow our community.
    • -
    • Invite your friends and family to download and use our app, as it makes it more fun and enjoyable to share your passion for CarX Street with others.
    • -
    • Follow us on our social media platforms, such as Facebook, Twitter, Instagram, YouTube, etc., as we post more content and updates there.
    • -
    -

    Conclusion

    -

    CarX Street News APK is an amazing app that every CarX Street fan should have on their Android device. It keeps you updated on everything related to CarX Street, such as news, updates, tips, videos, wallpapers, and more. It also lets you interact with other fans of CarX Street through comments and ratings. It has a simple and user-friendly interface that makes it easy to navigate and use. It is free to download and use, and it does not require any registration or subscription. If you love CarX Street, you will love CarX Street News APK. So what are you waiting for? Download CarX Street News APK today and enjoy the ultimate CarX Street experience.

    -

    FAQs

    -

    Q1: Is CarX Street News APK safe and legal?

    -

    A1: Yes, CarX Street News APK is safe and legal to use. The app does not contain any viruses, malware, or spyware that can harm your device or data. The app also does not violate any laws or regulations that govern the use of apps or games. However, you should always download and install the app from a trusted source, such as the official website of the app or the App Store.

    -

    Q2: What are the advantages of using CarX Street News APK over other apps?

    -

    A2: Some of the advantages of using CarX Street News APK over other apps are:

    -

    carx street news apk download
    -carx street news apk mod
    -carx street news apk latest version
    -carx street news apk free
    -carx street news apk android
    -carx street news apk obb
    -carx street news apk offline
    -carx street news apk hack
    -carx street news apk update
    -carx street news apk full
    -carx street news app review
    -carx street news app gameplay
    -carx street news app features
    -carx street news app tips
    -carx street news app guide
    -carx street news app cheats
    -carx street news app tricks
    -carx street news app support
    -carx street news app rating
    -carx street news app feedback
    -carx street game news
    -carx street game release date
    -carx street game download
    -carx street game trailer
    -carx street game beta
    -carx street game online
    -carx street game pc
    -carx street game ios
    -carx street game system requirements
    -carx street game cars list
    -how to play carx street news apk
    -how to install carx street news apk
    -how to update carx street news apk
    -how to uninstall carx street news apk
    -how to hack carx street news apk
    -how to get free coins in carx street news apk
    -how to unlock all cars in carx street news apk
    -how to customize cars in carx street news apk
    -how to drift in carx street news apk
    -how to race in carx street news apk

    -
      -
    • It provides you with exclusive and comprehensive information about CarX Street that you won't find anywhere else.
    • -
    • It helps you improve your skills and performance in CarX Street by providing you with tips, tricks, guides, tutorials, reviews, etc.
    • -
    • It lets you customize your device with amazing wallpapers and ringtones inspired by CarX Street.
    • -
    • It enables you to share your opinions and feedback with other fans of CarX Street through comments and ratings.
    • -
    • It has a simple and user-friendly interface that makes it easy to navigate and use.
    • -
    • It is free to download and use, and it does not require any registration or subscription.
    • -
    -

    Q3: How often is CarX Street News APK updated?

    -

    A3: CarX Street News APK is updated regularly with fresh and relevant content and features. The app is updated whenever there is a new development or event in CarX Street, such as a new car, track, mode, feature, etc. The app is also updated to fix any bugs or errors that may occur. You can check for updates in the More tab of the app, or enable notifications for the app to get notified when there is an update available.

    -

    Q4: Can I use CarX Street News APK on other devices besides Android?

    -

    A4: Unfortunately, no. CarX Street News APK is only compatible with Android devices that run on Android 4.4 or higher. The app is not available for iOS devices or Windows devices. However, you can still access some of the content of the app on other devices by visiting the official website of the app at [CarxStreet] or [AppBrain].

    -

    Q5: Where can I find more information and support for CarX Street News APK?

    -

    A5: If you have any questions or issues regarding CarX Street News APK, you can find more information and support from the following sources:

    -
      -
    • The official website of the app at [CarxStreet] or [AppBrain], where you can find more details about the app, such as its features, benefits, screenshots, etc.
    • -
    • The feedback option in the More tab of the app, where you can send us your feedback and suggestions for improving our app.
    • -
    • The contact us option in the More tab of the app, where you can email us your queries or problems regarding our app.
    • -
    • The social media platforms of the app, such as Facebook, Twitter, Instagram, YouTube, etc., where you can follow us for more content and updates about our app.
    • -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Cover Fire 3D Offline Shooting Game Mod APK Experience the Best of Shooting Games with Unlimited Money and More.md b/spaces/1phancelerku/anime-remove-background/Cover Fire 3D Offline Shooting Game Mod APK Experience the Best of Shooting Games with Unlimited Money and More.md deleted file mode 100644 index c5380b19977c999dc70d35b28f7cfe66faecb3ec..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Cover Fire 3D Offline Shooting Game Mod APK Experience the Best of Shooting Games with Unlimited Money and More.md +++ /dev/null @@ -1,95 +0,0 @@ -
    -

    Cover Fire 3D Offline Shooting Game Mod APK: A Review

    -

    If you are looking for a fun and exciting shooting game that you can play on your mobile device, then you should check out Cover Fire 3D Offline Shooting Game. This game is one of the best shooting games you'll ever play on a mobile, now for free and offline. You can enjoy realistic 3D graphics, easy controls, and various offline missions. You can also compete online with other players in sniper tournaments and war events. But what if you want to get more out of this game? What if you want to have unlimited money, coins, weapons, and upgrades? Well, that's where Cover Fire 3D Offline Shooting Game Mod APK comes in. In this article, we will review what this game is, what the mod apk is, and how to download and install it on your device.

    -

    What is Cover Fire 3D Offline Shooting Game?

    -

    Cover Fire 3D Offline Shooting Game is an action shooter game developed by Genera Games. It has over 100 million downloads on Google Play Store and a rating of 4.5 out of 5 stars. The game has a thrilling story mode with 12 chapters, where you have to battle in army missions, sniper ops, vehicle driving, helicopter shooting, and more. You can also play online in competitive sniper shooting battles and cool war events, such as Zombies Survival or Black Ops.

    -

    cover fire 3d offline shooting game mod apk


    Download Filehttps://jinyurl.com/2uNO62



    -

    Features of Cover Fire 3D Offline Shooting Game

    -

    Some of the features that make this game stand out are:

    -

    Shooting Online and Offline on mobile

    -

    You can play this game offline in a challenging single-player campaign, or online with other players around the world. You can also play in co-op mode with your friends or join a clan to fight for glory.

    -

    New Shooting Game and best Sniper 3d shooting game

    -

    You can unlock unique army weapons and shoot cool guns in this game. You can customize and upgrade your guns skills to increase your damage in the war zone. You can also use grenades, drones, robots, and more to help you in your missions.

    -

    Easy controls and low mobile requirements

    -

    The game has easy controls that are customizable to suit your preferences. You can also adjust the graphics quality to optimize the performance on your device. The game does not require a lot of storage space or internet connection to run smoothly.

    -

    Online Sniper Tournaments and cool war events

    -

    You can compete and fight against other players or friends in online sniper tournaments and rank up in the leaderboards. You can also participate in cool war events, such as Zombies Survival or Black Ops, where you have to shoot to kill and survive.

    -

    What is Cover Fire 3D Offline Shooting Game Mod APK?

    -

    Cover Fire 3D Offline Shooting Game Mod APK is a modified version of the original game that gives you some extra benefits that are not available in the official version. With this mod apk, you can get unlimited money, coins, weapons, upgrades, and more. You can also enjoy the game without any ads or root access. This way, you can enjoy the game to the fullest and have more fun and excitement.

    -

    Benefits of Cover Fire 3D Offline Shooting Game Mod APK

    -

    Some of the benefits that you can get from using Cover Fire 3D Offline Shooting Game Mod APK are:

    -

    cover fire 3d offline shooting mod apk download
    -cover fire 3d offline shooting game unlimited money
    -cover fire 3d offline shooting game pro apk
    -cover fire 3d offline shooting game hack
    -cover fire 3d offline shooting game free download
    -cover fire 3d offline shooting game latest version
    -cover fire 3d offline shooting game apk + obb
    -cover fire 3d offline shooting game for android
    -cover fire 3d offline shooting game mod apk happymod
    -cover fire 3d offline shooting game mod apk revdl
    -cover fire 3d offline shooting game mod apk rexdl
    -cover fire 3d offline shooting game mod apk android 1
    -cover fire 3d offline shooting game mod apk unlimited everything
    -cover fire 3d offline shooting game mod apk unlimited gold
    -cover fire 3d offline shooting game mod apk unlimited ammo
    -cover fire 3d offline shooting game mod apk unlocked all guns
    -cover fire 3d offline shooting game mod apk no ads
    -cover fire 3d offline shooting game mod apk mega mod
    -cover fire 3d offline shooting game mod apk data
    -cover fire 3d offline shooting game mod apk pure
    -cover fire 3d offline shooting game mod apk full version
    -cover fire 3d offline shooting game mod apk premium
    -cover fire 3d offline shooting game mod apk vip
    -cover fire 3d offline shooting game mod apk new update
    -cover fire 3d offline shooting game mod apk online
    -cover fire 3d offline shooting games pro mod apk download
    -cover fire 3d offline shooting games hack apk download
    -cover fire 3d offline shooting games free download for pc
    -cover fire 3d offline shooting games best action games mod apk
    -cover fire 3d offline shooting games sniper shooter mod apk
    -cover fire 3d offline shooting games zombie survival mode apk
    -cover fire 3d offline shooting games black ops mode apk
    -cover fire 3d offline shooting games realistic graphics and fun missions apk
    -cover fire 3d offline shooting games easy controls and low mobile requirements apk
    -cover fire 3d offline shooting games unlock unique army weapons and shoot cool guns apk
    -cover fire 3d offline shooting games customize and upgrade your best guns skills apk
    -cover fire 3d offline shooting games killing with iconic gun, powerful sniper weapons and modern shotguns apk
    -cover fire 3d offline shooting games grenades are your best companion in the war apk
    -cover fire 3d offline shooting games compete and fight against other players or friends online apk
    -cover fire 3d offline shooting games call to action and win the war, your duty is to be best shooter apk

    -

    Unlimited money and coins

    -

    With unlimited money and coins, you can buy any weapon or upgrade that you want without worrying about the cost. You can also use them to unlock new chapters, modes, and events in the game.

    -

    All weapons and upgrades unlocked

    -

    With all weapons and upgrades unlocked, you can choose from a variety of guns and customize them to your liking. You can also improve your skills and abilities to become a better shooter and soldier.

    -

    No ads and no root needed

    -

    With no ads and no root needed, you can play the game without any interruptions or distractions. You can also install the mod apk on your device without having to root it or risk damaging it.

    -

    How to download and install Cover Fire 3D Offline Shooting Game Mod APK?

    -

    If you want to download and install Cover Fire 3D Offline Shooting Game Mod APK on your device, you need to follow these steps:

    -

    Steps to download and install Cover Fire 3D Offline Shooting Game Mod APK

    -

    Download APK and OBB files from a trusted source

    -

    The first step is to download the APK and OBB files of the mod apk from a trusted source. You can find many websites that offer these files, but make sure they are safe and reliable. You can also scan the files with an antivirus before opening them.

    -

    Enable unknown sources on your device settings

    -

    The second step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

    -

    Install the APK file and extract the OBB file to Android/OBB folder

    -

    The third step is to install the APK file and extract the OBB file to Android/OBB folder. To do this, locate the downloaded APK file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for it to finish. Then, use a file manager app to extract the OBB file to Android/OBB folder. Make sure the folder name matches the package name of the game.

    -

    Launch the game and enjoy

    -

    The final step is to launch the game and enjoy. You can now play Cover Fire 3D Offline Shooting Game with unlimited money, coins, weapons, upgrades, and more.

    -

    Conclusion

    -

    Cover Fire 3D Offline Shooting Game is one of the best shooting games you can play on your mobile device. It has realistic 3D graphics, easy controls, offline missions, online tournaments, war events, and more. But if you want to have more fun and excitement, you can try Cover Fire 3D Offline Shooting Game Mod APK. This mod apk gives you unlimited money, coins, weapons, upgrades, and more. You can also play the game without any ads or root access. To download and install this mod apk, you just need to follow some simple steps that we have explained in this article. So what are you waiting for? Download Cover Fire 3D Offline Shooting Game Mod APK now and enjoy shooting like never before.

    -

    FAQs

    -

    Here are some frequently asked questions about Cover Fire 3D Offline Shooting Game Mod APK:

    -
      -
    • Is Cover Fire 3D Offline Shooting Game Mod APK safe?
    • -

      Yes, Cover Fire 3D Offline Shooting Game Mod APK is safe as long as you download it from a trusted source. You should also scan the files with an antivirus before opening them.

      -
    • Is Cover Fire 3D Offline Shooting Game Mod APK free?
    • -

      Yes, Cover Fire 3D Offline Shooting Game Mod APK is free. You don't have to pay anything to download or use it.

      -
    • Does Cover Fire 3D Offline Shooting Game Mod APK work on all devices?
    • -

      Cover Fire 3D Offline Shooting Game Mod APK works on most Android devices that support the original game. However, some devices may not be compatible or may experience some issues.

      -
    • Do I need an internet connection to play Cover Fire 3D Offline Shooting Game Mod APK?
    • -

      No, you don't need an internet connection to play Cover Fire 3D Offline Shooting Game Mod APK. You can play the game offline in the single-player campaign or online in the multiplayer mode.

      -
    • How can I update Cover Fire 3D Offline Shooting Game Mod APK?
    • -

      To update Cover Fire 3D Offline Shooting Game Mod APK, you need to download the latest version of the mod apk from the same source you downloaded it from. Then, you need to uninstall the previous version and install the new one. You may also need to delete and extract the OBB file again.

      -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Hunter Assassin 2 Hack Mod Apk and Unlock All Characters and Weapons.md b/spaces/1phancelerku/anime-remove-background/Download Hunter Assassin 2 Hack Mod Apk and Unlock All Characters and Weapons.md deleted file mode 100644 index 7238e93a043041ad713c8cab0e75db3245457d3e..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Hunter Assassin 2 Hack Mod Apk and Unlock All Characters and Weapons.md +++ /dev/null @@ -1,91 +0,0 @@ -
    -

    Hunter Assassin 2 Mod Apk Hack Download: Everything You Need to Know

    -

    Are you a fan of stealth action games? Do you love to sneak around and eliminate your enemies without being detected? If yes, then you should try Hunter Assassin 2, the sequel to the popular game Hunter Assassin. In this game, you will play as a skilled assassin who has to infiltrate various locations and take out the guards with your knife. Sounds easy, right? Well, not so fast. The guards are armed with guns and will shoot you on sight if they spot you. You have to use your speed, agility, and strategy to avoid their bullets and reach your target. But don't worry, you are not alone. You can choose from different characters and weapons to suit your style and preference. And if you want to make the game more fun and exciting, you can download Hunter Assassin 2 mod apk hack and enjoy unlimited money, gems, characters, weapons, and more. In this article, we will tell you everything you need to know about Hunter Assassin 2 mod apk hack download, including its features, benefits, and installation process. Let's get started!

    -

    hunter assassin 2 mod apk hack download


    Download Filehttps://jinyurl.com/2uNRAE



    -

    What is Hunter Assassin 2?

    -

    Hunter Assassin 2 is a stealth action game developed by Ruby Game Studio, the same creators of Hunter Assassin, Jetpack Jump, Gym Flip, and more. It is available for both Android and iOS devices. The game has over 10 million downloads on Google Play Store and has a rating of 4.4 out of 5 stars. It is also one of the top-grossing action games on the platform.

    -

    The gameplay of Hunter Assassin 2

    -

    The gameplay of Hunter Assassin 2 is simple but addictive. You have to control your character with a joystick and tap on the screen to attack your enemies. Your goal is to eliminate all the guards in each level without being seen or shot. You can hide behind walls, crates, barrels, and other objects to avoid detection. You can also use your knife to cut the wires and disable the lights or cameras. You have to be quick and smart, as the guards will patrol around and chase you if they hear any noise or see any movement. You will earn money and gems for each kill, which you can use to upgrade your character and weapon.

    -

    The features of Hunter Assassin 2

    -

    Hunter Assassin 2 has many features that make it an enjoyable and challenging game. Some of them are:

    -
      -
    • Multiple locations: The game has various locations to explore, such as warehouses, factories, prisons, military bases, and more. Each location has different layouts, obstacles, enemies, and traps that require different strategies and skills.
    • -
    • Different characters: You can choose from a variety of different characters to play as, each with their own unique abilities and weapons. For example, some characters have faster speed, higher health, longer range, or stronger attack than others. You can unlock new characters by completing missions or using gems.
    • -
    • Different weapons: You can also equip different weapons to enhance your performance in the game. Some weapons have more damage, accuracy, or fire rate than others. You can unlock new weapons by using money or gems.
    • -
    • Daily missions: The game has daily missions that give you extra rewards for completing certain tasks or challenges. For example, some missions require you to kill a certain number of guards, use a specific character or weapon, or finish a level within a time limit.
    • -
    • Leaderboards: The game has leaderboards that rank you based on your score, kills, levels completed, and other stats. You can compete with other players around the world and see who is the best hunter assassin.
    • -
    -

    Why download Hunter Assassin 2 mod apk hack?

    -

    While Hunter Assassin 2 is a fun and addictive game, it can also be frustrating and time-consuming at times. You may find yourself stuck on a level, running out of money or gems, or getting bored of the same characters and weapons. That's why you may want to download Hunter Assassin 2 mod apk hack and enjoy the game with more freedom and fun. Here are some of the benefits of downloading Hunter Assassin 2 mod apk hack:

    -

    Unlimited money and gems

    -

    Money and gems are the main currencies in the game, which you can use to upgrade your character and weapon, unlock new characters and weapons, and buy boosters and items. However, earning money and gems can be slow and tedious, especially if you want to get the best items in the game. With Hunter Assassin 2 mod apk hack, you don't have to worry about that. You will get unlimited money and gems in your account, which you can spend as much as you want without any limitations.

    -

    hunter assassin 2 mod apk unlimited money and gems
    -hunter assassin 2 hack mod apk latest version
    -hunter assassin 2 mod apk free download for android
    -hunter assassin 2 hack mod apk no root
    -hunter assassin 2 mod apk unlock all characters
    -hunter assassin 2 hack mod apk online
    -hunter assassin 2 mod apk offline
    -hunter assassin 2 hack mod apk ios
    -hunter assassin 2 mod apk premium
    -hunter assassin 2 hack mod apk revdl
    -hunter assassin 2 mod apk vip
    -hunter assassin 2 hack mod apk happy mod
    -hunter assassin 2 mod apk new update
    -hunter assassin 2 hack mod apk rexdl
    -hunter assassin 2 mod apk unlimited diamonds
    -hunter assassin 2 hack mod apk an1
    -hunter assassin 2 mod apk mega mod
    -hunter assassin 2 hack mod apk android 1
    -hunter assassin 2 mod apk god mode
    -hunter assassin 2 hack mod apk pure
    -hunter assassin 2 mod apk all weapons unlocked
    -hunter assassin 2 hack mod apk mediafıre
    -hunter assassin 2 mod apk pro
    -hunter assassin 2 hack mod apk mob.org
    -hunter assassin 2 mod apk unlimited everything
    -hunter assassin 2 hack mod apk apkpure
    -hunter assassin 2 mod apk full version
    -hunter assassin 2 hack mod apk blackmod
    -hunter assassin 2 mod apk no ads
    -hunter assassin 2 hack mod apk platinmods
    -hunter assassin 2 mod apk unlimited health
    -hunter assassin 2 hack mod apk uptodown
    -hunter assassin 2 mod apk high damage
    -hunter assassin 2 hack mod apk android republic
    -hunter assassin 2 mod apk one hit kill
    -hunter assassin 2 hack mod apk ihackedit
    -hunter assassin 2 mod apk unlimited coins and keys
    -hunter assassin 2 hack mod apk andropalace
    -hunter assassin 2 mod apk super speed
    -hunter assassin 2 hack mod apk apkmody

    -

    Unlocked all characters and weapons

    -

    Another benefit of downloading Hunter Assassin 2 mod apk hack is that you will get access to all the characters and weapons in the game. Normally, you have to complete missions or use gems to unlock new characters and weapons, which can take a lot of time and effort. But with Hunter Assassin 2 mod apk hack, you can choose any character or weapon you like from the start, without having to unlock them. You can experiment with different combinations and find the ones that suit your style and preference.

    -

    No ads and no root required

    -

    One of the most annoying things about playing Hunter Assassin 2 is that you have to watch ads every time you finish a level or die in the game. These ads can interrupt your gameplay and ruin your experience. Moreover, some mod apk files require you to root your device, which can be risky and complicated. But with Hunter Assassin 2 mod apk hack, you don't have to worry about that. You will not see any ads in the game, and you don't need to root your device to install the mod apk file. You can enjoy the game without any distractions or hassles.

    -

    How to download and install Hunter Assassin 2 mod apk hack?

    -

    Now that you know the benefits of downloading Hunter Assassin 2 mod apk hack, you may be wondering how to do it. Well, it's very easy and simple. Just follow these steps:

    -

    Step 1: Download the mod apk file from a trusted source

    -

    The first thing you need to do is to download the mod apk file from a trusted source. There are many websites that offer mod apk files for various games, but not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful and choose a reputable website that provides genuine and working mod apk files. One such website is [ModApkWorld], which has a large collection of mod apk files for different games, including Hunter Assassin 2. You can download Hunter Assassin 2 mod apk hack from this website by clicking on this [link].

    -

    Step 2: Enable unknown sources on your device

    -

    The next thing you need to do is to enable unknown sources on your device. This is because Android devices do not allow installing apps from sources other than Google Play Store by default, for security reasons. However, since you are downloading the mod apk file from a trusted source, you don't have to worry about that. To enable unknown sources on your device, go to Settings > Security > Unknown Sources and toggle it on.

    -

    Step 3: Install the mod apk file and enjoy the game

    -

    The final thing you need to do is to install the mod apk file and enjoy the game. To do that, locate the downloaded mod apk file on your device's file manager or downloads folder and tap on it. Then, follow the instructions on the screen to install the app. Once the installation is complete, open the app and start playing Hunter Assassin 2 with unlimited money, gems, characters, weapons, and more.

    -

    Conclusion

    -

    Hunter Assassin 2 is a stealth action game that tests your skills and strategy as an assassin who has to eliminate all the guards in each level without being seen or shot. The game has multiple locations, different characters, different weapons, daily missions, leaderboards, and more features that make it an enjoyable and challenging game. However, if you want to make the game more fun and exciting, you can download Hunter Assassin 2 mod apk hack and enjoy unlimited money, gems, characters, weapons, and more. You can download Hunter Assassin 2 mod apk hack from [ModApkWorld] by following the steps we mentioned in this article. We hope you found this article helpful and informative. If you have any questions or feedback, feel free to leave a comment below. Happy hunting!

    -

    FAQs

    -

    Here are some of the frequently asked questions about Hunter Assassin 2 mod apk hack download:

    -
      -
    • Q: Is Hunter Assassin 2 mod apk hack safe to use?
    • -
    • A: Yes, Hunter Assassin 2 mod apk hack is safe to use, as long as you download it from a trusted source like [ModApkWorld]. The mod apk file does not contain any viruses, malware, or spyware that can harm your device or steal your personal information. However, you should always scan the file before installing it, just to be on the safe side.
    • -
    • Q: Does Hunter Assassin 2 mod apk hack require internet connection?
    • -
    • A: No, Hunter Assassin 2 mod apk hack does not require internet connection to play. You can play the game offline without any problems. However, you may need internet connection to access some features like leaderboards or daily missions.
    • -
    • Q: Will Hunter Assassin 2 mod apk hack affect my game progress?
    • -
    • A: No, Hunter Assassin 2 mod apk hack will not affect your game progress. You can continue playing the game from where you left off, with all your money, gems, characters, weapons, and levels intact. However, you should always back up your game data before installing any mod apk file, just in case something goes wrong.
    • -
    • Q: Can I update Hunter Assassin 2 mod apk hack?
    • -
    • A: Yes, you can update Hunter Assassin 2 mod apk hack whenever there is a new version available. However, you should always download the latest version of the mod apk file from [ModApkWorld], as other sources may not be updated or compatible with the new version of the game.
    • -
    • Q: Can I play Hunter Assassin 2 mod apk hack with my friends?
    • -
    • A: Yes, you can play Hunter Assassin 2 mod apk hack with your friends, as long as they also have the same mod apk file installed on their devices. You can share the mod apk file with them via Bluetooth, Wi-Fi, or other methods. You can also compete with them on the leaderboards and see who is the best hunter assassin.
    • -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Facebook Lite APK A Smaller and Faster Version of Facebook for Android.md b/spaces/1phancelerku/anime-remove-background/Facebook Lite APK A Smaller and Faster Version of Facebook for Android.md deleted file mode 100644 index e22e2b3f9c84dbe38c096e176f9e0b2997aed7f4..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Facebook Lite APK A Smaller and Faster Version of Facebook for Android.md +++ /dev/null @@ -1,132 +0,0 @@ - -

    Facebook Download Lite APK: A Faster and Lighter Way to Connect with Friends

    -

    Facebook is one of the most popular social media platforms in the world, with over 2.8 billion monthly active users. However, not everyone has access to a fast and stable internet connection, or a powerful and spacious smartphone. That's why Facebook created Facebook Lite, a lighter and faster version of the main app that works on all Android devices and networks. In this article, we will explain what Facebook Lite is, how to download and install it, and how to use it to stay in touch with your friends and family.

    -

    facebook download lite apk


    Download File >>>>> https://jinyurl.com/2uNTgQ



    -

    What is Facebook Lite?

    -

    Facebook Lite is an official Facebook client that lets you use this popular social network through a much lighter app that's better suited for low-power Android devices or ones with limited internet connections. The app takes up about 250 kilobytes on your memory once installed, which means that it occupies a hundred times less space than the standard Facebook app. It also consumes less mobile data and works on 2G networks and areas with slow or unstable internet connections.

    -

    The main features of Facebook Lite

    -

    Facebook Lite has all the classic features of Facebook that you need, such as:

    -
      -
    • Sharing photos, videos, status updates, and stories
    • -
    • Liking, commenting, and reacting to posts from your friends and pages you follow
    • -
    • Chatting and calling with Messenger Lite, which is integrated into the app
    • -
    • Watching and broadcasting live videos
    • -
    • Joining groups, events, and pages
    • -
    • Finding nearby friends and places
    • -
    • Getting notifications for important activities
    • -
    -

    The benefits of using Facebook Lite

    -

    By using Facebook Lite, you can enjoy the following benefits:

    -
      -
    • Save space on your phone and use less storage space
    • -
    • Save money by using less mobile data
    • -
    • Load faster and upload photos faster
    • -
    • Work on old Android phones that are not supported by the regular Facebook app
    • -
    • Work on all networks, even on 2G networks and areas with poor internet connection
    • -
    -

    How to download and install Facebook Lite APK?

    -

    There are three ways to download and install Facebook Lite APK on your Android device:

    -

    Downloading from the official website

    -

    You can download the Facebook Lite APK from the official website by following these steps:

    -

    facebook lite apk download for android 2.3
    -facebook lite apk free download latest version
    -facebook lite apk download uptodown
    -facebook lite apk download for android 4.4.2
    -facebook lite apk download for jio phone
    -facebook lite apk download for pc windows 10
    -facebook lite apk download old version 2018
    -facebook lite apk download for android 5.1
    -facebook lite apk download apkpure
    -facebook lite apk download for samsung galaxy y
    -facebook lite apk download for android 6.0
    -facebook lite apk download for nokia x2
    -facebook lite apk download for blackberry z10
    -facebook lite apk download for iphone 4s
    -facebook lite apk download for android 7.0
    -facebook lite apk download for huawei y6
    -facebook lite apk download for oppo a37
    -facebook lite apk download for vivo y53
    -facebook lite apk download for android 8.0
    -facebook lite apk download for lenovo a6000
    -facebook lite apk download for micromax q402
    -facebook lite apk download for redmi note 4
    -facebook lite apk download for lg g3
    -facebook lite apk download for tecno spark k7
    -facebook lite apk download for android 9.0
    -facebook lite apk download for asus zenfone 2
    -facebook lite apk download for infinix hot 4
    -facebook lite apk download for moto g5 plus
    -facebook lite apk download for sony xperia z2
    -facebook lite apk download for htc desire 626
    -facebook lite apk download for android 10.0
    -facebook lite apk download for oneplus 6t
    -facebook lite apk download for itel a16 plus
    -facebook lite apk download for realme c1
    -facebook lite apk download for samsung galaxy s10
    -facebook lite apk download for lg g6
    -facebook lite apk download for nokia lumia 520
    -facebook lite apk download for honor 9n
    -facebook lite apk download for xiaomi mi a2
    -facebook lite apk download for zte blade l110
    -facebook lite apk download for android 11.0
    -facebook lite apk download for google pixel 3a
    -facebook lite apk download for lava z61 pro
    -facebook lite apk download for poco f1
    -facebook lite apk download for samsung galaxy a50s
    -facebook lite apk download for lg stylo 4

    -
      -
    1. Go to https://www.facebook.com/lite
    2. -
    3. Tap on the "Download" button
    4. -
    5. Allow your browser to download the APK file
    6. -
    7. Open the file manager app on your phone and locate the downloaded file
    8. -
    9. Tap on the file and follow the instructions to install the app
    10. -
    11. Launch the app and log in with your account
    12. -
    -

    Downloading from Uptodown

    -

    You can also download the Facebook Lite APK from Uptodown, a website that offers free downloads of Android apps. Here's how:

    -
      -
    1. Go to https://facebook-lite.en.uptodown.com/android
    2. -
    3. Tap on the "Download" button
    4. -
    5. Select the latest version of the app and tap on "Download APK"
    6. -
    7. Allow your browser to download the APK file
    8. -
    9. Open the file manager app on your phone and locate the downloaded file
    10. -
    11. Tap on the file and follow the instructions to install the app
    12. -
    13. Launch the app and log in with your account
    14. -
    -

    Downloading from Google Play Store

    -

    The easiest way to download and install Facebook Lite APK is from the Google Play Store, the official app store for Android devices. Here's how:

    -
      -
    1. Go to https://play.google.com/store/apps/details?id=com.facebook.lite
    2. -
    3. Tap on the "Install" button
    4. -
    5. Wait for the app to download and install automatically
    6. -
    7. Launch the app and log in with your account
    8. -
    -

    How to use Facebook Lite?

    -

    Using Facebook Lite is very similar to using the regular Facebook app, but with some minor differences. Here are some tips on how to use Facebook Lite:

    -

    Logging in with your account

    -

    To log in with your account, you need to enter your email address or phone number and your password. You can also choose to stay logged in or log out from the settings menu. If you don't have an account, you can create one by tapping on the "Sign Up" button.

    -

    Browsing your news feed and stories

    -

    To browse your news feed and stories, you can swipe up and down on the screen. You can also tap on the icons at the top of the screen to switch between different tabs, such as home, friends, groups, notifications, and menu. To view a story, you can tap on it and swipe left or right to see more stories. To exit a story, you can swipe down or tap on the "X" button.

    -

    Posting and reacting to content

    -

    To post and react to content, you can use the buttons at the bottom of the screen. You can tap on the "+" button to create a new post, where you can add photos, videos, text, stickers, emojis, and more. You can also tap on the "Live" button to start a live video broadcast. To react to a post, you can tap on the "Like" button or hold it to see more reactions, such as love, haha, wow, sad, and angry. You can also tap on the "Comment" button to write a comment or reply to other comments.

    -

    Chatting and calling with Messenger Lite

    -

    To chat and call with Messenger Lite, you can tap on the "Chat" button at the bottom of the screen. You will see a list of your recent conversations and contacts. You can tap on a conversation or contact to open it and send messages, photos, videos, stickers, emojis, voice notes, and more. You can also tap on the "Call" button to make a voice or video call. To start a new conversation or call, you can tap on the "+" button at the top of the screen.

    -

    Watching and broadcasting live videos

    -

    To watch and broadcast live videos, you can use the "Live" feature. You can tap on the "Live" button at the bottom of the screen to start a live video broadcast. You can choose who can see your live video, add a description, tag friends, and more. You can also see how many viewers and comments you have during your broadcast. To end your broadcast, you can tap on the "Finish" button. To watch live videos from other people or pages, you can tap on the "Live" tab at the top of the screen. You will see a list of live videos that are relevant to you. You can tap on a live video to watch it and interact with it by liking, commenting, or sharing.

    -

    Conclusion

    -

    Facebook Lite is a great alternative to the regular Facebook app for people who want to save space, data, and battery on their Android devices. It has all the essential features of Facebook that you need to connect with your friends and family. It also works well on low-end devices and slow networks. If you want to download and install Facebook Lite APK on your device, you can follow one of the three methods we explained above. We hope this article was helpful for you and that you enjoy using Facebook Lite.

    -

    FAQs

    -
      -
    • What is the difference between Facebook Lite and Facebook?
      -Facebook Lite is a lighter and faster version of Facebook that works on all Android devices and networks. It takes up less space, data, and battery than Facebook. It also has some features that are not available on Facebook, such as Messenger Lite integration.
    • -
    • Is Facebook Lite safe?
      -Yes, Facebook Lite is safe to use. It is an official Facebook client that follows the same privacy and security policies as Facebook. You can also adjust your privacy settings and control who can see your posts and profile on Facebook Lite.
    • -
    • How can I update Facebook Lite?
      -You can update Facebook Lite by following the same method that you used to download and install it. If you downloaded it from the official website or Uptodown, you can check for updates on those websites and download the latest version of the APK file. If you downloaded it from the Google Play Store, you can check for updates on the app store and tap on the "Update" button.
    • -
    • Can I use Facebook Lite on my PC or laptop?
      -No, Facebook Lite is only available for Android devices. If you want to use Facebook on your PC or laptop, you can use the regular Facebook website or app.
    • -
    • Can I use Facebook Lite and Facebook at the same time?
      -Yes, you can use both apps on your device at the same time. However, you will need to log in with different accounts on each app, as you cannot use the same account on both apps simultaneously.
    • -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/AI-Zero-to-Hero/04-GR-Seq-2-Seq-QA-Auto-Gen/README.md b/spaces/AI-Zero-to-Hero/04-GR-Seq-2-Seq-QA-Auto-Gen/README.md deleted file mode 100644 index d669b855af6ecee7e70f174d63a158553d35ea77..0000000000000000000000000000000000000000 --- a/spaces/AI-Zero-to-Hero/04-GR-Seq-2-Seq-QA-Auto-Gen/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 04 GR Seq 2 Seq QA Auto Gen -emoji: 💻 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/feature_fusion.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/feature_fusion.py deleted file mode 100644 index c2419516b76931f0aa801d78e1b5f04a92a909e6..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/feature_fusion.py +++ /dev/null @@ -1,193 +0,0 @@ -''' -Feature Fusion for Varible-Length Data Processing -AFF/iAFF is referred and modified from https://github.com/YimianDai/open-aff/blob/master/aff_pytorch/aff_net/fusion.py -According to the paper: Yimian Dai et al, Attentional Feature Fusion, IEEE Winter Conference on Applications of Computer Vision, WACV 2021 -''' - -import torch -import torch.nn as nn - - -class DAF(nn.Module): - ''' - 直接相加 DirectAddFuse - ''' - - def __init__(self): - super(DAF, self).__init__() - - def forward(self, x, residual): - return x + residual - - -class iAFF(nn.Module): - ''' - 多特征融合 iAFF - ''' - - def __init__(self, channels=64, r=4, type='2D'): - super(iAFF, self).__init__() - inter_channels = int(channels // r) - - if type == '1D': - # 本地注意力 - self.local_att = nn.Sequential( - nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(channels), - ) - - # 全局注意力 - self.global_att = nn.Sequential( - nn.AdaptiveAvgPool1d(1), - nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(channels), - ) - - # 第二次本地注意力 - self.local_att2 = nn.Sequential( - nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(channels), - ) - # 第二次全局注意力 - self.global_att2 = nn.Sequential( - nn.AdaptiveAvgPool1d(1), - nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(channels), - ) - elif type == '2D': - # 本地注意力 - self.local_att = nn.Sequential( - nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(channels), - ) - - # 全局注意力 - self.global_att = nn.Sequential( - nn.AdaptiveAvgPool2d(1), - nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(channels), - ) - - # 第二次本地注意力 - self.local_att2 = nn.Sequential( - nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(channels), - ) - # 第二次全局注意力 - self.global_att2 = nn.Sequential( - nn.AdaptiveAvgPool2d(1), - nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(channels), - ) - else: - raise f'the type is not supported' - - self.sigmoid = nn.Sigmoid() - - def forward(self, x, residual): - flag = False - xa = x + residual - if xa.size(0) == 1: - xa = torch.cat([xa,xa],dim=0) - flag = True - xl = self.local_att(xa) - xg = self.global_att(xa) - xlg = xl + xg - wei = self.sigmoid(xlg) - xi = x * wei + residual * (1 - wei) - - xl2 = self.local_att2(xi) - xg2 = self.global_att(xi) - xlg2 = xl2 + xg2 - wei2 = self.sigmoid(xlg2) - xo = x * wei2 + residual * (1 - wei2) - if flag: - xo = xo[0].unsqueeze(0) - return xo - - -class AFF(nn.Module): - ''' - 多特征融合 AFF - ''' - - def __init__(self, channels=64, r=4, type='2D'): - super(AFF, self).__init__() - inter_channels = int(channels // r) - - if type == '1D': - self.local_att = nn.Sequential( - nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(channels), - ) - self.global_att = nn.Sequential( - nn.AdaptiveAvgPool1d(1), - nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm1d(channels), - ) - elif type == '2D': - self.local_att = nn.Sequential( - nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(channels), - ) - self.global_att = nn.Sequential( - nn.AdaptiveAvgPool2d(1), - nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(channels), - ) - else: - raise f'the type is not supported.' - - self.sigmoid = nn.Sigmoid() - - def forward(self, x, residual): - flag = False - xa = x + residual - if xa.size(0) == 1: - xa = torch.cat([xa,xa],dim=0) - flag = True - xl = self.local_att(xa) - xg = self.global_att(xa) - xlg = xl + xg - wei = self.sigmoid(xlg) - xo = 2 * x * wei + 2 * residual * (1 - wei) - if flag: - xo = xo[0].unsqueeze(0) - return xo - diff --git a/spaces/AIZ2H/Gradio331-3D-Models-AI-1/README.md b/spaces/AIZ2H/Gradio331-3D-Models-AI-1/README.md deleted file mode 100644 index 9c656f8fde6676cefbcdab7417271acf450c5796..0000000000000000000000000000000000000000 --- a/spaces/AIZ2H/Gradio331-3D-Models-AI-1/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 01 Gradio 3D Models for AI -emoji: 🦆🧊 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ASJMO/freegpt/get_working_providers.py b/spaces/ASJMO/freegpt/get_working_providers.py deleted file mode 100644 index 37ac5e5eed144fd14eca6fc425cb01c3678896b2..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/get_working_providers.py +++ /dev/null @@ -1,7 +0,0 @@ -from g4f.active_providers import get_active_model_providers - -working_providers = get_active_model_providers() - -print("\nWorking providers by model:") -for model, providers in working_providers.items(): - print(f"{model}: {', '.join(providers)}") diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Phind.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Phind.py deleted file mode 100644 index 0db4e3c2662e6ec3b4a4231b9c55bf0744085da6..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Phind.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import annotations - -import random -from datetime import datetime - -from ..typing import AsyncGenerator -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider, format_prompt - - -class Phind(AsyncGeneratorProvider): - url = "https://www.phind.com" - working = True - supports_gpt_4 = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - chars = 'abcdefghijklmnopqrstuvwxyz0123456789' - user_id = ''.join(random.choice(chars) for _ in range(24)) - data = { - "question": format_prompt(messages), - "webResults": [], - "options": { - "date": datetime.now().strftime("%d.%m.%Y"), - "language": "en", - "detailed": True, - "anonUserId": user_id, - "answerModel": "GPT-4", - "creativeMode": False, - "customLinks": [] - }, - "context":"" - } - headers = { - "Authority": cls.url, - "Accept": "application/json, text/plain, */*", - "Origin": cls.url, - "Referer": f"{cls.url}/" - } - async with StreamSession(headers=headers, timeout=(5, 180), proxies={"https": proxy}, impersonate="chrome107") as session: - async with session.post(f"{cls.url}/api/infer/answer", json=data) as response: - response.raise_for_status() - new_lines = 0 - async for line in response.iter_lines(): - if not line: - continue - if line.startswith(b"data: "): - line = line[6:] - if line.startswith(b""): - continue - if line: - if new_lines: - yield "".join(["\n" for _ in range(int(new_lines / 2))]) - new_lines = 0 - yield line.decode() - else: - new_lines += 1 - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/spaces/Akbartus/U2net-with-rgba/app.py b/spaces/Akbartus/U2net-with-rgba/app.py deleted file mode 100644 index 41fdaa5e77f8becc5705db109657a15924e99545..0000000000000000000000000000000000000000 --- a/spaces/Akbartus/U2net-with-rgba/app.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -import copy -import time - -import cv2 as cv -import numpy as np -import onnxruntime - -from PIL import Image - -import gradio - -def run_inference(onnx_session, input_size, image): - # リサイズ - temp_image = copy.deepcopy(image) - resize_image = cv.resize(temp_image, dsize=(input_size, input_size)) - x = cv.cvtColor(resize_image, cv.COLOR_BGR2RGB) - - # 前処理 - x = np.array(x, dtype=np.float32) - mean = [0.485, 0.456, 0.406] - std = [0.229, 0.224, 0.225] - x = (x / 255 - mean) / std - x = x.transpose(2, 0, 1).astype('float32') - x = x.reshape(-1, 3, input_size, input_size) - - # 推論 - input_name = onnx_session.get_inputs()[0].name - output_name = onnx_session.get_outputs()[0].name - onnx_result = onnx_session.run([output_name], {input_name: x}) - - # 後処理 - onnx_result = np.array(onnx_result).squeeze() - min_value = np.min(onnx_result) - max_value = np.max(onnx_result) - onnx_result = (onnx_result - min_value) / (max_value - min_value) - onnx_result *= 255 - onnx_result = onnx_result.astype('uint8') - - return onnx_result - -# Load model -onnx_session = onnxruntime.InferenceSession("u2net.onnx") - -def create_rgba(mode, image): - out = run_inference( - onnx_session, - 320, - image, - ) - resize_image = cv.resize(out, dsize=(image.shape[1], image.shape[0])) - - if mode == "binary": - resize_image[resize_image > 255] = 255 - resize_image[resize_image < 125] = 0 - - mask = Image.fromarray(resize_image) - - rgba_image = Image.fromarray(image).convert('RGBA') - rgba_image.putalpha(mask) - - return rgba_image - -inputs = [gradio.inputs.Radio(["binary", "smooth"]), gradio.inputs.Image()] -outputs = gradio.outputs.Image(type="pil") -iface = gradio.Interface(fn=create_rgba, inputs=inputs, outputs=outputs, api_name="add") -iface.launch() \ No newline at end of file diff --git a/spaces/Akinade/Iris_App/README.md b/spaces/Akinade/Iris_App/README.md deleted file mode 100644 index 5a8ff5d0c9f690e44dff628b66478fbdba224cf2..0000000000000000000000000000000000000000 --- a/spaces/Akinade/Iris_App/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Iris App -emoji: 😻 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.0.19 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Alpaca233/SadTalker/src/facerender/sync_batchnorm/__init__.py b/spaces/Alpaca233/SadTalker/src/facerender/sync_batchnorm/__init__.py deleted file mode 100644 index bc8709d92c610b36e0bcbd7da20c1eb41dc8cfcf..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/facerender/sync_batchnorm/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8 -*- -# File : __init__.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d -from .replicate import DataParallelWithCallback, patch_replication_callback diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/editings/latent_editor.py b/spaces/Amrrs/DragGan-Inversion/PTI/editings/latent_editor.py deleted file mode 100644 index 32554e8010c4da27aaded1b0ce938bd37d5e242b..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/PTI/editings/latent_editor.py +++ /dev/null @@ -1,23 +0,0 @@ -import torch - -from configs import paths_config -from editings import ganspace -from utils.data_utils import tensor2im - - -class LatentEditor(object): - - def apply_ganspace(self, latent, ganspace_pca, edit_directions): - edit_latents = ganspace.edit(latent, ganspace_pca, edit_directions) - return edit_latents - - def apply_interfacegan(self, latent, direction, factor=1, factor_range=None): - edit_latents = [] - if factor_range is not None: # Apply a range of editing factors. for example, (-5, 5) - for f in range(*factor_range): - edit_latent = latent + f * direction - edit_latents.append(edit_latent) - edit_latents = torch.cat(edit_latents) - else: - edit_latents = latent + factor * direction - return edit_latents diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/options/train_options.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/options/train_options.py deleted file mode 100644 index a365217f8b76d38aaef4a42b90152ec7a8e7bf1f..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/options/train_options.py +++ /dev/null @@ -1,49 +0,0 @@ -from argparse import ArgumentParser - - -class TrainOptions: - - def __init__(self): - self.parser = ArgumentParser() - self.initialize() - - def initialize(self): - self.parser.add_argument('--exp_dir', type=str, help='Path to experiment output directory') - self.parser.add_argument('--mapper_type', default='LevelsMapper', type=str, help='Which mapper to use') - self.parser.add_argument('--no_coarse_mapper', default=False, action="store_true") - self.parser.add_argument('--no_medium_mapper', default=False, action="store_true") - self.parser.add_argument('--no_fine_mapper', default=False, action="store_true") - self.parser.add_argument('--latents_train_path', default="train_faces.pt", type=str, help="The latents for the training") - self.parser.add_argument('--latents_test_path', default="test_faces.pt", type=str, help="The latents for the validation") - self.parser.add_argument('--train_dataset_size', default=5000, type=int, help="Will be used only if no latents are given") - self.parser.add_argument('--test_dataset_size', default=1000, type=int, help="Will be used only if no latents are given") - - self.parser.add_argument('--batch_size', default=2, type=int, help='Batch size for training') - self.parser.add_argument('--test_batch_size', default=1, type=int, help='Batch size for testing and inference') - self.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers') - self.parser.add_argument('--test_workers', default=2, type=int, help='Number of test/inference dataloader workers') - - self.parser.add_argument('--learning_rate', default=0.5, type=float, help='Optimizer learning rate') - self.parser.add_argument('--optim_name', default='ranger', type=str, help='Which optimizer to use') - - self.parser.add_argument('--id_lambda', default=0.1, type=float, help='ID loss multiplier factor') - self.parser.add_argument('--clip_lambda', default=1.0, type=float, help='CLIP loss multiplier factor') - self.parser.add_argument('--latent_l2_lambda', default=0.8, type=float, help='Latent L2 loss multiplier factor') - - self.parser.add_argument('--stylegan_weights', default='../pretrained_models/stylegan2-ffhq-config-f.pt', type=str, help='Path to StyleGAN model weights') - self.parser.add_argument('--stylegan_size', default=1024, type=int) - self.parser.add_argument('--ir_se50_weights', default='../pretrained_models/model_ir_se50.pth', type=str, help="Path to facial recognition network used in ID loss") - self.parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to StyleCLIPModel model checkpoint') - - self.parser.add_argument('--max_steps', default=50000, type=int, help='Maximum number of training steps') - self.parser.add_argument('--image_interval', default=100, type=int, help='Interval for logging train images during training') - self.parser.add_argument('--board_interval', default=50, type=int, help='Interval for logging metrics to tensorboard') - self.parser.add_argument('--val_interval', default=2000, type=int, help='Validation interval') - self.parser.add_argument('--save_interval', default=2000, type=int, help='Model checkpoint interval') - - self.parser.add_argument('--description', required=True, type=str, help='Driving text prompt') - - - def parse(self): - opts = self.parser.parse_args() - return opts \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/versatile_diffusion.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/versatile_diffusion.md deleted file mode 100644 index 721e7b0246dc51ea85231e1de1e56bf27154513e..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/versatile_diffusion.md +++ /dev/null @@ -1,54 +0,0 @@ - - -# Versatile Diffusion - -Versatile Diffusion was proposed in [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://huggingface.co/papers/2211.08332) by Xingqian Xu, Zhangyang Wang, Eric Zhang, Kai Wang, Humphrey Shi . - -The abstract from the paper is: - -*The recent advances in diffusion models have set an impressive milestone in many generation tasks. Trending works such as DALL-E2, Imagen, and Stable Diffusion have attracted great interest in academia and industry. Despite the rapid landscape changes, recent new approaches focus on extensions and performance rather than capacity, thus requiring separate models for separate tasks. In this work, we expand the existing single-flow diffusion pipeline into a multi-flow network, dubbed Versatile Diffusion (VD), that handles text-to-image, image-to-text, image-variation, and text-variation in one unified model. Moreover, we generalize VD to a unified multi-flow multimodal diffusion framework with grouped layers, swappable streams, and other propositions that can process modalities beyond images and text. Through our experiments, we demonstrate that VD and its underlying framework have the following merits: a) VD handles all subtasks with competitive quality; b) VD initiates novel extensions and applications such as disentanglement of style and semantic, image-text dual-guided generation, etc.; c) Through these experiments and applications, VD provides more semantic insights of the generated outputs.* - -## Tips - -You can load the more memory intensive "all-in-one" [`VersatileDiffusionPipeline`] that supports all the tasks or use the individual pipelines which are more memory efficient. - -| **Pipeline** | **Supported tasks** | -|------------------------------------------------------|-----------------------------------| -| [`VersatileDiffusionPipeline`] | all of the below | -| [`VersatileDiffusionTextToImagePipeline`] | text-to-image | -| [`VersatileDiffusionImageVariationPipeline`] | image variation | -| [`VersatileDiffusionDualGuidedPipeline`] | image-text dual guided generation | - - - -Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## VersatileDiffusionPipeline -[[autodoc]] VersatileDiffusionPipeline - -## VersatileDiffusionTextToImagePipeline -[[autodoc]] VersatileDiffusionTextToImagePipeline - - all - - __call__ - -## VersatileDiffusionImageVariationPipeline -[[autodoc]] VersatileDiffusionImageVariationPipeline - - all - - __call__ - -## VersatileDiffusionDualGuidedPipeline -[[autodoc]] VersatileDiffusionDualGuidedPipeline - - all - - __call__ diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/README.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/README.md deleted file mode 100644 index 81a9de81c73728ea41eb6e8617a5429c3c9645ff..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# 🧨 Diffusers Experimental - -We are adding experimental code to support novel applications and usages of the Diffusers library. -Currently, the following experiments are supported: -* Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model. \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py deleted file mode 100644 index a124c3de60caccd9e13be96845947f39079ae09f..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py +++ /dev/null @@ -1,232 +0,0 @@ -# coding=utf-8 -# Copyright 2022 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import random -import unittest - -import numpy as np -import torch - -from diffusers import ( - DPMSolverMultistepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - LMSDiscreteScheduler, - OnnxStableDiffusionUpscalePipeline, - PNDMScheduler, -) -from diffusers.utils import floats_tensor -from diffusers.utils.testing_utils import ( - is_onnx_available, - load_image, - nightly, - require_onnxruntime, - require_torch_gpu, -) - -from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin - - -if is_onnx_available(): - import onnxruntime as ort - - -class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): - # TODO: is there an appropriate internal test set? - hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx" - - def get_dummy_inputs(self, seed=0): - image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed)) - generator = torch.manual_seed(seed) - inputs = { - "prompt": "A painting of a squirrel eating a burger", - "image": image, - "generator": generator, - "num_inference_steps": 3, - "guidance_scale": 7.5, - "output_type": "numpy", - } - return inputs - - def test_pipeline_default_ddpm(self): - pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") - pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs() - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1].flatten() - - # started as 128, should now be 512 - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array( - [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] - ) - assert np.abs(image_slice - expected_slice).max() < 1e-1 - - def test_pipeline_pndm(self): - pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") - pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) - pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs() - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array( - [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] - ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - def test_pipeline_dpm_multistep(self): - pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") - pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) - pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs() - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array( - [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] - ) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - def test_pipeline_euler(self): - pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") - pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) - pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs() - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array( - [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] - ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - def test_pipeline_euler_ancestral(self): - pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") - pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) - pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs() - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array( - [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] - ) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - -@nightly -@require_onnxruntime -@require_torch_gpu -class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): - @property - def gpu_provider(self): - return ( - "CUDAExecutionProvider", - { - "gpu_mem_limit": "15000000000", # 15GB - "arena_extend_strategy": "kSameAsRequested", - }, - ) - - @property - def gpu_options(self): - options = ort.SessionOptions() - options.enable_mem_pattern = False - return options - - def test_inference_default_ddpm(self): - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/img2img/sketch-mountains-input.jpg" - ) - init_image = init_image.resize((128, 128)) - # using the PNDM scheduler by default - pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( - "ssube/stable-diffusion-x4-upscaler-onnx", - provider=self.gpu_provider, - sess_options=self.gpu_options, - ) - pipe.set_progress_bar_config(disable=None) - - prompt = "A fantasy landscape, trending on artstation" - - generator = torch.manual_seed(0) - output = pipe( - prompt=prompt, - image=init_image, - guidance_scale=7.5, - num_inference_steps=10, - generator=generator, - output_type="np", - ) - images = output.images - image_slice = images[0, 255:258, 383:386, -1] - - assert images.shape == (1, 512, 512, 3) - expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972]) - # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues - - assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 - - def test_inference_k_lms(self): - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/img2img/sketch-mountains-input.jpg" - ) - init_image = init_image.resize((128, 128)) - lms_scheduler = LMSDiscreteScheduler.from_pretrained( - "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" - ) - pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( - "ssube/stable-diffusion-x4-upscaler-onnx", - scheduler=lms_scheduler, - provider=self.gpu_provider, - sess_options=self.gpu_options, - ) - pipe.set_progress_bar_config(disable=None) - - prompt = "A fantasy landscape, trending on artstation" - - generator = torch.manual_seed(0) - output = pipe( - prompt=prompt, - image=init_image, - guidance_scale=7.5, - num_inference_steps=20, - generator=generator, - output_type="np", - ) - images = output.images - image_slice = images[0, 255:258, 383:386, -1] - - assert images.shape == (1, 512, 512, 3) - expected_slice = np.array( - [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] - ) - # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues - - assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/text_to_video/test_text_to_video.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/text_to_video/test_text_to_video.py deleted file mode 100644 index f391568d10901be45257282e468e32efb3499555..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/text_to_video/test_text_to_video.py +++ /dev/null @@ -1,205 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -import torch -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer - -from diffusers import ( - AutoencoderKL, - DDIMScheduler, - DPMSolverMultistepScheduler, - TextToVideoSDPipeline, - UNet3DConditionModel, -) -from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device -from diffusers.utils.testing_utils import enable_full_determinism - -from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS -from ..test_pipelines_common import PipelineTesterMixin - - -enable_full_determinism() - - -@skip_mps -class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = TextToVideoSDPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - # No `output_type`. - required_optional_params = frozenset( - [ - "num_inference_steps", - "generator", - "latents", - "return_dict", - "callback", - "callback_steps", - ] - ) - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet3DConditionModel( - block_out_channels=(32, 64, 64, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), - up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), - cross_attention_dim=32, - attention_head_dim=4, - ) - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - sample_size=128, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - hidden_act="gelu", - projection_dim=512, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - components = { - "unet": unet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "prompt": "A painting of a squirrel eating a burger", - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 6.0, - "output_type": "pt", - } - return inputs - - def test_text_to_video_default_case(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = TextToVideoSDPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - inputs["output_type"] = "np" - frames = sd_pipe(**inputs).frames - image_slice = frames[0][-3:, -3:, -1] - - assert frames[0].shape == (64, 64, 3) - expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_attention_slicing_forward_pass(self): - self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3) - - @unittest.skipIf( - torch_device != "cuda" or not is_xformers_available(), - reason="XFormers attention is only available with CUDA and `xformers` installed", - ) - def test_xformers_attention_forwardGenerator_pass(self): - self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2) - - # (todo): sayakpaul - @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") - def test_inference_batch_consistent(self): - pass - - # (todo): sayakpaul - @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") - def test_inference_batch_single_identical(self): - pass - - @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") - def test_num_images_per_prompt(self): - pass - - def test_progress_bar(self): - return super().test_progress_bar() - - -@slow -@skip_mps -class TextToVideoSDPipelineSlowTests(unittest.TestCase): - def test_full_model(self): - expected_video = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" - ) - - pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b") - pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) - pipe = pipe.to("cuda") - - prompt = "Spiderman is surfing" - generator = torch.Generator(device="cpu").manual_seed(0) - - video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames - video = video_frames.cpu().numpy() - - assert np.abs(expected_video - video).mean() < 5e-2 - - def test_two_step_model(self): - expected_video = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" - ) - - pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b") - pipe = pipe.to("cuda") - - prompt = "Spiderman is surfing" - generator = torch.Generator(device="cpu").manual_seed(0) - - video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames - video = video_frames.cpu().numpy() - - assert np.abs(expected_video - video).mean() < 5e-2 diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/base_sampler.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/base_sampler.py deleted file mode 100644 index 9ea35def115b49dfdad8a1f7c040ef3cd983b0d1..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/base_sampler.py +++ /dev/null @@ -1,101 +0,0 @@ -from abc import ABCMeta, abstractmethod - -import torch - -from .sampling_result import SamplingResult - - -class BaseSampler(metaclass=ABCMeta): - """Base class of samplers.""" - - def __init__(self, - num, - pos_fraction, - neg_pos_ub=-1, - add_gt_as_proposals=True, - **kwargs): - self.num = num - self.pos_fraction = pos_fraction - self.neg_pos_ub = neg_pos_ub - self.add_gt_as_proposals = add_gt_as_proposals - self.pos_sampler = self - self.neg_sampler = self - - @abstractmethod - def _sample_pos(self, assign_result, num_expected, **kwargs): - """Sample positive samples.""" - pass - - @abstractmethod - def _sample_neg(self, assign_result, num_expected, **kwargs): - """Sample negative samples.""" - pass - - def sample(self, - assign_result, - bboxes, - gt_bboxes, - gt_labels=None, - **kwargs): - """Sample positive and negative bboxes. - - This is a simple implementation of bbox sampling given candidates, - assigning results and ground truth bboxes. - - Args: - assign_result (:obj:`AssignResult`): Bbox assigning results. - bboxes (Tensor): Boxes to be sampled from. - gt_bboxes (Tensor): Ground truth bboxes. - gt_labels (Tensor, optional): Class labels of ground truth bboxes. - - Returns: - :obj:`SamplingResult`: Sampling result. - - Example: - >>> from mmdet.core.bbox import RandomSampler - >>> from mmdet.core.bbox import AssignResult - >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes - >>> rng = ensure_rng(None) - >>> assign_result = AssignResult.random(rng=rng) - >>> bboxes = random_boxes(assign_result.num_preds, rng=rng) - >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng) - >>> gt_labels = None - >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1, - >>> add_gt_as_proposals=False) - >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels) - """ - if len(bboxes.shape) < 2: - bboxes = bboxes[None, :] - - bboxes = bboxes[:, :4] - - gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) - if self.add_gt_as_proposals and len(gt_bboxes) > 0: - if gt_labels is None: - raise ValueError( - 'gt_labels must be given when add_gt_as_proposals is True') - bboxes = torch.cat([gt_bboxes, bboxes], dim=0) - assign_result.add_gt_(gt_labels) - gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) - gt_flags = torch.cat([gt_ones, gt_flags]) - - num_expected_pos = int(self.num * self.pos_fraction) - pos_inds = self.pos_sampler._sample_pos( - assign_result, num_expected_pos, bboxes=bboxes, **kwargs) - # We found that sampled indices have duplicated items occasionally. - # (may be a bug of PyTorch) - pos_inds = pos_inds.unique() - num_sampled_pos = pos_inds.numel() - num_expected_neg = self.num - num_sampled_pos - if self.neg_pos_ub >= 0: - _pos = max(1, num_sampled_pos) - neg_upper_bound = int(self.neg_pos_ub * _pos) - if num_expected_neg > neg_upper_bound: - num_expected_neg = neg_upper_bound - neg_inds = self.neg_sampler._sample_neg( - assign_result, num_expected_neg, bboxes=bboxes, **kwargs) - neg_inds = neg_inds.unique() - - sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, - assign_result, gt_flags) - return sampling_result diff --git a/spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/eval_metric.py b/spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/eval_metric.py deleted file mode 100644 index 5732719d2933d74360fd91a6b58d6320fb666f9d..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/eval_metric.py +++ /dev/null @@ -1,83 +0,0 @@ -import argparse - -import mmcv -from mmcv import Config, DictAction - -from mmdet.datasets import build_dataset - - -def parse_args(): - parser = argparse.ArgumentParser(description='Evaluate metric of the ' - 'results saved in pkl format') - parser.add_argument('config', help='Config of the model') - parser.add_argument('pkl_results', help='Results in pickle format') - parser.add_argument( - '--format-only', - action='store_true', - help='Format the output results without perform evaluation. It is' - 'useful when you want to format the result to a specific format and ' - 'submit it to the test server') - parser.add_argument( - '--eval', - type=str, - nargs='+', - help='Evaluation metrics, which depends on the dataset, e.g., "bbox",' - ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--eval-options', - nargs='+', - action=DictAction, - help='custom options for evaluation, the key-value pair in xxx=yyy ' - 'format will be kwargs for dataset.evaluate() function') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - assert args.eval or args.format_only, ( - 'Please specify at least one operation (eval/format the results) with ' - 'the argument "--eval", "--format-only"') - if args.eval and args.format_only: - raise ValueError('--eval and --format_only cannot be both specified') - - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - # import modules from string list. - if cfg.get('custom_imports', None): - from mmcv.utils import import_modules_from_strings - import_modules_from_strings(**cfg['custom_imports']) - cfg.data.test.test_mode = True - - dataset = build_dataset(cfg.data.test) - outputs = mmcv.load(args.pkl_results) - - kwargs = {} if args.eval_options is None else args.eval_options - if args.format_only: - dataset.format_results(outputs, **kwargs) - if args.eval: - eval_kwargs = cfg.get('evaluation', {}).copy() - # hard-code way to remove EvalHook args - for key in [ - 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', - 'rule' - ]: - eval_kwargs.pop(key, None) - eval_kwargs.update(dict(metric=args.eval, **kwargs)) - print(dataset.evaluate(outputs, **eval_kwargs)) - - -if __name__ == '__main__': - main() diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 580d59ca6995ea95a9345ef3ea574ea5b57e9cfb..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/spaces/Anonymous-sub/Rerender/videos/readme.md b/spaces/Anonymous-sub/Rerender/videos/readme.md deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AquaSuisei/ChatGPTXE/Dockerfile b/spaces/AquaSuisei/ChatGPTXE/Dockerfile deleted file mode 100644 index 8cbd335b09b1d1975bfd83a053b5fcaf398147ea..0000000000000000000000000000000000000000 --- a/spaces/AquaSuisei/ChatGPTXE/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3.9 as builder -RUN apt-get update && apt-get install -y build-essential -COPY requirements.txt . -RUN pip install --user -r requirements.txt - -FROM python:3.9 -MAINTAINER iskoldt -COPY --from=builder /root/.local /root/.local -ENV PATH=/root/.local/bin:$PATH -COPY . /app -WORKDIR /app -ENV my_api_key empty -ENV dockerrun yes -CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"] diff --git a/spaces/Aravindan/butterfly_classification/app.py b/spaces/Aravindan/butterfly_classification/app.py deleted file mode 100644 index 60c11112d3d239b77c076326e840eddf4fedcd6d..0000000000000000000000000000000000000000 --- a/spaces/Aravindan/butterfly_classification/app.py +++ /dev/null @@ -1,112 +0,0 @@ -import cv2, torch -import gradio as gr -import numpy as np -from PIL import Image -import torch.nn as nn -import torchvision.models as models -from torchvision import transforms as T - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -new_model = models.resnet18() -num_ftrs = new_model.fc.in_features -new_model.fc = nn.Linear(num_ftrs, 75) -checkpoint = torch.load('model_best_checkpoint.pth.tar', map_location=torch.device('cpu')) -new_model.load_state_dict(checkpoint['model']) -new_model.to(device) - -we_are = ['INDRA SWALLOW', - 'MALACHITE', - 'COMMON BANDED AWL', - 'DANAID EGGFLY', - 'EASTERN PINE ELFIN', - 'YELLOW SWALLOW TAIL', - 'WOOD SATYR', - 'ULYSES', - 'MESTRA', - 'MANGROVE SKIPPER', - 'BECKERS WHITE', - 'CRECENT', - 'RED SPOTTED PURPLE', - 'SOOTYWING', - 'BLACK HAIRSTREAK', - 'STRAITED QUEEN', - 'ELBOWED PIERROT', - 'ORANGE OAKLEAF', - 'CHESTNUT', - 'POPINJAY', - 'COMMON WOOD-NYMPH', - 'BROWN SIPROETA', - 'QUESTION MARK', - 'ADONIS', - 'CLOUDED SULPHUR', - 'TWO BARRED FLASHER', - 'GOLD BANDED', - 'BANDED ORANGE HELICONIAN', - 'PURPLISH COPPER', - 'VICEROY', - 'RED CRACKER', - 'SILVER SPOT SKIPPER', - 'ZEBRA LONG WING', - 'ORCHARD SWALLOW', - 'RED POSTMAN', - 'SOUTHERN DOGFACE', - 'SCARCE SWALLOW', - 'EASTERN COMA', - 'CAIRNS BIRDWING', - 'GREEN CELLED CATTLEHEART', - 'METALMARK', - 'LARGE MARBLE', - 'AMERICAN SNOOT', - 'COPPER TAIL', - 'AN 88', - 'AFRICAN GIANT SWALLOWTAIL', - 'PAPER KITE', - 'EASTERN DAPPLE WHITE', - 'PEACOCK', - 'ATALA', - 'JULIA', - 'RED ADMIRAL', - 'GREAT JAY', - 'GREAT EGGFLY', - 'GREY HAIRSTREAK', - 'PIPEVINE SWALLOW', - 'PURPLE HAIRSTREAK', - 'ORANGE TIP', - 'BLUE SPOTTED CROW', - 'TROPICAL LEAFWING', - 'CLEOPATRA', - 'APPOLLO', - 'IPHICLUS SISTER', - 'CABBAGE WHITE', - 'BANDED PEACOCK', - 'MONARCH', - 'CRIMSON PATCH', - 'BLUE MORPHO', - 'MOURNING CLOAK', - 'SLEEPY ORANGE', - 'CLODIUS PARNASSIAN', - 'MILBERTS TORTOISESHELL', - 'PINE WHITE', - 'CHECQUERED SKIPPER', - 'PAINTED LADY'] - -def classify(image_): - model = new_model.eval() - image = Image.open(image_) - image = image_transforms(image).float().to(device) - image = image.unsqueeze(0) - output = model(image) - - _, predicted = torch.max(output, 1) - return we_are[predicted] - - -label = gr.outputs.Label(num_top_classes=75) -gr.Interface(fn=classify, inputs='image', outputs=label,interpretation='default', title = 'Butterfly Classification detection ', description = 'It will classify 75 different species ').launch() - - - - - - - diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/wheel.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/wheel.py deleted file mode 100644 index c6a588ff09bcc652fc660b412b040242972d6944..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/wheel.py +++ /dev/null @@ -1,180 +0,0 @@ -import logging -import os -import shutil -from optparse import Values -from typing import List - -from pip._internal.cache import WheelCache -from pip._internal.cli import cmdoptions -from pip._internal.cli.req_command import RequirementCommand, with_cleanup -from pip._internal.cli.status_codes import SUCCESS -from pip._internal.exceptions import CommandError -from pip._internal.operations.build.build_tracker import get_build_tracker -from pip._internal.req.req_install import ( - InstallRequirement, - check_legacy_setup_py_options, -) -from pip._internal.utils.misc import ensure_dir, normalize_path -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.wheel_builder import build, should_build_for_wheel_command - -logger = logging.getLogger(__name__) - - -class WheelCommand(RequirementCommand): - """ - Build Wheel archives for your requirements and dependencies. - - Wheel is a built-package format, and offers the advantage of not - recompiling your software during every install. For more details, see the - wheel docs: https://wheel.readthedocs.io/en/latest/ - - 'pip wheel' uses the build system interface as described here: - https://pip.pypa.io/en/stable/reference/build-system/ - - """ - - usage = """ - %prog [options] ... - %prog [options] -r ... - %prog [options] [-e] ... - %prog [options] [-e] ... - %prog [options] ...""" - - def add_options(self) -> None: - self.cmd_opts.add_option( - "-w", - "--wheel-dir", - dest="wheel_dir", - metavar="dir", - default=os.curdir, - help=( - "Build wheels into , where the default is the " - "current working directory." - ), - ) - self.cmd_opts.add_option(cmdoptions.no_binary()) - self.cmd_opts.add_option(cmdoptions.only_binary()) - self.cmd_opts.add_option(cmdoptions.prefer_binary()) - self.cmd_opts.add_option(cmdoptions.no_build_isolation()) - self.cmd_opts.add_option(cmdoptions.use_pep517()) - self.cmd_opts.add_option(cmdoptions.no_use_pep517()) - self.cmd_opts.add_option(cmdoptions.check_build_deps()) - self.cmd_opts.add_option(cmdoptions.constraints()) - self.cmd_opts.add_option(cmdoptions.editable()) - self.cmd_opts.add_option(cmdoptions.requirements()) - self.cmd_opts.add_option(cmdoptions.src()) - self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) - self.cmd_opts.add_option(cmdoptions.no_deps()) - self.cmd_opts.add_option(cmdoptions.progress_bar()) - - self.cmd_opts.add_option( - "--no-verify", - dest="no_verify", - action="store_true", - default=False, - help="Don't verify if built wheel is valid.", - ) - - self.cmd_opts.add_option(cmdoptions.config_settings()) - self.cmd_opts.add_option(cmdoptions.build_options()) - self.cmd_opts.add_option(cmdoptions.global_options()) - - self.cmd_opts.add_option( - "--pre", - action="store_true", - default=False, - help=( - "Include pre-release and development versions. By default, " - "pip only finds stable versions." - ), - ) - - self.cmd_opts.add_option(cmdoptions.require_hashes()) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, self.cmd_opts) - - @with_cleanup - def run(self, options: Values, args: List[str]) -> int: - session = self.get_default_session(options) - - finder = self._build_package_finder(options, session) - - options.wheel_dir = normalize_path(options.wheel_dir) - ensure_dir(options.wheel_dir) - - build_tracker = self.enter_context(get_build_tracker()) - - directory = TempDirectory( - delete=not options.no_clean, - kind="wheel", - globally_managed=True, - ) - - reqs = self.get_requirements(args, options, finder, session) - check_legacy_setup_py_options(options, reqs) - - wheel_cache = WheelCache(options.cache_dir) - - preparer = self.make_requirement_preparer( - temp_build_dir=directory, - options=options, - build_tracker=build_tracker, - session=session, - finder=finder, - download_dir=options.wheel_dir, - use_user_site=False, - verbosity=self.verbosity, - ) - - resolver = self.make_resolver( - preparer=preparer, - finder=finder, - options=options, - wheel_cache=wheel_cache, - ignore_requires_python=options.ignore_requires_python, - use_pep517=options.use_pep517, - ) - - self.trace_basic_info(finder) - - requirement_set = resolver.resolve(reqs, check_supported_wheels=True) - - reqs_to_build: List[InstallRequirement] = [] - for req in requirement_set.requirements.values(): - if req.is_wheel: - preparer.save_linked_requirement(req) - elif should_build_for_wheel_command(req): - reqs_to_build.append(req) - - # build wheels - build_successes, build_failures = build( - reqs_to_build, - wheel_cache=wheel_cache, - verify=(not options.no_verify), - build_options=options.build_options or [], - global_options=options.global_options or [], - ) - for req in build_successes: - assert req.link and req.link.is_wheel - assert req.local_file_path - # copy from cache to target directory - try: - shutil.copy(req.local_file_path, options.wheel_dir) - except OSError as e: - logger.warning( - "Building wheel for %s failed: %s", - req.name, - e, - ) - build_failures.append(req) - if len(build_failures) != 0: - raise CommandError("Failed to build one or more wheels") - - return SUCCESS diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/__init__.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/__init__.py deleted file mode 100644 index f3ee6057e3ec2731984ce8203c6eaf5348d08260..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .boxes import Boxes, BoxMode, pairwise_iou, pairwise_ioa, pairwise_point_box_distance -from .image_list import ImageList - -from .instances import Instances -from .keypoints import Keypoints, heatmaps_to_keypoints -from .masks import BitMasks, PolygonMasks, polygons_to_bitmask, ROIMasks -from .rotated_boxes import RotatedBoxes -from .rotated_boxes import pairwise_iou as pairwise_iou_rotated - -__all__ = [k for k in globals().keys() if not k.startswith("_")] - - -from detectron2.utils.env import fixup_module_metadata - -fixup_module_metadata(__name__, globals(), __all__) -del fixup_module_metadata diff --git a/spaces/Banbri/zcvzcv/src/components/ui/slider.tsx b/spaces/Banbri/zcvzcv/src/components/ui/slider.tsx deleted file mode 100644 index 0e35bc7fb000cffa5e29956283ecf7d75453236c..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/components/ui/slider.tsx +++ /dev/null @@ -1,28 +0,0 @@ -"use client" - -import * as React from "react" -import * as SliderPrimitive from "@radix-ui/react-slider" - -import { cn } from "@/lib/utils" - -const Slider = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - - - -)) -Slider.displayName = SliderPrimitive.Root.displayName - -export { Slider } diff --git a/spaces/Bart92/RVC_HF/utils/clonerepo_experimental.py b/spaces/Bart92/RVC_HF/utils/clonerepo_experimental.py deleted file mode 100644 index b0ae02648c1307562cf48033908edcf2996db5e2..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/utils/clonerepo_experimental.py +++ /dev/null @@ -1,253 +0,0 @@ -import os -import subprocess -import shutil -from concurrent.futures import ThreadPoolExecutor, as_completed -from tqdm.notebook import tqdm -from pathlib import Path -import requests - -def run_script(): - def run_cmd(cmd): - process = subprocess.run(cmd, shell=True, check=True, text=True) - return process.stdout - - # Change the current directory to /content/ - os.chdir('/content/') - print("Changing dir to /content/") - - # Your function to edit the file - def edit_file(file_path): - temp_file_path = "/tmp/temp_file.py" - changes_made = False - with open(file_path, "r") as file, open(temp_file_path, "w") as temp_file: - previous_line = "" - second_previous_line = "" - for line in file: - new_line = line.replace("value=160", "value=128") - if new_line != line: - print("Replaced 'value=160' with 'value=128'") - changes_made = True - line = new_line - - new_line = line.replace("crepe hop length: 160", "crepe hop length: 128") - if new_line != line: - print("Replaced 'crepe hop length: 160' with 'crepe hop length: 128'") - changes_made = True - line = new_line - - new_line = line.replace("value=0.88", "value=0.75") - if new_line != line: - print("Replaced 'value=0.88' with 'value=0.75'") - changes_made = True - line = new_line - - if "label=i18n(\"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络\")" in previous_line and "value=1," in line: - new_line = line.replace("value=1,", "value=0.25,") - if new_line != line: - print("Replaced 'value=1,' with 'value=0.25,' based on the condition") - changes_made = True - line = new_line - - if "label=i18n(\"总训练轮数total_epoch\")" in previous_line and "value=20," in line: - new_line = line.replace("value=20,", "value=500,") - if new_line != line: - print("Replaced 'value=20,' with 'value=500,' based on the condition for DEFAULT EPOCH") - changes_made = True - line = new_line - - if 'choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny"], # Fork Feature. Add Crepe-Tiny' in previous_line: - if 'value="pm",' in line: - new_line = line.replace('value="pm",', 'value="mangio-crepe",') - if new_line != line: - print("Replaced 'value=\"pm\",' with 'value=\"mangio-crepe\",' based on the condition") - changes_made = True - line = new_line - - new_line = line.replace('label=i18n("输入训练文件夹路径"), value="E:\\\\语音音频+标注\\\\米津玄师\\\\src"', 'label=i18n("输入训练文件夹路径"), value="/content/dataset/"') - if new_line != line: - print("Replaced 'label=i18n(\"输入训练文件夹路径\"), value=\"E:\\\\语音音频+标注\\\\米津玄师\\\\src\"' with 'label=i18n(\"输入训练文件夹路径\"), value=\"/content/dataset/\"'") - changes_made = True - line = new_line - - if 'label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),' in second_previous_line: - if 'value=i18n("否"),' in line: - new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),') - if new_line != line: - print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE ONLY LATEST") - changes_made = True - line = new_line - - if 'label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),' in second_previous_line: - if 'value=i18n("否"),' in line: - new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),') - if new_line != line: - print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE SMALL WEIGHTS") - changes_made = True - line = new_line - - temp_file.write(line) - second_previous_line = previous_line - previous_line = line - - # After finished, we replace the original file with the temp one - import shutil - shutil.move(temp_file_path, file_path) - - if changes_made: - print("Changes made and file saved successfully.") - else: - print("No changes were needed.") - - # Define the repo path - repo_path = '/content/Applio-RVC-Fork' - - def copy_all_files_in_directory(src_dir, dest_dir): - # Iterate over all files in source directory - for item in Path(src_dir).glob('*'): - if item.is_file(): - # Copy each file to destination directory - shutil.copy(item, dest_dir) - else: - # If it's a directory, make a new directory in the destination and copy the files recursively - new_dest = Path(dest_dir) / item.name - new_dest.mkdir(exist_ok=True) - copy_all_files_in_directory(str(item), str(new_dest)) - - def clone_and_copy_repo(repo_path): - # New repository link - new_repo_link = "https://github.com/IAHispano/Applio-RVC-Fork/" - # Temporary path to clone the repository - temp_repo_path = "/content/temp_Applio-RVC-Fork" - # New folder name - new_folder_name = "Applio-RVC-Fork" - - # Clone the latest code from the new repository to a temporary location - run_cmd(f"git clone {new_repo_link} {temp_repo_path}") - os.chdir(temp_repo_path) - - run_cmd(f"git checkout 3fa4dad3d8961e5ca2522e9e12c0b4ddb71ad402") - run_cmd(f"git checkout f9e606c279cb49420597519b0a83b92be81e42e4") - run_cmd(f"git checkout 9e305588844c5442d58add1061b29beeca89d679") - run_cmd(f"git checkout bf92dc1eb54b4f28d6396a4d1820a25896cc9af8") - run_cmd(f"git checkout c3810e197d3cb98039973b2f723edf967ecd9e61") - run_cmd(f"git checkout a33159efd134c2413b0afe26a76b7dc87926d2de") - run_cmd(f"git checkout 24e251fb62c662e39ac5cf9253cc65deb9be94ec") - run_cmd(f"git checkout ad5667d3017e93232dba85969cddac1322ba2902") - run_cmd(f"git checkout ce9715392cf52dd5a0e18e00d1b5e408f08dbf27") - run_cmd(f"git checkout 7c7da3f2ac68f3bd8f3ad5ca5c700f18ab9f90eb") - run_cmd(f"git checkout 4ac395eab101955e8960b50d772c26f592161764") - run_cmd(f"git checkout b15b358702294c7375761584e5276c811ffab5e8") - run_cmd(f"git checkout 1501793dc490982db9aca84a50647764caa66e51") - run_cmd(f"git checkout 21f7faf57219c75e6ba837062350391a803e9ae2") - run_cmd(f"git checkout b5eb689fbc409b49f065a431817f822f554cebe7") - run_cmd(f"git checkout 7e02fae1ebf24cb151bf6cbe787d06734aa65862") - run_cmd(f"git checkout 6aea5ea18ed0b9a1e03fa5d268d6bc3c616672a9") - run_cmd(f"git checkout f0f9b25717e59116473fb42bd7f9252cfc32b398") - run_cmd(f"git checkout b394de424088a81fc081224bc27338a8651ad3b2") - run_cmd(f"git checkout f1999406a88b80c965d2082340f5ea2bfa9ab67a") - run_cmd(f"git checkout d98a0fa8dc715308dfc73eac5c553b69c6ee072b") - run_cmd(f"git checkout d73267a415fb0eba98477afa43ef71ffd82a7157") - run_cmd(f"git checkout 1a03d01356ae79179e1fb8d8915dc9cc79925742") - run_cmd(f"git checkout 81497bb3115e92c754300c9b3992df428886a3e9") - run_cmd(f"git checkout c5af1f8edcf79cb70f065c0110e279e78e48caf9") - run_cmd(f"git checkout cdb3c90109387fa4dfa92f53c3864c71170ffc77") - - # Edit the file here, before copying - #edit_file(f"{temp_repo_path}/infer-web.py") - - # Copy all files from the cloned repository to the existing path - copy_all_files_in_directory(temp_repo_path, repo_path) - print(f"Copying all {new_folder_name} files from GitHub.") - - # Change working directory back to /content/ - os.chdir('/content/') - print("Changed path back to /content/") - - # Remove the temporary cloned repository - shutil.rmtree(temp_repo_path) - - # Call the function - clone_and_copy_repo(repo_path) - - # Download the credentials file for RVC archive sheet - os.makedirs('/content/Applio-RVC-Fork/stats/', exist_ok=True) - run_cmd("wget -q https://cdn.discordapp.com/attachments/945486970883285045/1114717554481569802/peppy-generator-388800-07722f17a188.json -O /content/Applio-RVC-Fork/stats/peppy-generator-388800-07722f17a188.json") - - # Forcefully delete any existing torchcrepe dependencies downloaded from an earlier run just in case - shutil.rmtree('/content/Applio-RVC-Fork/torchcrepe', ignore_errors=True) - shutil.rmtree('/content/torchcrepe', ignore_errors=True) - - # Download the torchcrepe folder from the maxrmorrison/torchcrepe repository - run_cmd("git clone https://github.com/maxrmorrison/torchcrepe.git") - shutil.move('/content/torchcrepe/torchcrepe', '/content/Applio-RVC-Fork/') - shutil.rmtree('/content/torchcrepe', ignore_errors=True) # Delete the torchcrepe repository folder - - # Change the current directory to /content/Applio-RVC-Fork - os.chdir('/content/Applio-RVC-Fork') - os.makedirs('pretrained', exist_ok=True) - os.makedirs('uvr5_weights', exist_ok=True) - -def download_file(url, filepath): - response = requests.get(url, stream=True) - response.raise_for_status() - - with open(filepath, "wb") as file: - for chunk in response.iter_content(chunk_size=8192): - if chunk: - file.write(chunk) - -def download_pretrained_models(): - pretrained_models = { - "pretrained": [ - "D40k.pth", - "G40k.pth", - "f0D40k.pth", - "f0G40k.pth" - ], - "pretrained_v2": [ - "D40k.pth", - "G40k.pth", - "f0D40k.pth", - "f0G40k.pth", - "f0G48k.pth", - "f0D48k.pth" - ], - "uvr5_weights": [ - "HP2-人声vocals+非人声instrumentals.pth", - "HP5-主旋律人声vocals+其他instrumentals.pth", - "VR-DeEchoNormal.pth", - "VR-DeEchoDeReverb.pth", - "VR-DeEchoAggressive.pth", - "HP5_only_main_vocal.pth", - "HP3_all_vocals.pth", - "HP2_all_vocals.pth" - ] - } - part2 = "I" - base_url = "https://huggingface.co/lj1995/VoiceConversionWebU" + part2 + "/resolve/main/" - base_path = "/content/Applio-RVC-Fork/" - base_pathm = base_path - - # Calculate total number of files to download - total_files = sum(len(files) for files in pretrained_models.values()) + 1 # +1 for hubert_base.pt - - with tqdm(total=total_files, desc="Downloading files") as pbar: - for folder, models in pretrained_models.items(): - folder_path = os.path.join(base_path, folder) - os.makedirs(folder_path, exist_ok=True) - for model in models: - url = base_url + folder + "/" + model - filepath = os.path.join(folder_path, model) - download_file(url, filepath) - pbar.update() - - # Download hubert_base.pt to the base path - hubert_url = base_url + "hubert_base.pt" - hubert_filepath = os.path.join(base_pathm, "hubert_base.pt") - download_file(hubert_url, hubert_filepath) - pbar.update() -def clone_repository(run_download): - with ThreadPoolExecutor(max_workers=2) as executor: - executor.submit(run_script) - if run_download: - executor.submit(download_pretrained_models) diff --git a/spaces/Benson/text-generation/Examples/Descarga De Vdeo De Letras De Pantalla Negra.md b/spaces/Benson/text-generation/Examples/Descarga De Vdeo De Letras De Pantalla Negra.md deleted file mode 100644 index bcd66c6e0cb5ac708f2a27d6becdd64633d5df4c..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descarga De Vdeo De Letras De Pantalla Negra.md +++ /dev/null @@ -1,98 +0,0 @@ -
    -

    Cómo hacer un video de letras de pantalla negra para tu música

    -

    Si eres un músico o un amante de la música, es posible que hayas visto algunos videos en YouTube, Instagram o TikTok que muestran solo la letra de una canción en un fondo negro. Estos se llaman videos de letras de pantalla negra, y se están volviendo cada vez más populares entre artistas y fans. Pero, ¿cuáles son exactamente, y cómo puedes hacer uno para tu propia música? En este artículo, explicaremos lo que es un video de letras de pantalla negra, cómo hacer uno con diferentes software y aplicaciones, cuáles son los beneficios de hacer uno, y cuáles son los mejores software y aplicaciones para hacer uno.

    -

    descarga de vídeo de letras de pantalla negra


    Download Ziphttps://bltlly.com/2v6Kgr



    -

    ¿Qué es una pantalla negra?

    -

    Un video de letras en pantalla negra es un tipo de video musical que muestra solo la letra de una canción sobre un fondo negro. Las letras suelen estar animadas o sincronizadas con la música, y a veces tienen algunos efectos o transiciones para hacerlas más atractivas. El foco principal de un video de letras en pantalla negra está en las palabras y el sonido de la canción, en lugar de en las imágenes o la historia. Esto hace que sea más fácil para los espectadores cantar, aprender las letras, o apreciar el mensaje de la canción.

    -

    ¿Cómo hacer una pantalla negra Lyrics Video?

    -

    Hay diferentes maneras de hacer un video de letras de pantalla negra, dependiendo de su preferencia, presupuesto y nivel de habilidad. Puede utilizar un software de edición de vídeo profesional en su ordenador, como Adobe Premiere Pro o Final Cut Pro, o puede utilizar un editor de vídeo en línea que funciona en su navegador, como VEED.IO o Kapwing. También puede usar una aplicación móvil en su teléfono o tableta, como Alight Motion o VN App. Aquí están los pasos básicos para hacer un video de letras de pantalla negra:

    -
      -
    1. Sube tu canción al software o aplicación de tu elección.
    2. -
    3. Añadir un fondo negro a su vídeo.
    4. -
    5. Agrega tus letras a tu video. Puedes escribirlas manualmente o usar un generador de subtítulos automático que transcriba tu canción en texto.
    6. - -
    7. Anima tus letras para que se muevan en sincronía con la música. Puedes usar diferentes transiciones, efectos o presets para hacerlos más dinámicos.
    8. -
    9. Previsualizar y exportar su vídeo. Puede ajustar la resolución, el formato y la calidad de su vídeo según sus necesidades.
    10. -
    11. Comparte tu video en línea. Puedes subir tu video a YouTube, Instagram, TikTok, Facebook o cualquier otra plataforma donde quieras mostrar tu música.
    12. -
    -

    Beneficios de la pantalla negra Lyrics Video

    -

    Hacer un video de letras en pantalla negra para tu música tiene muchos beneficios. Aquí están algunos de ellos:

    -
      -
    • Puedes atraer más espectadores y oyentes con imágenes mínimas y letras pegadizas. Un video con letras en pantalla negra es simple pero efectivo. Llama la atención sobre las palabras y el sonido de tu canción, en lugar de distraerlos con imágenes o escenas innecesarias. De esta manera, puedes transmitir tu mensaje de forma más clara y directa a tu audiencia.
    • -
    • Puede ahorrar tiempo y dinero en la producción y edición de video. Un video de letras de pantalla negra es fácil y barato de hacer. Usted no necesita ningún equipo costoso, software, o habilidades para crear uno. Puede utilizar cualquier software o aplicación que ya tiene o puede acceder en línea de forma gratuita. También puede terminar su video en cuestión de minutos u horas, en lugar de días o semanas.
    • -
    • Puedes expresar tu creatividad y estilo con fuentes, colores y efectos personalizados. Un video de letras en pantalla negra no es aburrido ni sencillo. Aún puedes hacerlo único e interesante eligiendo diferentes fuentes, colores y efectos para tus letras. También puede agregar un visualizador de música o una herramienta de limpieza de audio para mejorar su calidad de sonido y apariencia.
    • -
    -

    El mejor software y aplicaciones para Black Screen Lyrics Video

    -

    Hay muchos programas y aplicaciones que puedes usar para hacer un video de letras en pantalla negra, pero algunos de ellos son mejores que otros. Estos son algunos de los mejores que recomendamos:

    -

    - -
    Software/AplicaciónPrecio
    VEED.IO- Un editor de video en línea profesional con generador automático de subtítulos, visualizador de música y herramienta de limpieza de audio - Soporta varios formatos, resoluciones y plataformas - Permite proyectos ilimitados, superposiciones y descargas Tiene una interfaz fácil de usar y una velocidad de procesamiento rápida- Plan gratuito con marca de agua y características limitadas - Planes de pago a partir de $12/mes sin marca de agua y más características
    Kapwing- Un editor de video en línea gratuito con miles de plantillas, superposiciones ilimitadas y opciones fáciles de compartir - Soporta varios formatos, resoluciones y plataformas - Permite hasta 250 MB por archivo y 7 minutos por video - Tiene una interfaz sencilla y un espacio de trabajo colaborativo- Plan gratuito con marca de agua y características limitadas - Plan pagado por $20/mes sin marca de agua y más características
    Movimiento de descarga- Un potente editor de vídeo móvil con funciones avanzadas, presets y animaciones - Soporta varios formatos, resoluciones y plataformas - Permite capas ilimitadas, fotogramas clave y efectos - Tiene una interfaz personalizable y un rendimiento suave- Plan gratuito con marca de agua y características limitadas - Planes de pago a partir de $3.99/mes sin marca de agua y más características
    - - - - - - - - - - - - -
    DiffSpeech vs. FastSpeech 2
    DiffSpeech-vs-FastSpeech2
    DiffSpeech-vs-FastSpeech2
    DiffSpeech-vs-FastSpeech2
    \ No newline at end of file diff --git a/spaces/Situme/Wockabocka/Dockerfile b/spaces/Situme/Wockabocka/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/Situme/Wockabocka/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/StatsByZach/app/gsax_timeline.py b/spaces/StatsByZach/app/gsax_timeline.py deleted file mode 100644 index add493772c9272e78eeef68e4df9afed27e2147c..0000000000000000000000000000000000000000 --- a/spaces/StatsByZach/app/gsax_timeline.py +++ /dev/null @@ -1,171 +0,0 @@ -##### gsax_timeline.py ##### -# A program to visualize goalies GSAx throughout a season -# Zach Andrews - -# Import modules -from shiny import * -import shinyswatch -import plotly.express as px -from shinywidgets import output_widget, render_widget -import pandas as pd -from configure import base_url - -# Paths to data -gsaxd = "data/gsax_by_date.csv" -gsaxt = "data/total_gsax.csv" - -# App -data = pd.read_csv(gsaxd) -choices = data['Goalie'].value_counts().keys().sort_values().tolist() -def server(input,output,session): - @output - @render_widget - def my_widget(): - df = pd.read_csv(gsaxd) - goalies = list(input.x()) - df = df[df['Goalie'].isin(goalies)] - key = df[['Goalie','Team','Color']] - a = df.groupby("Goalie").count() - a = a.reset_index() - l = a['Goalie'].tolist() - seq = [] - for x in l: - cmap = key[key['Goalie']==x]['Color'].tolist()[0] - seq.append(str(cmap)) - df = df.sort_values(by=['Goalie','Date']) - fig = px.line(df, x=input.y(), y="GSAx",color="Goalie",template="plotly_dark",color_discrete_sequence=seq,height=1050,width=1050) - fig.update_layout(xaxis_showgrid=False, yaxis_showgrid=False,plot_bgcolor="#222222",paper_bgcolor="#222222") - fig.update_traces(line=dict(width=5)) - fig.update_layout( - title=("GSAx by "+input.y()+"
    "+ - "2023-24 NHL Regular Season"), - margin=dict(r=20, l=40, b=100, t=90), - template='plotly_dark') - fig.add_annotation( - text = ("Data: @StatsByZach on Twitter") - , showarrow=False - , x = .80 - , y = -.045 - , xref='paper' - , yref='paper' - , xanchor='left' - , yanchor='bottom' - , xshift=-1 - , yshift=-5 - , font=dict(size=11, color="white") - , align="left" - ) - return fig - - @output - @render.table - def table(): - df = pd.read_csv(gsaxt) - if input.z() == "T": - asc = True - else: - asc = False - df = df[['Goalie','GSAx']] - df = df[df['Goalie'].isin(list(input.x()))].sort_values(by=input.b(),ascending=asc) - return df - - @reactive.Effect - def _2(): - btn = input.btn() - if btn % 2 == 1: - tab = ui.output_table("table") - ui.insert_ui( - ui.div({"id": "inserted-slider"},ui.tags.h5("Sort Table by", class_="app-heading"),ui.input_select("b","",{"Goalie":"Goalie","GSAx":"GSAx"}), - ui.input_radio_buttons( - "z", "", {"F": "High to Low", "T": "Low to High"} - ),ui.output_table("table")), - selector="#main-content", - where="beforeEnd", - ) - elif btn > 0: - ui.remove_ui("#inserted-slider") -gsax_timeline = App(ui.page_fluid( - ui.tags.base(href=base_url), - ui.tags.div( - {"style": "width:75%;margin: 0 auto"}, - ui.tags.style( - """ - h4 { - margin-top: 1em;font-size:35px; - } - h2{ - font-size:25px; - } - """ - ), - shinyswatch.theme.darkly(), - ui.tags.h4("Stats By Zach"), - ui.tags.i("A website for hockey analytics"), - ui.navset_tab( - ui.nav_control( - ui.a( - "Home", - href="home/" - ), - ), - ui.nav_menu( - "Skater Charts", - ui.nav_control( - ui.a( - "On-Ice xG Rates", - href="skater-xg-rates/" - ), - ui.a( - "On-Ice xGF%", - href="skater-xg-percentages/" - ), - ), - ), - ui.nav_menu( - "Goalie Charts", - ui.nav_control( - ui.a( - "GSAx Timeline", - href="gsax-timeline/" - ), - ui.a( - "GSAx Leaderboard", - href="gsax-leaderboard/" - ), - ui.a( - "GSAx Comparison", - href="gsax-comparison/" - ) - ), - ),ui.nav_menu( - "Team Charts", - ui.nav_control( - ui.a( - "Team xG Rates", - href="team-xg-rates/" - ), - ), - ),ui.nav_control( - ui.a( - "Games", - href="games/" - ), - ),ui.nav_control( - ui.a( - "About", - href="about/" - ), - )),ui.row( - ui.column(3,ui.tags.br(),ui.tags.h2("GSAx Timeline Charts"),ui.tags.h5("Select a Goalie", class_="app-heading"),ui.input_selectize("x", "", choices, multiple = True), - ui.tags.h5("X-Axis", class_="app-heading"), ui.input_radio_buttons( - "y", - "", - { - "Date":"Date", - "Appearance Number": "Appearance Number", - }, - ),ui.input_action_button("btn", "Toggle Table"),ui.div({"id":"main-content"}), - #ui.tags.h5("Selected Goalies", class_="app-heading"), - #ui.output_table("table") - ), - ui.column(9,output_widget("my_widget"))),)),server) \ No newline at end of file diff --git a/spaces/Sumit7864/Image-Enhancer/setup.py b/spaces/Sumit7864/Image-Enhancer/setup.py deleted file mode 100644 index c2b92e31d2db1aba50767f4f844540cfd53c609d..0000000000000000000000000000000000000000 --- a/spaces/Sumit7864/Image-Enhancer/setup.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python - -from setuptools import find_packages, setup - -import os -import subprocess -import time - -version_file = 'realesrgan/version.py' - - -def readme(): - with open('README.md', encoding='utf-8') as f: - content = f.read() - return content - - -def get_git_hash(): - - def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH', 'HOME']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] - return out - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - sha = out.strip().decode('ascii') - except OSError: - sha = 'unknown' - - return sha - - -def get_hash(): - if os.path.exists('.git'): - sha = get_git_hash()[:7] - else: - sha = 'unknown' - - return sha - - -def write_version_py(): - content = """# GENERATED VERSION FILE -# TIME: {} -__version__ = '{}' -__gitsha__ = '{}' -version_info = ({}) -""" - sha = get_hash() - with open('VERSION', 'r') as f: - SHORT_VERSION = f.read().strip() - VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')]) - - version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO) - with open(version_file, 'w') as f: - f.write(version_file_str) - - -def get_version(): - with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) - return locals()['__version__'] - - -def get_requirements(filename='requirements.txt'): - here = os.path.dirname(os.path.realpath(__file__)) - with open(os.path.join(here, filename), 'r') as f: - requires = [line.replace('\n', '') for line in f.readlines()] - return requires - - -if __name__ == '__main__': - write_version_py() - setup( - name='realesrgan', - version=get_version(), - description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration', - long_description=readme(), - long_description_content_type='text/markdown', - author='Xintao Wang', - author_email='xintao.wang@outlook.com', - keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan', - url='https://github.com/xinntao/Real-ESRGAN', - include_package_data=True, - packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')), - classifiers=[ - 'Development Status :: 4 - Beta', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - ], - license='BSD-3-Clause License', - setup_requires=['cython', 'numpy'], - install_requires=get_requirements(), - zip_safe=False) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageMode.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageMode.py deleted file mode 100644 index a0b33514296df734501c553493b0a535eca49046..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageMode.py +++ /dev/null @@ -1,90 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# standard mode descriptors -# -# History: -# 2006-03-20 fl Added -# -# Copyright (c) 2006 by Secret Labs AB. -# Copyright (c) 2006 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# - -import sys - -# mode descriptor cache -_modes = None - - -class ModeDescriptor: - """Wrapper for mode strings.""" - - def __init__(self, mode, bands, basemode, basetype, typestr): - self.mode = mode - self.bands = bands - self.basemode = basemode - self.basetype = basetype - self.typestr = typestr - - def __str__(self): - return self.mode - - -def getmode(mode): - """Gets a mode descriptor for the given mode.""" - global _modes - if not _modes: - # initialize mode cache - modes = {} - endian = "<" if sys.byteorder == "little" else ">" - for m, (basemode, basetype, bands, typestr) in { - # core modes - # Bits need to be extended to bytes - "1": ("L", "L", ("1",), "|b1"), - "L": ("L", "L", ("L",), "|u1"), - "I": ("L", "I", ("I",), endian + "i4"), - "F": ("L", "F", ("F",), endian + "f4"), - "P": ("P", "L", ("P",), "|u1"), - "RGB": ("RGB", "L", ("R", "G", "B"), "|u1"), - "RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"), - "RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"), - "CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"), - "YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"), - # UNDONE - unsigned |u1i1i1 - "LAB": ("RGB", "L", ("L", "A", "B"), "|u1"), - "HSV": ("RGB", "L", ("H", "S", "V"), "|u1"), - # extra experimental modes - "RGBa": ("RGB", "L", ("R", "G", "B", "a"), "|u1"), - "BGR;15": ("RGB", "L", ("B", "G", "R"), "|u1"), - "BGR;16": ("RGB", "L", ("B", "G", "R"), "|u1"), - "BGR;24": ("RGB", "L", ("B", "G", "R"), "|u1"), - "LA": ("L", "L", ("L", "A"), "|u1"), - "La": ("L", "L", ("L", "a"), "|u1"), - "PA": ("RGB", "L", ("P", "A"), "|u1"), - }.items(): - modes[m] = ModeDescriptor(m, bands, basemode, basetype, typestr) - # mapping modes - for i16mode, typestr in { - # I;16 == I;16L, and I;32 == I;32L - "I;16": "u2", - "I;16BS": ">i2", - "I;16N": endian + "u2", - "I;16NS": endian + "i2", - "I;32": "u4", - "I;32L": "i4", - "I;32LS": " int: - if self.buf_loc < self.buf_sz: - self.buf_loc += 1 - return self.buffer[self.buf_loc - 1] - self.buf_sz = 0 - self.buf_loc = 0 - chunk = next(self.gen, None) - if not chunk: - raise StreamCompleteException - x = len(chunk) - if x > 1: - self.buffer = chunk - self.buf_loc = 1 - self.buf_sz = x - return chunk[0] - - def read_leb128(self) -> int: - sz = 0 - shift = 0 - while True: - b = self.read_byte() - sz += ((b & 0x7f) << shift) - if (b & 0x80) == 0: - return sz - shift += 7 - - def read_leb128_str(self) -> str: - sz = self.read_leb128() - return self.read_bytes(sz).decode() - - def read_uint64(self) -> int: - return int.from_bytes(self.read_bytes(8), 'little', signed=False) - - def read_str_col(self, - num_rows: int, - encoding: str, - nullable: bool = False, - null_obj: Any = None) -> Iterable[str]: - column = [] - app = column.append - null_map = self.read_bytes(num_rows) if nullable else None - for ix in range(num_rows): - sz = 0 - shift = 0 - while True: - b = self.read_byte() - sz += ((b & 0x7f) << shift) - if (b & 0x80) == 0: - break - shift += 7 - x = self.read_bytes(sz) - if null_map and null_map[ix]: - app(null_obj) - elif encoding: - try: - app(x.decode(encoding)) - except UnicodeDecodeError: - app(x.hex()) - else: - app(x) - return column - - def read_bytes_col(self, sz: int, num_rows: int) -> Iterable[bytes]: - source = self.read_bytes(sz * num_rows) - return [bytes(source[x:x+sz]) for x in range(0, sz * num_rows, sz)] - - def read_fixed_str_col(self, sz: int, num_rows: int, encoding: str) -> Iterable[str]: - source = self.read_bytes(sz * num_rows) - column = [] - app = column.append - for ix in range(0, sz * num_rows, sz): - try: - app(str(source[ix: ix + sz], encoding).rstrip('\x00')) - except UnicodeDecodeError: - app(source[ix: ix + sz].hex()) - return column - - def read_array(self, array_type: str, num_rows: int) -> Iterable[Any]: - column = array.array(array_type) - sz = column.itemsize * num_rows - b = self.read_bytes(sz) - column.frombytes(b) - if must_swap: - column.byteswap() - return column - - @property - def last_message(self): - if len(self.buffer) == 0: - return None - return self.buffer.decode() - - def close(self): - if self.source: - self.source.close() - self.source = None diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/embedding/tensorflow.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/embedding/tensorflow.py deleted file mode 100644 index a5e408d60559e3f04abfd2bae9eb20952a170896..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/embedding/tensorflow.py +++ /dev/null @@ -1,19 +0,0 @@ -from typing import Any # noqa: F401 - -from docarray.typing.proto_register import _register_proto -from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin -from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor - -tensorflow_base = type(TensorFlowTensor) # type: Any -embedding_base = type(EmbeddingMixin) # type: Any - - -class metaTensorFlowAndEmbedding(tensorflow_base, embedding_base): - pass - - -@_register_proto(proto_type_name='tensorflow_embedding') -class TensorFlowEmbedding( - TensorFlowTensor, EmbeddingMixin, metaclass=metaTensorFlowAndEmbedding -): - alternative_type = TensorFlowTensor diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/base_module.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/base_module.py deleted file mode 100644 index 617fad9bb89f10a9a0911d962dfb3bc8f3a3628c..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/base_module.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import warnings -from abc import ABCMeta -from collections import defaultdict -from logging import FileHandler - -import torch.nn as nn - -from annotator.uniformer.mmcv.runner.dist_utils import master_only -from annotator.uniformer.mmcv.utils.logging import get_logger, logger_initialized, print_log - - -class BaseModule(nn.Module, metaclass=ABCMeta): - """Base module for all modules in openmmlab. - - ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional - functionality of parameter initialization. Compared with - ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. - - - ``init_cfg``: the config to control the initialization. - - ``init_weights``: The function of parameter - initialization and recording initialization - information. - - ``_params_init_info``: Used to track the parameter - initialization information. This attribute only - exists during executing the ``init_weights``. - - Args: - init_cfg (dict, optional): Initialization config dict. - """ - - def __init__(self, init_cfg=None): - """Initialize BaseModule, inherited from `torch.nn.Module`""" - - # NOTE init_cfg can be defined in different levels, but init_cfg - # in low levels has a higher priority. - - super(BaseModule, self).__init__() - # define default value of init_cfg instead of hard code - # in init_weights() function - self._is_init = False - - self.init_cfg = copy.deepcopy(init_cfg) - - # Backward compatibility in derived classes - # if pretrained is not None: - # warnings.warn('DeprecationWarning: pretrained is a deprecated \ - # key, please consider using init_cfg') - # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - - @property - def is_init(self): - return self._is_init - - def init_weights(self): - """Initialize the weights.""" - - is_top_level_module = False - # check if it is top-level module - if not hasattr(self, '_params_init_info'): - # The `_params_init_info` is used to record the initialization - # information of the parameters - # the key should be the obj:`nn.Parameter` of model and the value - # should be a dict containing - # - init_info (str): The string that describes the initialization. - # - tmp_mean_value (FloatTensor): The mean of the parameter, - # which indicates whether the parameter has been modified. - # this attribute would be deleted after all parameters - # is initialized. - self._params_init_info = defaultdict(dict) - is_top_level_module = True - - # Initialize the `_params_init_info`, - # When detecting the `tmp_mean_value` of - # the corresponding parameter is changed, update related - # initialization information - for name, param in self.named_parameters(): - self._params_init_info[param][ - 'init_info'] = f'The value is the same before and ' \ - f'after calling `init_weights` ' \ - f'of {self.__class__.__name__} ' - self._params_init_info[param][ - 'tmp_mean_value'] = param.data.mean() - - # pass `params_init_info` to all submodules - # All submodules share the same `params_init_info`, - # so it will be updated when parameters are - # modified at any level of the model. - for sub_module in self.modules(): - sub_module._params_init_info = self._params_init_info - - # Get the initialized logger, if not exist, - # create a logger named `mmcv` - logger_names = list(logger_initialized.keys()) - logger_name = logger_names[0] if logger_names else 'mmcv' - - from ..cnn import initialize - from ..cnn.utils.weight_init import update_init_info - module_name = self.__class__.__name__ - if not self._is_init: - if self.init_cfg: - print_log( - f'initialize {module_name} with init_cfg {self.init_cfg}', - logger=logger_name) - initialize(self, self.init_cfg) - if isinstance(self.init_cfg, dict): - # prevent the parameters of - # the pre-trained model - # from being overwritten by - # the `init_weights` - if self.init_cfg['type'] == 'Pretrained': - return - - for m in self.children(): - if hasattr(m, 'init_weights'): - m.init_weights() - # users may overload the `init_weights` - update_init_info( - m, - init_info=f'Initialized by ' - f'user-defined `init_weights`' - f' in {m.__class__.__name__} ') - - self._is_init = True - else: - warnings.warn(f'init_weights of {self.__class__.__name__} has ' - f'been called more than once.') - - if is_top_level_module: - self._dump_init_info(logger_name) - - for sub_module in self.modules(): - del sub_module._params_init_info - - @master_only - def _dump_init_info(self, logger_name): - """Dump the initialization information to a file named - `initialization.log.json` in workdir. - - Args: - logger_name (str): The name of logger. - """ - - logger = get_logger(logger_name) - - with_file_handler = False - # dump the information to the logger file if there is a `FileHandler` - for handler in logger.handlers: - if isinstance(handler, FileHandler): - handler.stream.write( - 'Name of parameter - Initialization information\n') - for name, param in self.named_parameters(): - handler.stream.write( - f'\n{name} - {param.shape}: ' - f"\n{self._params_init_info[param]['init_info']} \n") - handler.stream.flush() - with_file_handler = True - if not with_file_handler: - for name, param in self.named_parameters(): - print_log( - f'\n{name} - {param.shape}: ' - f"\n{self._params_init_info[param]['init_info']} \n ", - logger=logger_name) - - def __repr__(self): - s = super().__repr__() - if self.init_cfg: - s += f'\ninit_cfg={self.init_cfg}' - return s - - -class Sequential(BaseModule, nn.Sequential): - """Sequential module in openmmlab. - - Args: - init_cfg (dict, optional): Initialization config dict. - """ - - def __init__(self, *args, init_cfg=None): - BaseModule.__init__(self, init_cfg) - nn.Sequential.__init__(self, *args) - - -class ModuleList(BaseModule, nn.ModuleList): - """ModuleList in openmmlab. - - Args: - modules (iterable, optional): an iterable of modules to add. - init_cfg (dict, optional): Initialization config dict. - """ - - def __init__(self, modules=None, init_cfg=None): - BaseModule.__init__(self, init_cfg) - nn.ModuleList.__init__(self, modules) diff --git a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/utils/arg_utils.py b/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/utils/arg_utils.py deleted file mode 100644 index 8a3004ec3679c0a40fd8961253733fb4343ad545..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/utils/arg_utils.py +++ /dev/null @@ -1,33 +0,0 @@ - - -def infer_type(x): # hacky way to infer type from string args - if not isinstance(x, str): - return x - - try: - x = int(x) - return x - except ValueError: - pass - - try: - x = float(x) - return x - except ValueError: - pass - - return x - - -def parse_unknown(unknown_args): - clean = [] - for a in unknown_args: - if "=" in a: - k, v = a.split("=") - clean.extend([k, v]) - else: - clean.append(a) - - keys = clean[::2] - values = clean[1::2] - return {k.replace("--", ""): infer_type(v) for k, v in zip(keys, values)} diff --git a/spaces/Sybghat/resume-parser/ResumeReader.py b/spaces/Sybghat/resume-parser/ResumeReader.py deleted file mode 100644 index 7f8808b1a559624394fc43907031abb5fc6e1fc2..0000000000000000000000000000000000000000 --- a/spaces/Sybghat/resume-parser/ResumeReader.py +++ /dev/null @@ -1,103 +0,0 @@ -import re -import os -import logging -import pdfplumber -import fitz - -class ResumeReader: - - def convert_docx_to_txt(self, docx_file,docx_parser): - """ - A utility function to convert a Microsoft docx files to raw text. - - This code is largely borrowed from existing solutions, and does not match the style of the rest of this repo. - :param docx_file: docx file with gets uploaded by the user - :type docx_file: InMemoryUploadedFile - :return: The text contents of the docx file - :rtype: str - """ - - # doc = docx.Document(docx_file) - # allText = [] - # for docpara in doc.paragraphs: - # allText.append(docpara.text) - # text = ' '.join(allText) - text = "" - try: - clean_text = re.sub(r'\n+', '\n', text) - clean_text = clean_text.replace("\r", "\n").replace("\t", " ") # Normalize text blob - resume_lines = clean_text.splitlines() # Split text blob into individual lines - resume_lines = [re.sub('\s+', ' ', line.strip()) for line in resume_lines if - line.strip()] # Remove empty strings and whitespaces - return resume_lines, text - except Exception as e: - logging.error('Error in docx file:: ' + str(e)) - return [], " " - - def convert_pdf_to_txt(self, pdf_file): - """ - A utility function to convert a machine-readable PDF to raw text. - - This code is largely borrowed from existing solutions, and does not match the style of the rest of this repo. - :param input_pdf_path: Path to the .pdf file which should be converted - :type input_pdf_path: str - :return: The text contents of the pdf - :rtype: str - """ - - pdf = pdfplumber.open(pdf_file) - raw_text= "" - with fitz.open(pdf_file) as doc: - for page in doc: - raw_text += page.get_text() - print(raw_text) - # for page in pdf.pages: - # raw_text += page.extract_text() + "\n" - - pdf.close() - - try: - full_string = re.sub(r'\n+', '\n', raw_text) - full_string = full_string.replace("\r", "\n") - full_string = full_string.replace("\t", " ") - - # Remove awkward LaTeX bullet characters - full_string = re.sub(r"\uf0b7", " ", full_string) - full_string = re.sub(r"\(cid:\d{0,3}\)", " ", full_string) - full_string = re.sub(r'• ', " ", full_string) - - # Split text blob into individual lines - resume_lines = full_string.splitlines(True) - - # Remove empty strings and whitespaces - resume_lines = [re.sub('\s+', ' ', line.strip()) for line in resume_lines if line.strip()] - - return resume_lines, raw_text - except Exception as e: - logging.error('Error in docx file:: ' + str(e)) - return [], " " - - def read_file(self, file,docx_parser = "tika"): - """ - file : Give path of resume file - docx_parser : Enter docx2txt or tika, by default is tika - """ - print("Reading the Resume...") - # file = "/content/Asst Manager Trust Administration.docx" - file = os.path.join(file) - if file.endswith('docx') or file.endswith('doc'): - # if file.endswith('doc') and docx_parser == "docx2txt": - # docx_parser = "tika" - # logging.error("doc format not supported by the docx2txt changing back to tika") - resume_lines, raw_text = self.convert_docx_to_txt(file,docx_parser) - elif file.endswith('pdf'): - resume_lines, raw_text = self.convert_pdf_to_txt(file) - elif file.endswith('txt'): - with open(file, 'r', encoding='utf-8') as f: - resume_lines = f.readlines() - - else: - resume_lines = None - - - return resume_lines \ No newline at end of file diff --git a/spaces/TEnngal/bingo/src/components/ui/button.tsx b/spaces/TEnngal/bingo/src/components/ui/button.tsx deleted file mode 100644 index 281da005124fa94c89a9a9db7605748a92b60865..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/components/ui/button.tsx +++ /dev/null @@ -1,57 +0,0 @@ -import * as React from 'react' -import { Slot } from '@radix-ui/react-slot' -import { cva, type VariantProps } from 'class-variance-authority' - -import { cn } from '@/lib/utils' - -const buttonVariants = cva( - 'inline-flex items-center justify-center rounded-md text-sm font-medium shadow ring-offset-background transition-colors outline-none disabled:pointer-events-none disabled:opacity-50', - { - variants: { - variant: { - default: - 'bg-primary text-primary-foreground shadow-md hover:bg-primary/90', - destructive: - 'bg-destructive text-destructive-foreground hover:bg-destructive/90', - outline: - 'border border-input hover:bg-accent hover:text-accent-foreground', - secondary: - 'bg-secondary text-secondary-foreground hover:bg-secondary/80', - ghost: 'shadow-none hover:bg-accent hover:text-accent-foreground', - link: 'text-primary underline-offset-4 shadow-none hover:underline' - }, - size: { - default: 'h-8 px-4 py-2', - sm: 'h-8 rounded-md px-3', - lg: 'h-11 rounded-md px-8', - icon: 'h-8 w-8 p-0' - } - }, - defaultVariants: { - variant: 'default', - size: 'default' - } - } -) - -export interface ButtonProps - extends React.ButtonHTMLAttributes, - VariantProps { - asChild?: boolean -} - -const Button = React.forwardRef( - ({ className, variant, size, asChild = false, ...props }, ref) => { - const Comp = asChild ? Slot : 'button' - return ( - - ) - } -) -Button.displayName = 'Button' - -export { Button, buttonVariants } diff --git a/spaces/TEnngal/bingo/src/lib/bots/bing/tts.ts b/spaces/TEnngal/bingo/src/lib/bots/bing/tts.ts deleted file mode 100644 index cd10b7d1d7581bf9cf46ff6755fcca550c558c9b..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/lib/bots/bing/tts.ts +++ /dev/null @@ -1,82 +0,0 @@ -import { sleep } from './utils' - -const synth = window.speechSynthesis - -export class TTS { - currentText = '' - speakText = '' - private controller = new AbortController() - speaking = false - get isSpeaking() { - return this.speaking - } - finished = false - constructor() {} - abort = () => { - this.controller.abort() - } - - reset = () => { - this.speaking = false - this.finished = true - this.currentText = '' - this.speakText = '' - this.abort() - } - - speak = (text: string) => { - if (!synth || text?.trim()?.length < 2) { - return - } - this.currentText = text.replace(/[^\u4e00-\u9fa5_a-zA-Z0-9,。?,:;\.,:]+/g, '') - this.finished = false - this.loop() - } - - private async doSpeek() { - return new Promise((resolve) => { - const endIndex = this.finished ? this.currentText.length : - Math.max( - this.currentText.lastIndexOf('。'), - this.currentText.lastIndexOf(';'), - this.currentText.lastIndexOf('、'), - this.currentText.lastIndexOf('?'), - this.currentText.lastIndexOf('\n') - ) - const startIndex = this.speakText.length ? Math.max(0, this.currentText.lastIndexOf(this.speakText) + this.speakText.length) : 0 - - if (startIndex >= endIndex) { - return resolve(true) - } - const text = this.currentText.slice(startIndex, endIndex) - this.speakText = text - const utterThis = new SpeechSynthesisUtterance(text) - this.controller.signal.onabort = () => { - synth.cancel() - this.finished = true - resolve(false) - } - - utterThis.onend = function (event) { - resolve(true) - } - - utterThis.onerror = function (event) { - resolve(false) - } - - const voice = synth.getVoices().find(v => v.name.includes('Microsoft Yunxi Online')) ?? null - utterThis.voice = voice - synth.speak(utterThis) - }) - } - - private async loop() { - if (this.speaking) return - this.speaking = true - while(!this.finished) { - await Promise.all([sleep(1000), this.doSpeek()]) - } - this.speaking = false - } -} diff --git a/spaces/TMojo/FoodVision_Mini/app.py b/spaces/TMojo/FoodVision_Mini/app.py deleted file mode 100644 index 579a73600771217604aaab1e3006243d2e553d92..0000000000000000000000000000000000000000 --- a/spaces/TMojo/FoodVision_Mini/app.py +++ /dev/null @@ -1,72 +0,0 @@ -### 1. Imports and class names setup ### -import gradio as gr -import os -import torch - -from model import create_effnetb2_model -from timeit import default_timer as timer -from typing import Tuple, Dict - -# Setup class names -class_names = ['pizza', 'steak', 'sushi'] - -### 2. Model and transforms preparation ### -effnetb2, effnetb2_transforms = create_effnetb2_model( - num_classes=len(class_names)) - -# Load saved weights -effnetb2.load_state_dict( - torch.load( - f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi.pth", - map_location=torch.device("cpu") - ) -) - -### 3. Predict Function ### - -def predict(img) -> Tuple[Dict, float]: - """Transforms and performs a prediction on img and returns prediction and time taken. - """ - # Start the timer - start_time = timer() - - # Transform the target image and add a batch dimension - img = effnetb2_transforms(img).unsqueeze(0) - - # Put model into evaluation mode and turn on inference mode - effnetb2.eval() - with torch.inference_mode(): - # Pass the transformed image through the model and turn the prediction logits into prediction probabilities - pred_probs = torch.softmax(effnetb2(img), dim=1) - - # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter) - pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))} - - # Calculate the prediction time - pred_time = round(timer() - start_time, 5) - - # Return the prediction dictionary and prediction time - return pred_labels_and_probs, pred_time - -### 4. Gradio app ### - -# Create title, description and article -title = "FoodVision Mini🍕🥩🍥" -description = "An EfficientNetB2 feature extractor computer vision model to classify images as pizza, steak or sushi" -article = "Created at 09. [PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)." - -# Create example list -example_list = [["examples/" + example] for example in os.listdir("examples")] - -# Create the Gradio demo -demo = gr.Interface(fn=predict, - inputs=gr.Image(type="pil"), - outputs=[gr.Label(num_top_classes=3, label="Predictions"), - gr.Number(label="Prediction time (s)")], - examples=example_list, - title=title, - description=description, - article=article) - -# Launch the demo -demo.launch(debug=False) # Don't need share = true in Hugging Face Spaces diff --git a/spaces/TabooAM/What-game-you-should-play/README.md b/spaces/TabooAM/What-game-you-should-play/README.md deleted file mode 100644 index dc50adfd85c00a8960b31c86c6042ad766719ccf..0000000000000000000000000000000000000000 --- a/spaces/TabooAM/What-game-you-should-play/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: What Game You Should Play -emoji: 👁 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/idna/uts46data.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/idna/uts46data.py deleted file mode 100644 index 186796c17b25c1e766112ef4d9f16bb2dea4b306..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/idna/uts46data.py +++ /dev/null @@ -1,8600 +0,0 @@ -# This file is automatically generated by tools/idna-data -# vim: set fileencoding=utf-8 : - -from typing import List, Tuple, Union - - -"""IDNA Mapping Table from UTS46.""" - - -__version__ = '15.0.0' -def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x0, '3'), - (0x1, '3'), - (0x2, '3'), - (0x3, '3'), - (0x4, '3'), - (0x5, '3'), - (0x6, '3'), - (0x7, '3'), - (0x8, '3'), - (0x9, '3'), - (0xA, '3'), - (0xB, '3'), - (0xC, '3'), - (0xD, '3'), - (0xE, '3'), - (0xF, '3'), - (0x10, '3'), - (0x11, '3'), - (0x12, '3'), - (0x13, '3'), - (0x14, '3'), - (0x15, '3'), - (0x16, '3'), - (0x17, '3'), - (0x18, '3'), - (0x19, '3'), - (0x1A, '3'), - (0x1B, '3'), - (0x1C, '3'), - (0x1D, '3'), - (0x1E, '3'), - (0x1F, '3'), - (0x20, '3'), - (0x21, '3'), - (0x22, '3'), - (0x23, '3'), - (0x24, '3'), - (0x25, '3'), - (0x26, '3'), - (0x27, '3'), - (0x28, '3'), - (0x29, '3'), - (0x2A, '3'), - (0x2B, '3'), - (0x2C, '3'), - (0x2D, 'V'), - (0x2E, 'V'), - (0x2F, '3'), - (0x30, 'V'), - (0x31, 'V'), - (0x32, 'V'), - (0x33, 'V'), - (0x34, 'V'), - (0x35, 'V'), - (0x36, 'V'), - (0x37, 'V'), - (0x38, 'V'), - (0x39, 'V'), - (0x3A, '3'), - (0x3B, '3'), - (0x3C, '3'), - (0x3D, '3'), - (0x3E, '3'), - (0x3F, '3'), - (0x40, '3'), - (0x41, 'M', 'a'), - (0x42, 'M', 'b'), - (0x43, 'M', 'c'), - (0x44, 'M', 'd'), - (0x45, 'M', 'e'), - (0x46, 'M', 'f'), - (0x47, 'M', 'g'), - (0x48, 'M', 'h'), - (0x49, 'M', 'i'), - (0x4A, 'M', 'j'), - (0x4B, 'M', 'k'), - (0x4C, 'M', 'l'), - (0x4D, 'M', 'm'), - (0x4E, 'M', 'n'), - (0x4F, 'M', 'o'), - (0x50, 'M', 'p'), - (0x51, 'M', 'q'), - (0x52, 'M', 'r'), - (0x53, 'M', 's'), - (0x54, 'M', 't'), - (0x55, 'M', 'u'), - (0x56, 'M', 'v'), - (0x57, 'M', 'w'), - (0x58, 'M', 'x'), - (0x59, 'M', 'y'), - (0x5A, 'M', 'z'), - (0x5B, '3'), - (0x5C, '3'), - (0x5D, '3'), - (0x5E, '3'), - (0x5F, '3'), - (0x60, '3'), - (0x61, 'V'), - (0x62, 'V'), - (0x63, 'V'), - ] - -def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x64, 'V'), - (0x65, 'V'), - (0x66, 'V'), - (0x67, 'V'), - (0x68, 'V'), - (0x69, 'V'), - (0x6A, 'V'), - (0x6B, 'V'), - (0x6C, 'V'), - (0x6D, 'V'), - (0x6E, 'V'), - (0x6F, 'V'), - (0x70, 'V'), - (0x71, 'V'), - (0x72, 'V'), - (0x73, 'V'), - (0x74, 'V'), - (0x75, 'V'), - (0x76, 'V'), - (0x77, 'V'), - (0x78, 'V'), - (0x79, 'V'), - (0x7A, 'V'), - (0x7B, '3'), - (0x7C, '3'), - (0x7D, '3'), - (0x7E, '3'), - (0x7F, '3'), - (0x80, 'X'), - (0x81, 'X'), - (0x82, 'X'), - (0x83, 'X'), - (0x84, 'X'), - (0x85, 'X'), - (0x86, 'X'), - (0x87, 'X'), - (0x88, 'X'), - (0x89, 'X'), - (0x8A, 'X'), - (0x8B, 'X'), - (0x8C, 'X'), - (0x8D, 'X'), - (0x8E, 'X'), - (0x8F, 'X'), - (0x90, 'X'), - (0x91, 'X'), - (0x92, 'X'), - (0x93, 'X'), - (0x94, 'X'), - (0x95, 'X'), - (0x96, 'X'), - (0x97, 'X'), - (0x98, 'X'), - (0x99, 'X'), - (0x9A, 'X'), - (0x9B, 'X'), - (0x9C, 'X'), - (0x9D, 'X'), - (0x9E, 'X'), - (0x9F, 'X'), - (0xA0, '3', ' '), - (0xA1, 'V'), - (0xA2, 'V'), - (0xA3, 'V'), - (0xA4, 'V'), - (0xA5, 'V'), - (0xA6, 'V'), - (0xA7, 'V'), - (0xA8, '3', ' ̈'), - (0xA9, 'V'), - (0xAA, 'M', 'a'), - (0xAB, 'V'), - (0xAC, 'V'), - (0xAD, 'I'), - (0xAE, 'V'), - (0xAF, '3', ' ̄'), - (0xB0, 'V'), - (0xB1, 'V'), - (0xB2, 'M', '2'), - (0xB3, 'M', '3'), - (0xB4, '3', ' ́'), - (0xB5, 'M', 'μ'), - (0xB6, 'V'), - (0xB7, 'V'), - (0xB8, '3', ' ̧'), - (0xB9, 'M', '1'), - (0xBA, 'M', 'o'), - (0xBB, 'V'), - (0xBC, 'M', '1⁄4'), - (0xBD, 'M', '1⁄2'), - (0xBE, 'M', '3⁄4'), - (0xBF, 'V'), - (0xC0, 'M', 'à'), - (0xC1, 'M', 'á'), - (0xC2, 'M', 'â'), - (0xC3, 'M', 'ã'), - (0xC4, 'M', 'ä'), - (0xC5, 'M', 'å'), - (0xC6, 'M', 'æ'), - (0xC7, 'M', 'ç'), - ] - -def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC8, 'M', 'è'), - (0xC9, 'M', 'é'), - (0xCA, 'M', 'ê'), - (0xCB, 'M', 'ë'), - (0xCC, 'M', 'ì'), - (0xCD, 'M', 'í'), - (0xCE, 'M', 'î'), - (0xCF, 'M', 'ï'), - (0xD0, 'M', 'ð'), - (0xD1, 'M', 'ñ'), - (0xD2, 'M', 'ò'), - (0xD3, 'M', 'ó'), - (0xD4, 'M', 'ô'), - (0xD5, 'M', 'õ'), - (0xD6, 'M', 'ö'), - (0xD7, 'V'), - (0xD8, 'M', 'ø'), - (0xD9, 'M', 'ù'), - (0xDA, 'M', 'ú'), - (0xDB, 'M', 'û'), - (0xDC, 'M', 'ü'), - (0xDD, 'M', 'ý'), - (0xDE, 'M', 'þ'), - (0xDF, 'D', 'ss'), - (0xE0, 'V'), - (0xE1, 'V'), - (0xE2, 'V'), - (0xE3, 'V'), - (0xE4, 'V'), - (0xE5, 'V'), - (0xE6, 'V'), - (0xE7, 'V'), - (0xE8, 'V'), - (0xE9, 'V'), - (0xEA, 'V'), - (0xEB, 'V'), - (0xEC, 'V'), - (0xED, 'V'), - (0xEE, 'V'), - (0xEF, 'V'), - (0xF0, 'V'), - (0xF1, 'V'), - (0xF2, 'V'), - (0xF3, 'V'), - (0xF4, 'V'), - (0xF5, 'V'), - (0xF6, 'V'), - (0xF7, 'V'), - (0xF8, 'V'), - (0xF9, 'V'), - (0xFA, 'V'), - (0xFB, 'V'), - (0xFC, 'V'), - (0xFD, 'V'), - (0xFE, 'V'), - (0xFF, 'V'), - (0x100, 'M', 'ā'), - (0x101, 'V'), - (0x102, 'M', 'ă'), - (0x103, 'V'), - (0x104, 'M', 'ą'), - (0x105, 'V'), - (0x106, 'M', 'ć'), - (0x107, 'V'), - (0x108, 'M', 'ĉ'), - (0x109, 'V'), - (0x10A, 'M', 'ċ'), - (0x10B, 'V'), - (0x10C, 'M', 'č'), - (0x10D, 'V'), - (0x10E, 'M', 'ď'), - (0x10F, 'V'), - (0x110, 'M', 'đ'), - (0x111, 'V'), - (0x112, 'M', 'ē'), - (0x113, 'V'), - (0x114, 'M', 'ĕ'), - (0x115, 'V'), - (0x116, 'M', 'ė'), - (0x117, 'V'), - (0x118, 'M', 'ę'), - (0x119, 'V'), - (0x11A, 'M', 'ě'), - (0x11B, 'V'), - (0x11C, 'M', 'ĝ'), - (0x11D, 'V'), - (0x11E, 'M', 'ğ'), - (0x11F, 'V'), - (0x120, 'M', 'ġ'), - (0x121, 'V'), - (0x122, 'M', 'ģ'), - (0x123, 'V'), - (0x124, 'M', 'ĥ'), - (0x125, 'V'), - (0x126, 'M', 'ħ'), - (0x127, 'V'), - (0x128, 'M', 'ĩ'), - (0x129, 'V'), - (0x12A, 'M', 'ī'), - (0x12B, 'V'), - ] - -def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x12C, 'M', 'ĭ'), - (0x12D, 'V'), - (0x12E, 'M', 'į'), - (0x12F, 'V'), - (0x130, 'M', 'i̇'), - (0x131, 'V'), - (0x132, 'M', 'ij'), - (0x134, 'M', 'ĵ'), - (0x135, 'V'), - (0x136, 'M', 'ķ'), - (0x137, 'V'), - (0x139, 'M', 'ĺ'), - (0x13A, 'V'), - (0x13B, 'M', 'ļ'), - (0x13C, 'V'), - (0x13D, 'M', 'ľ'), - (0x13E, 'V'), - (0x13F, 'M', 'l·'), - (0x141, 'M', 'ł'), - (0x142, 'V'), - (0x143, 'M', 'ń'), - (0x144, 'V'), - (0x145, 'M', 'ņ'), - (0x146, 'V'), - (0x147, 'M', 'ň'), - (0x148, 'V'), - (0x149, 'M', 'ʼn'), - (0x14A, 'M', 'ŋ'), - (0x14B, 'V'), - (0x14C, 'M', 'ō'), - (0x14D, 'V'), - (0x14E, 'M', 'ŏ'), - (0x14F, 'V'), - (0x150, 'M', 'ő'), - (0x151, 'V'), - (0x152, 'M', 'œ'), - (0x153, 'V'), - (0x154, 'M', 'ŕ'), - (0x155, 'V'), - (0x156, 'M', 'ŗ'), - (0x157, 'V'), - (0x158, 'M', 'ř'), - (0x159, 'V'), - (0x15A, 'M', 'ś'), - (0x15B, 'V'), - (0x15C, 'M', 'ŝ'), - (0x15D, 'V'), - (0x15E, 'M', 'ş'), - (0x15F, 'V'), - (0x160, 'M', 'š'), - (0x161, 'V'), - (0x162, 'M', 'ţ'), - (0x163, 'V'), - (0x164, 'M', 'ť'), - (0x165, 'V'), - (0x166, 'M', 'ŧ'), - (0x167, 'V'), - (0x168, 'M', 'ũ'), - (0x169, 'V'), - (0x16A, 'M', 'ū'), - (0x16B, 'V'), - (0x16C, 'M', 'ŭ'), - (0x16D, 'V'), - (0x16E, 'M', 'ů'), - (0x16F, 'V'), - (0x170, 'M', 'ű'), - (0x171, 'V'), - (0x172, 'M', 'ų'), - (0x173, 'V'), - (0x174, 'M', 'ŵ'), - (0x175, 'V'), - (0x176, 'M', 'ŷ'), - (0x177, 'V'), - (0x178, 'M', 'ÿ'), - (0x179, 'M', 'ź'), - (0x17A, 'V'), - (0x17B, 'M', 'ż'), - (0x17C, 'V'), - (0x17D, 'M', 'ž'), - (0x17E, 'V'), - (0x17F, 'M', 's'), - (0x180, 'V'), - (0x181, 'M', 'ɓ'), - (0x182, 'M', 'ƃ'), - (0x183, 'V'), - (0x184, 'M', 'ƅ'), - (0x185, 'V'), - (0x186, 'M', 'ɔ'), - (0x187, 'M', 'ƈ'), - (0x188, 'V'), - (0x189, 'M', 'ɖ'), - (0x18A, 'M', 'ɗ'), - (0x18B, 'M', 'ƌ'), - (0x18C, 'V'), - (0x18E, 'M', 'ǝ'), - (0x18F, 'M', 'ə'), - (0x190, 'M', 'ɛ'), - (0x191, 'M', 'ƒ'), - (0x192, 'V'), - (0x193, 'M', 'ɠ'), - ] - -def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x194, 'M', 'ɣ'), - (0x195, 'V'), - (0x196, 'M', 'ɩ'), - (0x197, 'M', 'ɨ'), - (0x198, 'M', 'ƙ'), - (0x199, 'V'), - (0x19C, 'M', 'ɯ'), - (0x19D, 'M', 'ɲ'), - (0x19E, 'V'), - (0x19F, 'M', 'ɵ'), - (0x1A0, 'M', 'ơ'), - (0x1A1, 'V'), - (0x1A2, 'M', 'ƣ'), - (0x1A3, 'V'), - (0x1A4, 'M', 'ƥ'), - (0x1A5, 'V'), - (0x1A6, 'M', 'ʀ'), - (0x1A7, 'M', 'ƨ'), - (0x1A8, 'V'), - (0x1A9, 'M', 'ʃ'), - (0x1AA, 'V'), - (0x1AC, 'M', 'ƭ'), - (0x1AD, 'V'), - (0x1AE, 'M', 'ʈ'), - (0x1AF, 'M', 'ư'), - (0x1B0, 'V'), - (0x1B1, 'M', 'ʊ'), - (0x1B2, 'M', 'ʋ'), - (0x1B3, 'M', 'ƴ'), - (0x1B4, 'V'), - (0x1B5, 'M', 'ƶ'), - (0x1B6, 'V'), - (0x1B7, 'M', 'ʒ'), - (0x1B8, 'M', 'ƹ'), - (0x1B9, 'V'), - (0x1BC, 'M', 'ƽ'), - (0x1BD, 'V'), - (0x1C4, 'M', 'dž'), - (0x1C7, 'M', 'lj'), - (0x1CA, 'M', 'nj'), - (0x1CD, 'M', 'ǎ'), - (0x1CE, 'V'), - (0x1CF, 'M', 'ǐ'), - (0x1D0, 'V'), - (0x1D1, 'M', 'ǒ'), - (0x1D2, 'V'), - (0x1D3, 'M', 'ǔ'), - (0x1D4, 'V'), - (0x1D5, 'M', 'ǖ'), - (0x1D6, 'V'), - (0x1D7, 'M', 'ǘ'), - (0x1D8, 'V'), - (0x1D9, 'M', 'ǚ'), - (0x1DA, 'V'), - (0x1DB, 'M', 'ǜ'), - (0x1DC, 'V'), - (0x1DE, 'M', 'ǟ'), - (0x1DF, 'V'), - (0x1E0, 'M', 'ǡ'), - (0x1E1, 'V'), - (0x1E2, 'M', 'ǣ'), - (0x1E3, 'V'), - (0x1E4, 'M', 'ǥ'), - (0x1E5, 'V'), - (0x1E6, 'M', 'ǧ'), - (0x1E7, 'V'), - (0x1E8, 'M', 'ǩ'), - (0x1E9, 'V'), - (0x1EA, 'M', 'ǫ'), - (0x1EB, 'V'), - (0x1EC, 'M', 'ǭ'), - (0x1ED, 'V'), - (0x1EE, 'M', 'ǯ'), - (0x1EF, 'V'), - (0x1F1, 'M', 'dz'), - (0x1F4, 'M', 'ǵ'), - (0x1F5, 'V'), - (0x1F6, 'M', 'ƕ'), - (0x1F7, 'M', 'ƿ'), - (0x1F8, 'M', 'ǹ'), - (0x1F9, 'V'), - (0x1FA, 'M', 'ǻ'), - (0x1FB, 'V'), - (0x1FC, 'M', 'ǽ'), - (0x1FD, 'V'), - (0x1FE, 'M', 'ǿ'), - (0x1FF, 'V'), - (0x200, 'M', 'ȁ'), - (0x201, 'V'), - (0x202, 'M', 'ȃ'), - (0x203, 'V'), - (0x204, 'M', 'ȅ'), - (0x205, 'V'), - (0x206, 'M', 'ȇ'), - (0x207, 'V'), - (0x208, 'M', 'ȉ'), - (0x209, 'V'), - (0x20A, 'M', 'ȋ'), - (0x20B, 'V'), - (0x20C, 'M', 'ȍ'), - ] - -def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x20D, 'V'), - (0x20E, 'M', 'ȏ'), - (0x20F, 'V'), - (0x210, 'M', 'ȑ'), - (0x211, 'V'), - (0x212, 'M', 'ȓ'), - (0x213, 'V'), - (0x214, 'M', 'ȕ'), - (0x215, 'V'), - (0x216, 'M', 'ȗ'), - (0x217, 'V'), - (0x218, 'M', 'ș'), - (0x219, 'V'), - (0x21A, 'M', 'ț'), - (0x21B, 'V'), - (0x21C, 'M', 'ȝ'), - (0x21D, 'V'), - (0x21E, 'M', 'ȟ'), - (0x21F, 'V'), - (0x220, 'M', 'ƞ'), - (0x221, 'V'), - (0x222, 'M', 'ȣ'), - (0x223, 'V'), - (0x224, 'M', 'ȥ'), - (0x225, 'V'), - (0x226, 'M', 'ȧ'), - (0x227, 'V'), - (0x228, 'M', 'ȩ'), - (0x229, 'V'), - (0x22A, 'M', 'ȫ'), - (0x22B, 'V'), - (0x22C, 'M', 'ȭ'), - (0x22D, 'V'), - (0x22E, 'M', 'ȯ'), - (0x22F, 'V'), - (0x230, 'M', 'ȱ'), - (0x231, 'V'), - (0x232, 'M', 'ȳ'), - (0x233, 'V'), - (0x23A, 'M', 'ⱥ'), - (0x23B, 'M', 'ȼ'), - (0x23C, 'V'), - (0x23D, 'M', 'ƚ'), - (0x23E, 'M', 'ⱦ'), - (0x23F, 'V'), - (0x241, 'M', 'ɂ'), - (0x242, 'V'), - (0x243, 'M', 'ƀ'), - (0x244, 'M', 'ʉ'), - (0x245, 'M', 'ʌ'), - (0x246, 'M', 'ɇ'), - (0x247, 'V'), - (0x248, 'M', 'ɉ'), - (0x249, 'V'), - (0x24A, 'M', 'ɋ'), - (0x24B, 'V'), - (0x24C, 'M', 'ɍ'), - (0x24D, 'V'), - (0x24E, 'M', 'ɏ'), - (0x24F, 'V'), - (0x2B0, 'M', 'h'), - (0x2B1, 'M', 'ɦ'), - (0x2B2, 'M', 'j'), - (0x2B3, 'M', 'r'), - (0x2B4, 'M', 'ɹ'), - (0x2B5, 'M', 'ɻ'), - (0x2B6, 'M', 'ʁ'), - (0x2B7, 'M', 'w'), - (0x2B8, 'M', 'y'), - (0x2B9, 'V'), - (0x2D8, '3', ' ̆'), - (0x2D9, '3', ' ̇'), - (0x2DA, '3', ' ̊'), - (0x2DB, '3', ' ̨'), - (0x2DC, '3', ' ̃'), - (0x2DD, '3', ' ̋'), - (0x2DE, 'V'), - (0x2E0, 'M', 'ɣ'), - (0x2E1, 'M', 'l'), - (0x2E2, 'M', 's'), - (0x2E3, 'M', 'x'), - (0x2E4, 'M', 'ʕ'), - (0x2E5, 'V'), - (0x340, 'M', '̀'), - (0x341, 'M', '́'), - (0x342, 'V'), - (0x343, 'M', '̓'), - (0x344, 'M', '̈́'), - (0x345, 'M', 'ι'), - (0x346, 'V'), - (0x34F, 'I'), - (0x350, 'V'), - (0x370, 'M', 'ͱ'), - (0x371, 'V'), - (0x372, 'M', 'ͳ'), - (0x373, 'V'), - (0x374, 'M', 'ʹ'), - (0x375, 'V'), - (0x376, 'M', 'ͷ'), - (0x377, 'V'), - ] - -def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x378, 'X'), - (0x37A, '3', ' ι'), - (0x37B, 'V'), - (0x37E, '3', ';'), - (0x37F, 'M', 'ϳ'), - (0x380, 'X'), - (0x384, '3', ' ́'), - (0x385, '3', ' ̈́'), - (0x386, 'M', 'ά'), - (0x387, 'M', '·'), - (0x388, 'M', 'έ'), - (0x389, 'M', 'ή'), - (0x38A, 'M', 'ί'), - (0x38B, 'X'), - (0x38C, 'M', 'ό'), - (0x38D, 'X'), - (0x38E, 'M', 'ύ'), - (0x38F, 'M', 'ώ'), - (0x390, 'V'), - (0x391, 'M', 'α'), - (0x392, 'M', 'β'), - (0x393, 'M', 'γ'), - (0x394, 'M', 'δ'), - (0x395, 'M', 'ε'), - (0x396, 'M', 'ζ'), - (0x397, 'M', 'η'), - (0x398, 'M', 'θ'), - (0x399, 'M', 'ι'), - (0x39A, 'M', 'κ'), - (0x39B, 'M', 'λ'), - (0x39C, 'M', 'μ'), - (0x39D, 'M', 'ν'), - (0x39E, 'M', 'ξ'), - (0x39F, 'M', 'ο'), - (0x3A0, 'M', 'π'), - (0x3A1, 'M', 'ρ'), - (0x3A2, 'X'), - (0x3A3, 'M', 'σ'), - (0x3A4, 'M', 'τ'), - (0x3A5, 'M', 'υ'), - (0x3A6, 'M', 'φ'), - (0x3A7, 'M', 'χ'), - (0x3A8, 'M', 'ψ'), - (0x3A9, 'M', 'ω'), - (0x3AA, 'M', 'ϊ'), - (0x3AB, 'M', 'ϋ'), - (0x3AC, 'V'), - (0x3C2, 'D', 'σ'), - (0x3C3, 'V'), - (0x3CF, 'M', 'ϗ'), - (0x3D0, 'M', 'β'), - (0x3D1, 'M', 'θ'), - (0x3D2, 'M', 'υ'), - (0x3D3, 'M', 'ύ'), - (0x3D4, 'M', 'ϋ'), - (0x3D5, 'M', 'φ'), - (0x3D6, 'M', 'π'), - (0x3D7, 'V'), - (0x3D8, 'M', 'ϙ'), - (0x3D9, 'V'), - (0x3DA, 'M', 'ϛ'), - (0x3DB, 'V'), - (0x3DC, 'M', 'ϝ'), - (0x3DD, 'V'), - (0x3DE, 'M', 'ϟ'), - (0x3DF, 'V'), - (0x3E0, 'M', 'ϡ'), - (0x3E1, 'V'), - (0x3E2, 'M', 'ϣ'), - (0x3E3, 'V'), - (0x3E4, 'M', 'ϥ'), - (0x3E5, 'V'), - (0x3E6, 'M', 'ϧ'), - (0x3E7, 'V'), - (0x3E8, 'M', 'ϩ'), - (0x3E9, 'V'), - (0x3EA, 'M', 'ϫ'), - (0x3EB, 'V'), - (0x3EC, 'M', 'ϭ'), - (0x3ED, 'V'), - (0x3EE, 'M', 'ϯ'), - (0x3EF, 'V'), - (0x3F0, 'M', 'κ'), - (0x3F1, 'M', 'ρ'), - (0x3F2, 'M', 'σ'), - (0x3F3, 'V'), - (0x3F4, 'M', 'θ'), - (0x3F5, 'M', 'ε'), - (0x3F6, 'V'), - (0x3F7, 'M', 'ϸ'), - (0x3F8, 'V'), - (0x3F9, 'M', 'σ'), - (0x3FA, 'M', 'ϻ'), - (0x3FB, 'V'), - (0x3FD, 'M', 'ͻ'), - (0x3FE, 'M', 'ͼ'), - (0x3FF, 'M', 'ͽ'), - (0x400, 'M', 'ѐ'), - (0x401, 'M', 'ё'), - (0x402, 'M', 'ђ'), - ] - -def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x403, 'M', 'ѓ'), - (0x404, 'M', 'є'), - (0x405, 'M', 'ѕ'), - (0x406, 'M', 'і'), - (0x407, 'M', 'ї'), - (0x408, 'M', 'ј'), - (0x409, 'M', 'љ'), - (0x40A, 'M', 'њ'), - (0x40B, 'M', 'ћ'), - (0x40C, 'M', 'ќ'), - (0x40D, 'M', 'ѝ'), - (0x40E, 'M', 'ў'), - (0x40F, 'M', 'џ'), - (0x410, 'M', 'а'), - (0x411, 'M', 'б'), - (0x412, 'M', 'в'), - (0x413, 'M', 'г'), - (0x414, 'M', 'д'), - (0x415, 'M', 'е'), - (0x416, 'M', 'ж'), - (0x417, 'M', 'з'), - (0x418, 'M', 'и'), - (0x419, 'M', 'й'), - (0x41A, 'M', 'к'), - (0x41B, 'M', 'л'), - (0x41C, 'M', 'м'), - (0x41D, 'M', 'н'), - (0x41E, 'M', 'о'), - (0x41F, 'M', 'п'), - (0x420, 'M', 'р'), - (0x421, 'M', 'с'), - (0x422, 'M', 'т'), - (0x423, 'M', 'у'), - (0x424, 'M', 'ф'), - (0x425, 'M', 'х'), - (0x426, 'M', 'ц'), - (0x427, 'M', 'ч'), - (0x428, 'M', 'ш'), - (0x429, 'M', 'щ'), - (0x42A, 'M', 'ъ'), - (0x42B, 'M', 'ы'), - (0x42C, 'M', 'ь'), - (0x42D, 'M', 'э'), - (0x42E, 'M', 'ю'), - (0x42F, 'M', 'я'), - (0x430, 'V'), - (0x460, 'M', 'ѡ'), - (0x461, 'V'), - (0x462, 'M', 'ѣ'), - (0x463, 'V'), - (0x464, 'M', 'ѥ'), - (0x465, 'V'), - (0x466, 'M', 'ѧ'), - (0x467, 'V'), - (0x468, 'M', 'ѩ'), - (0x469, 'V'), - (0x46A, 'M', 'ѫ'), - (0x46B, 'V'), - (0x46C, 'M', 'ѭ'), - (0x46D, 'V'), - (0x46E, 'M', 'ѯ'), - (0x46F, 'V'), - (0x470, 'M', 'ѱ'), - (0x471, 'V'), - (0x472, 'M', 'ѳ'), - (0x473, 'V'), - (0x474, 'M', 'ѵ'), - (0x475, 'V'), - (0x476, 'M', 'ѷ'), - (0x477, 'V'), - (0x478, 'M', 'ѹ'), - (0x479, 'V'), - (0x47A, 'M', 'ѻ'), - (0x47B, 'V'), - (0x47C, 'M', 'ѽ'), - (0x47D, 'V'), - (0x47E, 'M', 'ѿ'), - (0x47F, 'V'), - (0x480, 'M', 'ҁ'), - (0x481, 'V'), - (0x48A, 'M', 'ҋ'), - (0x48B, 'V'), - (0x48C, 'M', 'ҍ'), - (0x48D, 'V'), - (0x48E, 'M', 'ҏ'), - (0x48F, 'V'), - (0x490, 'M', 'ґ'), - (0x491, 'V'), - (0x492, 'M', 'ғ'), - (0x493, 'V'), - (0x494, 'M', 'ҕ'), - (0x495, 'V'), - (0x496, 'M', 'җ'), - (0x497, 'V'), - (0x498, 'M', 'ҙ'), - (0x499, 'V'), - (0x49A, 'M', 'қ'), - (0x49B, 'V'), - (0x49C, 'M', 'ҝ'), - (0x49D, 'V'), - ] - -def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x49E, 'M', 'ҟ'), - (0x49F, 'V'), - (0x4A0, 'M', 'ҡ'), - (0x4A1, 'V'), - (0x4A2, 'M', 'ң'), - (0x4A3, 'V'), - (0x4A4, 'M', 'ҥ'), - (0x4A5, 'V'), - (0x4A6, 'M', 'ҧ'), - (0x4A7, 'V'), - (0x4A8, 'M', 'ҩ'), - (0x4A9, 'V'), - (0x4AA, 'M', 'ҫ'), - (0x4AB, 'V'), - (0x4AC, 'M', 'ҭ'), - (0x4AD, 'V'), - (0x4AE, 'M', 'ү'), - (0x4AF, 'V'), - (0x4B0, 'M', 'ұ'), - (0x4B1, 'V'), - (0x4B2, 'M', 'ҳ'), - (0x4B3, 'V'), - (0x4B4, 'M', 'ҵ'), - (0x4B5, 'V'), - (0x4B6, 'M', 'ҷ'), - (0x4B7, 'V'), - (0x4B8, 'M', 'ҹ'), - (0x4B9, 'V'), - (0x4BA, 'M', 'һ'), - (0x4BB, 'V'), - (0x4BC, 'M', 'ҽ'), - (0x4BD, 'V'), - (0x4BE, 'M', 'ҿ'), - (0x4BF, 'V'), - (0x4C0, 'X'), - (0x4C1, 'M', 'ӂ'), - (0x4C2, 'V'), - (0x4C3, 'M', 'ӄ'), - (0x4C4, 'V'), - (0x4C5, 'M', 'ӆ'), - (0x4C6, 'V'), - (0x4C7, 'M', 'ӈ'), - (0x4C8, 'V'), - (0x4C9, 'M', 'ӊ'), - (0x4CA, 'V'), - (0x4CB, 'M', 'ӌ'), - (0x4CC, 'V'), - (0x4CD, 'M', 'ӎ'), - (0x4CE, 'V'), - (0x4D0, 'M', 'ӑ'), - (0x4D1, 'V'), - (0x4D2, 'M', 'ӓ'), - (0x4D3, 'V'), - (0x4D4, 'M', 'ӕ'), - (0x4D5, 'V'), - (0x4D6, 'M', 'ӗ'), - (0x4D7, 'V'), - (0x4D8, 'M', 'ә'), - (0x4D9, 'V'), - (0x4DA, 'M', 'ӛ'), - (0x4DB, 'V'), - (0x4DC, 'M', 'ӝ'), - (0x4DD, 'V'), - (0x4DE, 'M', 'ӟ'), - (0x4DF, 'V'), - (0x4E0, 'M', 'ӡ'), - (0x4E1, 'V'), - (0x4E2, 'M', 'ӣ'), - (0x4E3, 'V'), - (0x4E4, 'M', 'ӥ'), - (0x4E5, 'V'), - (0x4E6, 'M', 'ӧ'), - (0x4E7, 'V'), - (0x4E8, 'M', 'ө'), - (0x4E9, 'V'), - (0x4EA, 'M', 'ӫ'), - (0x4EB, 'V'), - (0x4EC, 'M', 'ӭ'), - (0x4ED, 'V'), - (0x4EE, 'M', 'ӯ'), - (0x4EF, 'V'), - (0x4F0, 'M', 'ӱ'), - (0x4F1, 'V'), - (0x4F2, 'M', 'ӳ'), - (0x4F3, 'V'), - (0x4F4, 'M', 'ӵ'), - (0x4F5, 'V'), - (0x4F6, 'M', 'ӷ'), - (0x4F7, 'V'), - (0x4F8, 'M', 'ӹ'), - (0x4F9, 'V'), - (0x4FA, 'M', 'ӻ'), - (0x4FB, 'V'), - (0x4FC, 'M', 'ӽ'), - (0x4FD, 'V'), - (0x4FE, 'M', 'ӿ'), - (0x4FF, 'V'), - (0x500, 'M', 'ԁ'), - (0x501, 'V'), - (0x502, 'M', 'ԃ'), - ] - -def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x503, 'V'), - (0x504, 'M', 'ԅ'), - (0x505, 'V'), - (0x506, 'M', 'ԇ'), - (0x507, 'V'), - (0x508, 'M', 'ԉ'), - (0x509, 'V'), - (0x50A, 'M', 'ԋ'), - (0x50B, 'V'), - (0x50C, 'M', 'ԍ'), - (0x50D, 'V'), - (0x50E, 'M', 'ԏ'), - (0x50F, 'V'), - (0x510, 'M', 'ԑ'), - (0x511, 'V'), - (0x512, 'M', 'ԓ'), - (0x513, 'V'), - (0x514, 'M', 'ԕ'), - (0x515, 'V'), - (0x516, 'M', 'ԗ'), - (0x517, 'V'), - (0x518, 'M', 'ԙ'), - (0x519, 'V'), - (0x51A, 'M', 'ԛ'), - (0x51B, 'V'), - (0x51C, 'M', 'ԝ'), - (0x51D, 'V'), - (0x51E, 'M', 'ԟ'), - (0x51F, 'V'), - (0x520, 'M', 'ԡ'), - (0x521, 'V'), - (0x522, 'M', 'ԣ'), - (0x523, 'V'), - (0x524, 'M', 'ԥ'), - (0x525, 'V'), - (0x526, 'M', 'ԧ'), - (0x527, 'V'), - (0x528, 'M', 'ԩ'), - (0x529, 'V'), - (0x52A, 'M', 'ԫ'), - (0x52B, 'V'), - (0x52C, 'M', 'ԭ'), - (0x52D, 'V'), - (0x52E, 'M', 'ԯ'), - (0x52F, 'V'), - (0x530, 'X'), - (0x531, 'M', 'ա'), - (0x532, 'M', 'բ'), - (0x533, 'M', 'գ'), - (0x534, 'M', 'դ'), - (0x535, 'M', 'ե'), - (0x536, 'M', 'զ'), - (0x537, 'M', 'է'), - (0x538, 'M', 'ը'), - (0x539, 'M', 'թ'), - (0x53A, 'M', 'ժ'), - (0x53B, 'M', 'ի'), - (0x53C, 'M', 'լ'), - (0x53D, 'M', 'խ'), - (0x53E, 'M', 'ծ'), - (0x53F, 'M', 'կ'), - (0x540, 'M', 'հ'), - (0x541, 'M', 'ձ'), - (0x542, 'M', 'ղ'), - (0x543, 'M', 'ճ'), - (0x544, 'M', 'մ'), - (0x545, 'M', 'յ'), - (0x546, 'M', 'ն'), - (0x547, 'M', 'շ'), - (0x548, 'M', 'ո'), - (0x549, 'M', 'չ'), - (0x54A, 'M', 'պ'), - (0x54B, 'M', 'ջ'), - (0x54C, 'M', 'ռ'), - (0x54D, 'M', 'ս'), - (0x54E, 'M', 'վ'), - (0x54F, 'M', 'տ'), - (0x550, 'M', 'ր'), - (0x551, 'M', 'ց'), - (0x552, 'M', 'ւ'), - (0x553, 'M', 'փ'), - (0x554, 'M', 'ք'), - (0x555, 'M', 'օ'), - (0x556, 'M', 'ֆ'), - (0x557, 'X'), - (0x559, 'V'), - (0x587, 'M', 'եւ'), - (0x588, 'V'), - (0x58B, 'X'), - (0x58D, 'V'), - (0x590, 'X'), - (0x591, 'V'), - (0x5C8, 'X'), - (0x5D0, 'V'), - (0x5EB, 'X'), - (0x5EF, 'V'), - (0x5F5, 'X'), - (0x606, 'V'), - (0x61C, 'X'), - (0x61D, 'V'), - ] - -def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x675, 'M', 'اٴ'), - (0x676, 'M', 'وٴ'), - (0x677, 'M', 'ۇٴ'), - (0x678, 'M', 'يٴ'), - (0x679, 'V'), - (0x6DD, 'X'), - (0x6DE, 'V'), - (0x70E, 'X'), - (0x710, 'V'), - (0x74B, 'X'), - (0x74D, 'V'), - (0x7B2, 'X'), - (0x7C0, 'V'), - (0x7FB, 'X'), - (0x7FD, 'V'), - (0x82E, 'X'), - (0x830, 'V'), - (0x83F, 'X'), - (0x840, 'V'), - (0x85C, 'X'), - (0x85E, 'V'), - (0x85F, 'X'), - (0x860, 'V'), - (0x86B, 'X'), - (0x870, 'V'), - (0x88F, 'X'), - (0x898, 'V'), - (0x8E2, 'X'), - (0x8E3, 'V'), - (0x958, 'M', 'क़'), - (0x959, 'M', 'ख़'), - (0x95A, 'M', 'ग़'), - (0x95B, 'M', 'ज़'), - (0x95C, 'M', 'ड़'), - (0x95D, 'M', 'ढ़'), - (0x95E, 'M', 'फ़'), - (0x95F, 'M', 'य़'), - (0x960, 'V'), - (0x984, 'X'), - (0x985, 'V'), - (0x98D, 'X'), - (0x98F, 'V'), - (0x991, 'X'), - (0x993, 'V'), - (0x9A9, 'X'), - (0x9AA, 'V'), - (0x9B1, 'X'), - (0x9B2, 'V'), - (0x9B3, 'X'), - (0x9B6, 'V'), - (0x9BA, 'X'), - (0x9BC, 'V'), - (0x9C5, 'X'), - (0x9C7, 'V'), - (0x9C9, 'X'), - (0x9CB, 'V'), - (0x9CF, 'X'), - (0x9D7, 'V'), - (0x9D8, 'X'), - (0x9DC, 'M', 'ড়'), - (0x9DD, 'M', 'ঢ়'), - (0x9DE, 'X'), - (0x9DF, 'M', 'য়'), - (0x9E0, 'V'), - (0x9E4, 'X'), - (0x9E6, 'V'), - (0x9FF, 'X'), - (0xA01, 'V'), - (0xA04, 'X'), - (0xA05, 'V'), - (0xA0B, 'X'), - (0xA0F, 'V'), - (0xA11, 'X'), - (0xA13, 'V'), - (0xA29, 'X'), - (0xA2A, 'V'), - (0xA31, 'X'), - (0xA32, 'V'), - (0xA33, 'M', 'ਲ਼'), - (0xA34, 'X'), - (0xA35, 'V'), - (0xA36, 'M', 'ਸ਼'), - (0xA37, 'X'), - (0xA38, 'V'), - (0xA3A, 'X'), - (0xA3C, 'V'), - (0xA3D, 'X'), - (0xA3E, 'V'), - (0xA43, 'X'), - (0xA47, 'V'), - (0xA49, 'X'), - (0xA4B, 'V'), - (0xA4E, 'X'), - (0xA51, 'V'), - (0xA52, 'X'), - (0xA59, 'M', 'ਖ਼'), - (0xA5A, 'M', 'ਗ਼'), - (0xA5B, 'M', 'ਜ਼'), - (0xA5C, 'V'), - (0xA5D, 'X'), - ] - -def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA5E, 'M', 'ਫ਼'), - (0xA5F, 'X'), - (0xA66, 'V'), - (0xA77, 'X'), - (0xA81, 'V'), - (0xA84, 'X'), - (0xA85, 'V'), - (0xA8E, 'X'), - (0xA8F, 'V'), - (0xA92, 'X'), - (0xA93, 'V'), - (0xAA9, 'X'), - (0xAAA, 'V'), - (0xAB1, 'X'), - (0xAB2, 'V'), - (0xAB4, 'X'), - (0xAB5, 'V'), - (0xABA, 'X'), - (0xABC, 'V'), - (0xAC6, 'X'), - (0xAC7, 'V'), - (0xACA, 'X'), - (0xACB, 'V'), - (0xACE, 'X'), - (0xAD0, 'V'), - (0xAD1, 'X'), - (0xAE0, 'V'), - (0xAE4, 'X'), - (0xAE6, 'V'), - (0xAF2, 'X'), - (0xAF9, 'V'), - (0xB00, 'X'), - (0xB01, 'V'), - (0xB04, 'X'), - (0xB05, 'V'), - (0xB0D, 'X'), - (0xB0F, 'V'), - (0xB11, 'X'), - (0xB13, 'V'), - (0xB29, 'X'), - (0xB2A, 'V'), - (0xB31, 'X'), - (0xB32, 'V'), - (0xB34, 'X'), - (0xB35, 'V'), - (0xB3A, 'X'), - (0xB3C, 'V'), - (0xB45, 'X'), - (0xB47, 'V'), - (0xB49, 'X'), - (0xB4B, 'V'), - (0xB4E, 'X'), - (0xB55, 'V'), - (0xB58, 'X'), - (0xB5C, 'M', 'ଡ଼'), - (0xB5D, 'M', 'ଢ଼'), - (0xB5E, 'X'), - (0xB5F, 'V'), - (0xB64, 'X'), - (0xB66, 'V'), - (0xB78, 'X'), - (0xB82, 'V'), - (0xB84, 'X'), - (0xB85, 'V'), - (0xB8B, 'X'), - (0xB8E, 'V'), - (0xB91, 'X'), - (0xB92, 'V'), - (0xB96, 'X'), - (0xB99, 'V'), - (0xB9B, 'X'), - (0xB9C, 'V'), - (0xB9D, 'X'), - (0xB9E, 'V'), - (0xBA0, 'X'), - (0xBA3, 'V'), - (0xBA5, 'X'), - (0xBA8, 'V'), - (0xBAB, 'X'), - (0xBAE, 'V'), - (0xBBA, 'X'), - (0xBBE, 'V'), - (0xBC3, 'X'), - (0xBC6, 'V'), - (0xBC9, 'X'), - (0xBCA, 'V'), - (0xBCE, 'X'), - (0xBD0, 'V'), - (0xBD1, 'X'), - (0xBD7, 'V'), - (0xBD8, 'X'), - (0xBE6, 'V'), - (0xBFB, 'X'), - (0xC00, 'V'), - (0xC0D, 'X'), - (0xC0E, 'V'), - (0xC11, 'X'), - (0xC12, 'V'), - (0xC29, 'X'), - (0xC2A, 'V'), - ] - -def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC3A, 'X'), - (0xC3C, 'V'), - (0xC45, 'X'), - (0xC46, 'V'), - (0xC49, 'X'), - (0xC4A, 'V'), - (0xC4E, 'X'), - (0xC55, 'V'), - (0xC57, 'X'), - (0xC58, 'V'), - (0xC5B, 'X'), - (0xC5D, 'V'), - (0xC5E, 'X'), - (0xC60, 'V'), - (0xC64, 'X'), - (0xC66, 'V'), - (0xC70, 'X'), - (0xC77, 'V'), - (0xC8D, 'X'), - (0xC8E, 'V'), - (0xC91, 'X'), - (0xC92, 'V'), - (0xCA9, 'X'), - (0xCAA, 'V'), - (0xCB4, 'X'), - (0xCB5, 'V'), - (0xCBA, 'X'), - (0xCBC, 'V'), - (0xCC5, 'X'), - (0xCC6, 'V'), - (0xCC9, 'X'), - (0xCCA, 'V'), - (0xCCE, 'X'), - (0xCD5, 'V'), - (0xCD7, 'X'), - (0xCDD, 'V'), - (0xCDF, 'X'), - (0xCE0, 'V'), - (0xCE4, 'X'), - (0xCE6, 'V'), - (0xCF0, 'X'), - (0xCF1, 'V'), - (0xCF4, 'X'), - (0xD00, 'V'), - (0xD0D, 'X'), - (0xD0E, 'V'), - (0xD11, 'X'), - (0xD12, 'V'), - (0xD45, 'X'), - (0xD46, 'V'), - (0xD49, 'X'), - (0xD4A, 'V'), - (0xD50, 'X'), - (0xD54, 'V'), - (0xD64, 'X'), - (0xD66, 'V'), - (0xD80, 'X'), - (0xD81, 'V'), - (0xD84, 'X'), - (0xD85, 'V'), - (0xD97, 'X'), - (0xD9A, 'V'), - (0xDB2, 'X'), - (0xDB3, 'V'), - (0xDBC, 'X'), - (0xDBD, 'V'), - (0xDBE, 'X'), - (0xDC0, 'V'), - (0xDC7, 'X'), - (0xDCA, 'V'), - (0xDCB, 'X'), - (0xDCF, 'V'), - (0xDD5, 'X'), - (0xDD6, 'V'), - (0xDD7, 'X'), - (0xDD8, 'V'), - (0xDE0, 'X'), - (0xDE6, 'V'), - (0xDF0, 'X'), - (0xDF2, 'V'), - (0xDF5, 'X'), - (0xE01, 'V'), - (0xE33, 'M', 'ํา'), - (0xE34, 'V'), - (0xE3B, 'X'), - (0xE3F, 'V'), - (0xE5C, 'X'), - (0xE81, 'V'), - (0xE83, 'X'), - (0xE84, 'V'), - (0xE85, 'X'), - (0xE86, 'V'), - (0xE8B, 'X'), - (0xE8C, 'V'), - (0xEA4, 'X'), - (0xEA5, 'V'), - (0xEA6, 'X'), - (0xEA7, 'V'), - (0xEB3, 'M', 'ໍາ'), - (0xEB4, 'V'), - ] - -def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xEBE, 'X'), - (0xEC0, 'V'), - (0xEC5, 'X'), - (0xEC6, 'V'), - (0xEC7, 'X'), - (0xEC8, 'V'), - (0xECF, 'X'), - (0xED0, 'V'), - (0xEDA, 'X'), - (0xEDC, 'M', 'ຫນ'), - (0xEDD, 'M', 'ຫມ'), - (0xEDE, 'V'), - (0xEE0, 'X'), - (0xF00, 'V'), - (0xF0C, 'M', '་'), - (0xF0D, 'V'), - (0xF43, 'M', 'གྷ'), - (0xF44, 'V'), - (0xF48, 'X'), - (0xF49, 'V'), - (0xF4D, 'M', 'ཌྷ'), - (0xF4E, 'V'), - (0xF52, 'M', 'དྷ'), - (0xF53, 'V'), - (0xF57, 'M', 'བྷ'), - (0xF58, 'V'), - (0xF5C, 'M', 'ཛྷ'), - (0xF5D, 'V'), - (0xF69, 'M', 'ཀྵ'), - (0xF6A, 'V'), - (0xF6D, 'X'), - (0xF71, 'V'), - (0xF73, 'M', 'ཱི'), - (0xF74, 'V'), - (0xF75, 'M', 'ཱུ'), - (0xF76, 'M', 'ྲྀ'), - (0xF77, 'M', 'ྲཱྀ'), - (0xF78, 'M', 'ླྀ'), - (0xF79, 'M', 'ླཱྀ'), - (0xF7A, 'V'), - (0xF81, 'M', 'ཱྀ'), - (0xF82, 'V'), - (0xF93, 'M', 'ྒྷ'), - (0xF94, 'V'), - (0xF98, 'X'), - (0xF99, 'V'), - (0xF9D, 'M', 'ྜྷ'), - (0xF9E, 'V'), - (0xFA2, 'M', 'ྡྷ'), - (0xFA3, 'V'), - (0xFA7, 'M', 'ྦྷ'), - (0xFA8, 'V'), - (0xFAC, 'M', 'ྫྷ'), - (0xFAD, 'V'), - (0xFB9, 'M', 'ྐྵ'), - (0xFBA, 'V'), - (0xFBD, 'X'), - (0xFBE, 'V'), - (0xFCD, 'X'), - (0xFCE, 'V'), - (0xFDB, 'X'), - (0x1000, 'V'), - (0x10A0, 'X'), - (0x10C7, 'M', 'ⴧ'), - (0x10C8, 'X'), - (0x10CD, 'M', 'ⴭ'), - (0x10CE, 'X'), - (0x10D0, 'V'), - (0x10FC, 'M', 'ნ'), - (0x10FD, 'V'), - (0x115F, 'X'), - (0x1161, 'V'), - (0x1249, 'X'), - (0x124A, 'V'), - (0x124E, 'X'), - (0x1250, 'V'), - (0x1257, 'X'), - (0x1258, 'V'), - (0x1259, 'X'), - (0x125A, 'V'), - (0x125E, 'X'), - (0x1260, 'V'), - (0x1289, 'X'), - (0x128A, 'V'), - (0x128E, 'X'), - (0x1290, 'V'), - (0x12B1, 'X'), - (0x12B2, 'V'), - (0x12B6, 'X'), - (0x12B8, 'V'), - (0x12BF, 'X'), - (0x12C0, 'V'), - (0x12C1, 'X'), - (0x12C2, 'V'), - (0x12C6, 'X'), - (0x12C8, 'V'), - (0x12D7, 'X'), - (0x12D8, 'V'), - (0x1311, 'X'), - (0x1312, 'V'), - ] - -def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1316, 'X'), - (0x1318, 'V'), - (0x135B, 'X'), - (0x135D, 'V'), - (0x137D, 'X'), - (0x1380, 'V'), - (0x139A, 'X'), - (0x13A0, 'V'), - (0x13F6, 'X'), - (0x13F8, 'M', 'Ᏸ'), - (0x13F9, 'M', 'Ᏹ'), - (0x13FA, 'M', 'Ᏺ'), - (0x13FB, 'M', 'Ᏻ'), - (0x13FC, 'M', 'Ᏼ'), - (0x13FD, 'M', 'Ᏽ'), - (0x13FE, 'X'), - (0x1400, 'V'), - (0x1680, 'X'), - (0x1681, 'V'), - (0x169D, 'X'), - (0x16A0, 'V'), - (0x16F9, 'X'), - (0x1700, 'V'), - (0x1716, 'X'), - (0x171F, 'V'), - (0x1737, 'X'), - (0x1740, 'V'), - (0x1754, 'X'), - (0x1760, 'V'), - (0x176D, 'X'), - (0x176E, 'V'), - (0x1771, 'X'), - (0x1772, 'V'), - (0x1774, 'X'), - (0x1780, 'V'), - (0x17B4, 'X'), - (0x17B6, 'V'), - (0x17DE, 'X'), - (0x17E0, 'V'), - (0x17EA, 'X'), - (0x17F0, 'V'), - (0x17FA, 'X'), - (0x1800, 'V'), - (0x1806, 'X'), - (0x1807, 'V'), - (0x180B, 'I'), - (0x180E, 'X'), - (0x180F, 'I'), - (0x1810, 'V'), - (0x181A, 'X'), - (0x1820, 'V'), - (0x1879, 'X'), - (0x1880, 'V'), - (0x18AB, 'X'), - (0x18B0, 'V'), - (0x18F6, 'X'), - (0x1900, 'V'), - (0x191F, 'X'), - (0x1920, 'V'), - (0x192C, 'X'), - (0x1930, 'V'), - (0x193C, 'X'), - (0x1940, 'V'), - (0x1941, 'X'), - (0x1944, 'V'), - (0x196E, 'X'), - (0x1970, 'V'), - (0x1975, 'X'), - (0x1980, 'V'), - (0x19AC, 'X'), - (0x19B0, 'V'), - (0x19CA, 'X'), - (0x19D0, 'V'), - (0x19DB, 'X'), - (0x19DE, 'V'), - (0x1A1C, 'X'), - (0x1A1E, 'V'), - (0x1A5F, 'X'), - (0x1A60, 'V'), - (0x1A7D, 'X'), - (0x1A7F, 'V'), - (0x1A8A, 'X'), - (0x1A90, 'V'), - (0x1A9A, 'X'), - (0x1AA0, 'V'), - (0x1AAE, 'X'), - (0x1AB0, 'V'), - (0x1ACF, 'X'), - (0x1B00, 'V'), - (0x1B4D, 'X'), - (0x1B50, 'V'), - (0x1B7F, 'X'), - (0x1B80, 'V'), - (0x1BF4, 'X'), - (0x1BFC, 'V'), - (0x1C38, 'X'), - (0x1C3B, 'V'), - (0x1C4A, 'X'), - (0x1C4D, 'V'), - (0x1C80, 'M', 'в'), - ] - -def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1C81, 'M', 'д'), - (0x1C82, 'M', 'о'), - (0x1C83, 'M', 'с'), - (0x1C84, 'M', 'т'), - (0x1C86, 'M', 'ъ'), - (0x1C87, 'M', 'ѣ'), - (0x1C88, 'M', 'ꙋ'), - (0x1C89, 'X'), - (0x1C90, 'M', 'ა'), - (0x1C91, 'M', 'ბ'), - (0x1C92, 'M', 'გ'), - (0x1C93, 'M', 'დ'), - (0x1C94, 'M', 'ე'), - (0x1C95, 'M', 'ვ'), - (0x1C96, 'M', 'ზ'), - (0x1C97, 'M', 'თ'), - (0x1C98, 'M', 'ი'), - (0x1C99, 'M', 'კ'), - (0x1C9A, 'M', 'ლ'), - (0x1C9B, 'M', 'მ'), - (0x1C9C, 'M', 'ნ'), - (0x1C9D, 'M', 'ო'), - (0x1C9E, 'M', 'პ'), - (0x1C9F, 'M', 'ჟ'), - (0x1CA0, 'M', 'რ'), - (0x1CA1, 'M', 'ს'), - (0x1CA2, 'M', 'ტ'), - (0x1CA3, 'M', 'უ'), - (0x1CA4, 'M', 'ფ'), - (0x1CA5, 'M', 'ქ'), - (0x1CA6, 'M', 'ღ'), - (0x1CA7, 'M', 'ყ'), - (0x1CA8, 'M', 'შ'), - (0x1CA9, 'M', 'ჩ'), - (0x1CAA, 'M', 'ც'), - (0x1CAB, 'M', 'ძ'), - (0x1CAC, 'M', 'წ'), - (0x1CAD, 'M', 'ჭ'), - (0x1CAE, 'M', 'ხ'), - (0x1CAF, 'M', 'ჯ'), - (0x1CB0, 'M', 'ჰ'), - (0x1CB1, 'M', 'ჱ'), - (0x1CB2, 'M', 'ჲ'), - (0x1CB3, 'M', 'ჳ'), - (0x1CB4, 'M', 'ჴ'), - (0x1CB5, 'M', 'ჵ'), - (0x1CB6, 'M', 'ჶ'), - (0x1CB7, 'M', 'ჷ'), - (0x1CB8, 'M', 'ჸ'), - (0x1CB9, 'M', 'ჹ'), - (0x1CBA, 'M', 'ჺ'), - (0x1CBB, 'X'), - (0x1CBD, 'M', 'ჽ'), - (0x1CBE, 'M', 'ჾ'), - (0x1CBF, 'M', 'ჿ'), - (0x1CC0, 'V'), - (0x1CC8, 'X'), - (0x1CD0, 'V'), - (0x1CFB, 'X'), - (0x1D00, 'V'), - (0x1D2C, 'M', 'a'), - (0x1D2D, 'M', 'æ'), - (0x1D2E, 'M', 'b'), - (0x1D2F, 'V'), - (0x1D30, 'M', 'd'), - (0x1D31, 'M', 'e'), - (0x1D32, 'M', 'ǝ'), - (0x1D33, 'M', 'g'), - (0x1D34, 'M', 'h'), - (0x1D35, 'M', 'i'), - (0x1D36, 'M', 'j'), - (0x1D37, 'M', 'k'), - (0x1D38, 'M', 'l'), - (0x1D39, 'M', 'm'), - (0x1D3A, 'M', 'n'), - (0x1D3B, 'V'), - (0x1D3C, 'M', 'o'), - (0x1D3D, 'M', 'ȣ'), - (0x1D3E, 'M', 'p'), - (0x1D3F, 'M', 'r'), - (0x1D40, 'M', 't'), - (0x1D41, 'M', 'u'), - (0x1D42, 'M', 'w'), - (0x1D43, 'M', 'a'), - (0x1D44, 'M', 'ɐ'), - (0x1D45, 'M', 'ɑ'), - (0x1D46, 'M', 'ᴂ'), - (0x1D47, 'M', 'b'), - (0x1D48, 'M', 'd'), - (0x1D49, 'M', 'e'), - (0x1D4A, 'M', 'ə'), - (0x1D4B, 'M', 'ɛ'), - (0x1D4C, 'M', 'ɜ'), - (0x1D4D, 'M', 'g'), - (0x1D4E, 'V'), - (0x1D4F, 'M', 'k'), - (0x1D50, 'M', 'm'), - (0x1D51, 'M', 'ŋ'), - (0x1D52, 'M', 'o'), - (0x1D53, 'M', 'ɔ'), - ] - -def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D54, 'M', 'ᴖ'), - (0x1D55, 'M', 'ᴗ'), - (0x1D56, 'M', 'p'), - (0x1D57, 'M', 't'), - (0x1D58, 'M', 'u'), - (0x1D59, 'M', 'ᴝ'), - (0x1D5A, 'M', 'ɯ'), - (0x1D5B, 'M', 'v'), - (0x1D5C, 'M', 'ᴥ'), - (0x1D5D, 'M', 'β'), - (0x1D5E, 'M', 'γ'), - (0x1D5F, 'M', 'δ'), - (0x1D60, 'M', 'φ'), - (0x1D61, 'M', 'χ'), - (0x1D62, 'M', 'i'), - (0x1D63, 'M', 'r'), - (0x1D64, 'M', 'u'), - (0x1D65, 'M', 'v'), - (0x1D66, 'M', 'β'), - (0x1D67, 'M', 'γ'), - (0x1D68, 'M', 'ρ'), - (0x1D69, 'M', 'φ'), - (0x1D6A, 'M', 'χ'), - (0x1D6B, 'V'), - (0x1D78, 'M', 'н'), - (0x1D79, 'V'), - (0x1D9B, 'M', 'ɒ'), - (0x1D9C, 'M', 'c'), - (0x1D9D, 'M', 'ɕ'), - (0x1D9E, 'M', 'ð'), - (0x1D9F, 'M', 'ɜ'), - (0x1DA0, 'M', 'f'), - (0x1DA1, 'M', 'ɟ'), - (0x1DA2, 'M', 'ɡ'), - (0x1DA3, 'M', 'ɥ'), - (0x1DA4, 'M', 'ɨ'), - (0x1DA5, 'M', 'ɩ'), - (0x1DA6, 'M', 'ɪ'), - (0x1DA7, 'M', 'ᵻ'), - (0x1DA8, 'M', 'ʝ'), - (0x1DA9, 'M', 'ɭ'), - (0x1DAA, 'M', 'ᶅ'), - (0x1DAB, 'M', 'ʟ'), - (0x1DAC, 'M', 'ɱ'), - (0x1DAD, 'M', 'ɰ'), - (0x1DAE, 'M', 'ɲ'), - (0x1DAF, 'M', 'ɳ'), - (0x1DB0, 'M', 'ɴ'), - (0x1DB1, 'M', 'ɵ'), - (0x1DB2, 'M', 'ɸ'), - (0x1DB3, 'M', 'ʂ'), - (0x1DB4, 'M', 'ʃ'), - (0x1DB5, 'M', 'ƫ'), - (0x1DB6, 'M', 'ʉ'), - (0x1DB7, 'M', 'ʊ'), - (0x1DB8, 'M', 'ᴜ'), - (0x1DB9, 'M', 'ʋ'), - (0x1DBA, 'M', 'ʌ'), - (0x1DBB, 'M', 'z'), - (0x1DBC, 'M', 'ʐ'), - (0x1DBD, 'M', 'ʑ'), - (0x1DBE, 'M', 'ʒ'), - (0x1DBF, 'M', 'θ'), - (0x1DC0, 'V'), - (0x1E00, 'M', 'ḁ'), - (0x1E01, 'V'), - (0x1E02, 'M', 'ḃ'), - (0x1E03, 'V'), - (0x1E04, 'M', 'ḅ'), - (0x1E05, 'V'), - (0x1E06, 'M', 'ḇ'), - (0x1E07, 'V'), - (0x1E08, 'M', 'ḉ'), - (0x1E09, 'V'), - (0x1E0A, 'M', 'ḋ'), - (0x1E0B, 'V'), - (0x1E0C, 'M', 'ḍ'), - (0x1E0D, 'V'), - (0x1E0E, 'M', 'ḏ'), - (0x1E0F, 'V'), - (0x1E10, 'M', 'ḑ'), - (0x1E11, 'V'), - (0x1E12, 'M', 'ḓ'), - (0x1E13, 'V'), - (0x1E14, 'M', 'ḕ'), - (0x1E15, 'V'), - (0x1E16, 'M', 'ḗ'), - (0x1E17, 'V'), - (0x1E18, 'M', 'ḙ'), - (0x1E19, 'V'), - (0x1E1A, 'M', 'ḛ'), - (0x1E1B, 'V'), - (0x1E1C, 'M', 'ḝ'), - (0x1E1D, 'V'), - (0x1E1E, 'M', 'ḟ'), - (0x1E1F, 'V'), - (0x1E20, 'M', 'ḡ'), - (0x1E21, 'V'), - (0x1E22, 'M', 'ḣ'), - (0x1E23, 'V'), - ] - -def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E24, 'M', 'ḥ'), - (0x1E25, 'V'), - (0x1E26, 'M', 'ḧ'), - (0x1E27, 'V'), - (0x1E28, 'M', 'ḩ'), - (0x1E29, 'V'), - (0x1E2A, 'M', 'ḫ'), - (0x1E2B, 'V'), - (0x1E2C, 'M', 'ḭ'), - (0x1E2D, 'V'), - (0x1E2E, 'M', 'ḯ'), - (0x1E2F, 'V'), - (0x1E30, 'M', 'ḱ'), - (0x1E31, 'V'), - (0x1E32, 'M', 'ḳ'), - (0x1E33, 'V'), - (0x1E34, 'M', 'ḵ'), - (0x1E35, 'V'), - (0x1E36, 'M', 'ḷ'), - (0x1E37, 'V'), - (0x1E38, 'M', 'ḹ'), - (0x1E39, 'V'), - (0x1E3A, 'M', 'ḻ'), - (0x1E3B, 'V'), - (0x1E3C, 'M', 'ḽ'), - (0x1E3D, 'V'), - (0x1E3E, 'M', 'ḿ'), - (0x1E3F, 'V'), - (0x1E40, 'M', 'ṁ'), - (0x1E41, 'V'), - (0x1E42, 'M', 'ṃ'), - (0x1E43, 'V'), - (0x1E44, 'M', 'ṅ'), - (0x1E45, 'V'), - (0x1E46, 'M', 'ṇ'), - (0x1E47, 'V'), - (0x1E48, 'M', 'ṉ'), - (0x1E49, 'V'), - (0x1E4A, 'M', 'ṋ'), - (0x1E4B, 'V'), - (0x1E4C, 'M', 'ṍ'), - (0x1E4D, 'V'), - (0x1E4E, 'M', 'ṏ'), - (0x1E4F, 'V'), - (0x1E50, 'M', 'ṑ'), - (0x1E51, 'V'), - (0x1E52, 'M', 'ṓ'), - (0x1E53, 'V'), - (0x1E54, 'M', 'ṕ'), - (0x1E55, 'V'), - (0x1E56, 'M', 'ṗ'), - (0x1E57, 'V'), - (0x1E58, 'M', 'ṙ'), - (0x1E59, 'V'), - (0x1E5A, 'M', 'ṛ'), - (0x1E5B, 'V'), - (0x1E5C, 'M', 'ṝ'), - (0x1E5D, 'V'), - (0x1E5E, 'M', 'ṟ'), - (0x1E5F, 'V'), - (0x1E60, 'M', 'ṡ'), - (0x1E61, 'V'), - (0x1E62, 'M', 'ṣ'), - (0x1E63, 'V'), - (0x1E64, 'M', 'ṥ'), - (0x1E65, 'V'), - (0x1E66, 'M', 'ṧ'), - (0x1E67, 'V'), - (0x1E68, 'M', 'ṩ'), - (0x1E69, 'V'), - (0x1E6A, 'M', 'ṫ'), - (0x1E6B, 'V'), - (0x1E6C, 'M', 'ṭ'), - (0x1E6D, 'V'), - (0x1E6E, 'M', 'ṯ'), - (0x1E6F, 'V'), - (0x1E70, 'M', 'ṱ'), - (0x1E71, 'V'), - (0x1E72, 'M', 'ṳ'), - (0x1E73, 'V'), - (0x1E74, 'M', 'ṵ'), - (0x1E75, 'V'), - (0x1E76, 'M', 'ṷ'), - (0x1E77, 'V'), - (0x1E78, 'M', 'ṹ'), - (0x1E79, 'V'), - (0x1E7A, 'M', 'ṻ'), - (0x1E7B, 'V'), - (0x1E7C, 'M', 'ṽ'), - (0x1E7D, 'V'), - (0x1E7E, 'M', 'ṿ'), - (0x1E7F, 'V'), - (0x1E80, 'M', 'ẁ'), - (0x1E81, 'V'), - (0x1E82, 'M', 'ẃ'), - (0x1E83, 'V'), - (0x1E84, 'M', 'ẅ'), - (0x1E85, 'V'), - (0x1E86, 'M', 'ẇ'), - (0x1E87, 'V'), - ] - -def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E88, 'M', 'ẉ'), - (0x1E89, 'V'), - (0x1E8A, 'M', 'ẋ'), - (0x1E8B, 'V'), - (0x1E8C, 'M', 'ẍ'), - (0x1E8D, 'V'), - (0x1E8E, 'M', 'ẏ'), - (0x1E8F, 'V'), - (0x1E90, 'M', 'ẑ'), - (0x1E91, 'V'), - (0x1E92, 'M', 'ẓ'), - (0x1E93, 'V'), - (0x1E94, 'M', 'ẕ'), - (0x1E95, 'V'), - (0x1E9A, 'M', 'aʾ'), - (0x1E9B, 'M', 'ṡ'), - (0x1E9C, 'V'), - (0x1E9E, 'M', 'ss'), - (0x1E9F, 'V'), - (0x1EA0, 'M', 'ạ'), - (0x1EA1, 'V'), - (0x1EA2, 'M', 'ả'), - (0x1EA3, 'V'), - (0x1EA4, 'M', 'ấ'), - (0x1EA5, 'V'), - (0x1EA6, 'M', 'ầ'), - (0x1EA7, 'V'), - (0x1EA8, 'M', 'ẩ'), - (0x1EA9, 'V'), - (0x1EAA, 'M', 'ẫ'), - (0x1EAB, 'V'), - (0x1EAC, 'M', 'ậ'), - (0x1EAD, 'V'), - (0x1EAE, 'M', 'ắ'), - (0x1EAF, 'V'), - (0x1EB0, 'M', 'ằ'), - (0x1EB1, 'V'), - (0x1EB2, 'M', 'ẳ'), - (0x1EB3, 'V'), - (0x1EB4, 'M', 'ẵ'), - (0x1EB5, 'V'), - (0x1EB6, 'M', 'ặ'), - (0x1EB7, 'V'), - (0x1EB8, 'M', 'ẹ'), - (0x1EB9, 'V'), - (0x1EBA, 'M', 'ẻ'), - (0x1EBB, 'V'), - (0x1EBC, 'M', 'ẽ'), - (0x1EBD, 'V'), - (0x1EBE, 'M', 'ế'), - (0x1EBF, 'V'), - (0x1EC0, 'M', 'ề'), - (0x1EC1, 'V'), - (0x1EC2, 'M', 'ể'), - (0x1EC3, 'V'), - (0x1EC4, 'M', 'ễ'), - (0x1EC5, 'V'), - (0x1EC6, 'M', 'ệ'), - (0x1EC7, 'V'), - (0x1EC8, 'M', 'ỉ'), - (0x1EC9, 'V'), - (0x1ECA, 'M', 'ị'), - (0x1ECB, 'V'), - (0x1ECC, 'M', 'ọ'), - (0x1ECD, 'V'), - (0x1ECE, 'M', 'ỏ'), - (0x1ECF, 'V'), - (0x1ED0, 'M', 'ố'), - (0x1ED1, 'V'), - (0x1ED2, 'M', 'ồ'), - (0x1ED3, 'V'), - (0x1ED4, 'M', 'ổ'), - (0x1ED5, 'V'), - (0x1ED6, 'M', 'ỗ'), - (0x1ED7, 'V'), - (0x1ED8, 'M', 'ộ'), - (0x1ED9, 'V'), - (0x1EDA, 'M', 'ớ'), - (0x1EDB, 'V'), - (0x1EDC, 'M', 'ờ'), - (0x1EDD, 'V'), - (0x1EDE, 'M', 'ở'), - (0x1EDF, 'V'), - (0x1EE0, 'M', 'ỡ'), - (0x1EE1, 'V'), - (0x1EE2, 'M', 'ợ'), - (0x1EE3, 'V'), - (0x1EE4, 'M', 'ụ'), - (0x1EE5, 'V'), - (0x1EE6, 'M', 'ủ'), - (0x1EE7, 'V'), - (0x1EE8, 'M', 'ứ'), - (0x1EE9, 'V'), - (0x1EEA, 'M', 'ừ'), - (0x1EEB, 'V'), - (0x1EEC, 'M', 'ử'), - (0x1EED, 'V'), - (0x1EEE, 'M', 'ữ'), - (0x1EEF, 'V'), - (0x1EF0, 'M', 'ự'), - ] - -def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EF1, 'V'), - (0x1EF2, 'M', 'ỳ'), - (0x1EF3, 'V'), - (0x1EF4, 'M', 'ỵ'), - (0x1EF5, 'V'), - (0x1EF6, 'M', 'ỷ'), - (0x1EF7, 'V'), - (0x1EF8, 'M', 'ỹ'), - (0x1EF9, 'V'), - (0x1EFA, 'M', 'ỻ'), - (0x1EFB, 'V'), - (0x1EFC, 'M', 'ỽ'), - (0x1EFD, 'V'), - (0x1EFE, 'M', 'ỿ'), - (0x1EFF, 'V'), - (0x1F08, 'M', 'ἀ'), - (0x1F09, 'M', 'ἁ'), - (0x1F0A, 'M', 'ἂ'), - (0x1F0B, 'M', 'ἃ'), - (0x1F0C, 'M', 'ἄ'), - (0x1F0D, 'M', 'ἅ'), - (0x1F0E, 'M', 'ἆ'), - (0x1F0F, 'M', 'ἇ'), - (0x1F10, 'V'), - (0x1F16, 'X'), - (0x1F18, 'M', 'ἐ'), - (0x1F19, 'M', 'ἑ'), - (0x1F1A, 'M', 'ἒ'), - (0x1F1B, 'M', 'ἓ'), - (0x1F1C, 'M', 'ἔ'), - (0x1F1D, 'M', 'ἕ'), - (0x1F1E, 'X'), - (0x1F20, 'V'), - (0x1F28, 'M', 'ἠ'), - (0x1F29, 'M', 'ἡ'), - (0x1F2A, 'M', 'ἢ'), - (0x1F2B, 'M', 'ἣ'), - (0x1F2C, 'M', 'ἤ'), - (0x1F2D, 'M', 'ἥ'), - (0x1F2E, 'M', 'ἦ'), - (0x1F2F, 'M', 'ἧ'), - (0x1F30, 'V'), - (0x1F38, 'M', 'ἰ'), - (0x1F39, 'M', 'ἱ'), - (0x1F3A, 'M', 'ἲ'), - (0x1F3B, 'M', 'ἳ'), - (0x1F3C, 'M', 'ἴ'), - (0x1F3D, 'M', 'ἵ'), - (0x1F3E, 'M', 'ἶ'), - (0x1F3F, 'M', 'ἷ'), - (0x1F40, 'V'), - (0x1F46, 'X'), - (0x1F48, 'M', 'ὀ'), - (0x1F49, 'M', 'ὁ'), - (0x1F4A, 'M', 'ὂ'), - (0x1F4B, 'M', 'ὃ'), - (0x1F4C, 'M', 'ὄ'), - (0x1F4D, 'M', 'ὅ'), - (0x1F4E, 'X'), - (0x1F50, 'V'), - (0x1F58, 'X'), - (0x1F59, 'M', 'ὑ'), - (0x1F5A, 'X'), - (0x1F5B, 'M', 'ὓ'), - (0x1F5C, 'X'), - (0x1F5D, 'M', 'ὕ'), - (0x1F5E, 'X'), - (0x1F5F, 'M', 'ὗ'), - (0x1F60, 'V'), - (0x1F68, 'M', 'ὠ'), - (0x1F69, 'M', 'ὡ'), - (0x1F6A, 'M', 'ὢ'), - (0x1F6B, 'M', 'ὣ'), - (0x1F6C, 'M', 'ὤ'), - (0x1F6D, 'M', 'ὥ'), - (0x1F6E, 'M', 'ὦ'), - (0x1F6F, 'M', 'ὧ'), - (0x1F70, 'V'), - (0x1F71, 'M', 'ά'), - (0x1F72, 'V'), - (0x1F73, 'M', 'έ'), - (0x1F74, 'V'), - (0x1F75, 'M', 'ή'), - (0x1F76, 'V'), - (0x1F77, 'M', 'ί'), - (0x1F78, 'V'), - (0x1F79, 'M', 'ό'), - (0x1F7A, 'V'), - (0x1F7B, 'M', 'ύ'), - (0x1F7C, 'V'), - (0x1F7D, 'M', 'ώ'), - (0x1F7E, 'X'), - (0x1F80, 'M', 'ἀι'), - (0x1F81, 'M', 'ἁι'), - (0x1F82, 'M', 'ἂι'), - (0x1F83, 'M', 'ἃι'), - (0x1F84, 'M', 'ἄι'), - (0x1F85, 'M', 'ἅι'), - (0x1F86, 'M', 'ἆι'), - (0x1F87, 'M', 'ἇι'), - ] - -def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F88, 'M', 'ἀι'), - (0x1F89, 'M', 'ἁι'), - (0x1F8A, 'M', 'ἂι'), - (0x1F8B, 'M', 'ἃι'), - (0x1F8C, 'M', 'ἄι'), - (0x1F8D, 'M', 'ἅι'), - (0x1F8E, 'M', 'ἆι'), - (0x1F8F, 'M', 'ἇι'), - (0x1F90, 'M', 'ἠι'), - (0x1F91, 'M', 'ἡι'), - (0x1F92, 'M', 'ἢι'), - (0x1F93, 'M', 'ἣι'), - (0x1F94, 'M', 'ἤι'), - (0x1F95, 'M', 'ἥι'), - (0x1F96, 'M', 'ἦι'), - (0x1F97, 'M', 'ἧι'), - (0x1F98, 'M', 'ἠι'), - (0x1F99, 'M', 'ἡι'), - (0x1F9A, 'M', 'ἢι'), - (0x1F9B, 'M', 'ἣι'), - (0x1F9C, 'M', 'ἤι'), - (0x1F9D, 'M', 'ἥι'), - (0x1F9E, 'M', 'ἦι'), - (0x1F9F, 'M', 'ἧι'), - (0x1FA0, 'M', 'ὠι'), - (0x1FA1, 'M', 'ὡι'), - (0x1FA2, 'M', 'ὢι'), - (0x1FA3, 'M', 'ὣι'), - (0x1FA4, 'M', 'ὤι'), - (0x1FA5, 'M', 'ὥι'), - (0x1FA6, 'M', 'ὦι'), - (0x1FA7, 'M', 'ὧι'), - (0x1FA8, 'M', 'ὠι'), - (0x1FA9, 'M', 'ὡι'), - (0x1FAA, 'M', 'ὢι'), - (0x1FAB, 'M', 'ὣι'), - (0x1FAC, 'M', 'ὤι'), - (0x1FAD, 'M', 'ὥι'), - (0x1FAE, 'M', 'ὦι'), - (0x1FAF, 'M', 'ὧι'), - (0x1FB0, 'V'), - (0x1FB2, 'M', 'ὰι'), - (0x1FB3, 'M', 'αι'), - (0x1FB4, 'M', 'άι'), - (0x1FB5, 'X'), - (0x1FB6, 'V'), - (0x1FB7, 'M', 'ᾶι'), - (0x1FB8, 'M', 'ᾰ'), - (0x1FB9, 'M', 'ᾱ'), - (0x1FBA, 'M', 'ὰ'), - (0x1FBB, 'M', 'ά'), - (0x1FBC, 'M', 'αι'), - (0x1FBD, '3', ' ̓'), - (0x1FBE, 'M', 'ι'), - (0x1FBF, '3', ' ̓'), - (0x1FC0, '3', ' ͂'), - (0x1FC1, '3', ' ̈͂'), - (0x1FC2, 'M', 'ὴι'), - (0x1FC3, 'M', 'ηι'), - (0x1FC4, 'M', 'ήι'), - (0x1FC5, 'X'), - (0x1FC6, 'V'), - (0x1FC7, 'M', 'ῆι'), - (0x1FC8, 'M', 'ὲ'), - (0x1FC9, 'M', 'έ'), - (0x1FCA, 'M', 'ὴ'), - (0x1FCB, 'M', 'ή'), - (0x1FCC, 'M', 'ηι'), - (0x1FCD, '3', ' ̓̀'), - (0x1FCE, '3', ' ̓́'), - (0x1FCF, '3', ' ̓͂'), - (0x1FD0, 'V'), - (0x1FD3, 'M', 'ΐ'), - (0x1FD4, 'X'), - (0x1FD6, 'V'), - (0x1FD8, 'M', 'ῐ'), - (0x1FD9, 'M', 'ῑ'), - (0x1FDA, 'M', 'ὶ'), - (0x1FDB, 'M', 'ί'), - (0x1FDC, 'X'), - (0x1FDD, '3', ' ̔̀'), - (0x1FDE, '3', ' ̔́'), - (0x1FDF, '3', ' ̔͂'), - (0x1FE0, 'V'), - (0x1FE3, 'M', 'ΰ'), - (0x1FE4, 'V'), - (0x1FE8, 'M', 'ῠ'), - (0x1FE9, 'M', 'ῡ'), - (0x1FEA, 'M', 'ὺ'), - (0x1FEB, 'M', 'ύ'), - (0x1FEC, 'M', 'ῥ'), - (0x1FED, '3', ' ̈̀'), - (0x1FEE, '3', ' ̈́'), - (0x1FEF, '3', '`'), - (0x1FF0, 'X'), - (0x1FF2, 'M', 'ὼι'), - (0x1FF3, 'M', 'ωι'), - (0x1FF4, 'M', 'ώι'), - (0x1FF5, 'X'), - (0x1FF6, 'V'), - ] - -def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1FF7, 'M', 'ῶι'), - (0x1FF8, 'M', 'ὸ'), - (0x1FF9, 'M', 'ό'), - (0x1FFA, 'M', 'ὼ'), - (0x1FFB, 'M', 'ώ'), - (0x1FFC, 'M', 'ωι'), - (0x1FFD, '3', ' ́'), - (0x1FFE, '3', ' ̔'), - (0x1FFF, 'X'), - (0x2000, '3', ' '), - (0x200B, 'I'), - (0x200C, 'D', ''), - (0x200E, 'X'), - (0x2010, 'V'), - (0x2011, 'M', '‐'), - (0x2012, 'V'), - (0x2017, '3', ' ̳'), - (0x2018, 'V'), - (0x2024, 'X'), - (0x2027, 'V'), - (0x2028, 'X'), - (0x202F, '3', ' '), - (0x2030, 'V'), - (0x2033, 'M', '′′'), - (0x2034, 'M', '′′′'), - (0x2035, 'V'), - (0x2036, 'M', '‵‵'), - (0x2037, 'M', '‵‵‵'), - (0x2038, 'V'), - (0x203C, '3', '!!'), - (0x203D, 'V'), - (0x203E, '3', ' ̅'), - (0x203F, 'V'), - (0x2047, '3', '??'), - (0x2048, '3', '?!'), - (0x2049, '3', '!?'), - (0x204A, 'V'), - (0x2057, 'M', '′′′′'), - (0x2058, 'V'), - (0x205F, '3', ' '), - (0x2060, 'I'), - (0x2061, 'X'), - (0x2064, 'I'), - (0x2065, 'X'), - (0x2070, 'M', '0'), - (0x2071, 'M', 'i'), - (0x2072, 'X'), - (0x2074, 'M', '4'), - (0x2075, 'M', '5'), - (0x2076, 'M', '6'), - (0x2077, 'M', '7'), - (0x2078, 'M', '8'), - (0x2079, 'M', '9'), - (0x207A, '3', '+'), - (0x207B, 'M', '−'), - (0x207C, '3', '='), - (0x207D, '3', '('), - (0x207E, '3', ')'), - (0x207F, 'M', 'n'), - (0x2080, 'M', '0'), - (0x2081, 'M', '1'), - (0x2082, 'M', '2'), - (0x2083, 'M', '3'), - (0x2084, 'M', '4'), - (0x2085, 'M', '5'), - (0x2086, 'M', '6'), - (0x2087, 'M', '7'), - (0x2088, 'M', '8'), - (0x2089, 'M', '9'), - (0x208A, '3', '+'), - (0x208B, 'M', '−'), - (0x208C, '3', '='), - (0x208D, '3', '('), - (0x208E, '3', ')'), - (0x208F, 'X'), - (0x2090, 'M', 'a'), - (0x2091, 'M', 'e'), - (0x2092, 'M', 'o'), - (0x2093, 'M', 'x'), - (0x2094, 'M', 'ə'), - (0x2095, 'M', 'h'), - (0x2096, 'M', 'k'), - (0x2097, 'M', 'l'), - (0x2098, 'M', 'm'), - (0x2099, 'M', 'n'), - (0x209A, 'M', 'p'), - (0x209B, 'M', 's'), - (0x209C, 'M', 't'), - (0x209D, 'X'), - (0x20A0, 'V'), - (0x20A8, 'M', 'rs'), - (0x20A9, 'V'), - (0x20C1, 'X'), - (0x20D0, 'V'), - (0x20F1, 'X'), - (0x2100, '3', 'a/c'), - (0x2101, '3', 'a/s'), - (0x2102, 'M', 'c'), - (0x2103, 'M', '°c'), - (0x2104, 'V'), - ] - -def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2105, '3', 'c/o'), - (0x2106, '3', 'c/u'), - (0x2107, 'M', 'ɛ'), - (0x2108, 'V'), - (0x2109, 'M', '°f'), - (0x210A, 'M', 'g'), - (0x210B, 'M', 'h'), - (0x210F, 'M', 'ħ'), - (0x2110, 'M', 'i'), - (0x2112, 'M', 'l'), - (0x2114, 'V'), - (0x2115, 'M', 'n'), - (0x2116, 'M', 'no'), - (0x2117, 'V'), - (0x2119, 'M', 'p'), - (0x211A, 'M', 'q'), - (0x211B, 'M', 'r'), - (0x211E, 'V'), - (0x2120, 'M', 'sm'), - (0x2121, 'M', 'tel'), - (0x2122, 'M', 'tm'), - (0x2123, 'V'), - (0x2124, 'M', 'z'), - (0x2125, 'V'), - (0x2126, 'M', 'ω'), - (0x2127, 'V'), - (0x2128, 'M', 'z'), - (0x2129, 'V'), - (0x212A, 'M', 'k'), - (0x212B, 'M', 'å'), - (0x212C, 'M', 'b'), - (0x212D, 'M', 'c'), - (0x212E, 'V'), - (0x212F, 'M', 'e'), - (0x2131, 'M', 'f'), - (0x2132, 'X'), - (0x2133, 'M', 'm'), - (0x2134, 'M', 'o'), - (0x2135, 'M', 'א'), - (0x2136, 'M', 'ב'), - (0x2137, 'M', 'ג'), - (0x2138, 'M', 'ד'), - (0x2139, 'M', 'i'), - (0x213A, 'V'), - (0x213B, 'M', 'fax'), - (0x213C, 'M', 'π'), - (0x213D, 'M', 'γ'), - (0x213F, 'M', 'π'), - (0x2140, 'M', '∑'), - (0x2141, 'V'), - (0x2145, 'M', 'd'), - (0x2147, 'M', 'e'), - (0x2148, 'M', 'i'), - (0x2149, 'M', 'j'), - (0x214A, 'V'), - (0x2150, 'M', '1⁄7'), - (0x2151, 'M', '1⁄9'), - (0x2152, 'M', '1⁄10'), - (0x2153, 'M', '1⁄3'), - (0x2154, 'M', '2⁄3'), - (0x2155, 'M', '1⁄5'), - (0x2156, 'M', '2⁄5'), - (0x2157, 'M', '3⁄5'), - (0x2158, 'M', '4⁄5'), - (0x2159, 'M', '1⁄6'), - (0x215A, 'M', '5⁄6'), - (0x215B, 'M', '1⁄8'), - (0x215C, 'M', '3⁄8'), - (0x215D, 'M', '5⁄8'), - (0x215E, 'M', '7⁄8'), - (0x215F, 'M', '1⁄'), - (0x2160, 'M', 'i'), - (0x2161, 'M', 'ii'), - (0x2162, 'M', 'iii'), - (0x2163, 'M', 'iv'), - (0x2164, 'M', 'v'), - (0x2165, 'M', 'vi'), - (0x2166, 'M', 'vii'), - (0x2167, 'M', 'viii'), - (0x2168, 'M', 'ix'), - (0x2169, 'M', 'x'), - (0x216A, 'M', 'xi'), - (0x216B, 'M', 'xii'), - (0x216C, 'M', 'l'), - (0x216D, 'M', 'c'), - (0x216E, 'M', 'd'), - (0x216F, 'M', 'm'), - (0x2170, 'M', 'i'), - (0x2171, 'M', 'ii'), - (0x2172, 'M', 'iii'), - (0x2173, 'M', 'iv'), - (0x2174, 'M', 'v'), - (0x2175, 'M', 'vi'), - (0x2176, 'M', 'vii'), - (0x2177, 'M', 'viii'), - (0x2178, 'M', 'ix'), - (0x2179, 'M', 'x'), - (0x217A, 'M', 'xi'), - (0x217B, 'M', 'xii'), - (0x217C, 'M', 'l'), - ] - -def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x217D, 'M', 'c'), - (0x217E, 'M', 'd'), - (0x217F, 'M', 'm'), - (0x2180, 'V'), - (0x2183, 'X'), - (0x2184, 'V'), - (0x2189, 'M', '0⁄3'), - (0x218A, 'V'), - (0x218C, 'X'), - (0x2190, 'V'), - (0x222C, 'M', '∫∫'), - (0x222D, 'M', '∫∫∫'), - (0x222E, 'V'), - (0x222F, 'M', '∮∮'), - (0x2230, 'M', '∮∮∮'), - (0x2231, 'V'), - (0x2260, '3'), - (0x2261, 'V'), - (0x226E, '3'), - (0x2270, 'V'), - (0x2329, 'M', '〈'), - (0x232A, 'M', '〉'), - (0x232B, 'V'), - (0x2427, 'X'), - (0x2440, 'V'), - (0x244B, 'X'), - (0x2460, 'M', '1'), - (0x2461, 'M', '2'), - (0x2462, 'M', '3'), - (0x2463, 'M', '4'), - (0x2464, 'M', '5'), - (0x2465, 'M', '6'), - (0x2466, 'M', '7'), - (0x2467, 'M', '8'), - (0x2468, 'M', '9'), - (0x2469, 'M', '10'), - (0x246A, 'M', '11'), - (0x246B, 'M', '12'), - (0x246C, 'M', '13'), - (0x246D, 'M', '14'), - (0x246E, 'M', '15'), - (0x246F, 'M', '16'), - (0x2470, 'M', '17'), - (0x2471, 'M', '18'), - (0x2472, 'M', '19'), - (0x2473, 'M', '20'), - (0x2474, '3', '(1)'), - (0x2475, '3', '(2)'), - (0x2476, '3', '(3)'), - (0x2477, '3', '(4)'), - (0x2478, '3', '(5)'), - (0x2479, '3', '(6)'), - (0x247A, '3', '(7)'), - (0x247B, '3', '(8)'), - (0x247C, '3', '(9)'), - (0x247D, '3', '(10)'), - (0x247E, '3', '(11)'), - (0x247F, '3', '(12)'), - (0x2480, '3', '(13)'), - (0x2481, '3', '(14)'), - (0x2482, '3', '(15)'), - (0x2483, '3', '(16)'), - (0x2484, '3', '(17)'), - (0x2485, '3', '(18)'), - (0x2486, '3', '(19)'), - (0x2487, '3', '(20)'), - (0x2488, 'X'), - (0x249C, '3', '(a)'), - (0x249D, '3', '(b)'), - (0x249E, '3', '(c)'), - (0x249F, '3', '(d)'), - (0x24A0, '3', '(e)'), - (0x24A1, '3', '(f)'), - (0x24A2, '3', '(g)'), - (0x24A3, '3', '(h)'), - (0x24A4, '3', '(i)'), - (0x24A5, '3', '(j)'), - (0x24A6, '3', '(k)'), - (0x24A7, '3', '(l)'), - (0x24A8, '3', '(m)'), - (0x24A9, '3', '(n)'), - (0x24AA, '3', '(o)'), - (0x24AB, '3', '(p)'), - (0x24AC, '3', '(q)'), - (0x24AD, '3', '(r)'), - (0x24AE, '3', '(s)'), - (0x24AF, '3', '(t)'), - (0x24B0, '3', '(u)'), - (0x24B1, '3', '(v)'), - (0x24B2, '3', '(w)'), - (0x24B3, '3', '(x)'), - (0x24B4, '3', '(y)'), - (0x24B5, '3', '(z)'), - (0x24B6, 'M', 'a'), - (0x24B7, 'M', 'b'), - (0x24B8, 'M', 'c'), - (0x24B9, 'M', 'd'), - (0x24BA, 'M', 'e'), - (0x24BB, 'M', 'f'), - (0x24BC, 'M', 'g'), - ] - -def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x24BD, 'M', 'h'), - (0x24BE, 'M', 'i'), - (0x24BF, 'M', 'j'), - (0x24C0, 'M', 'k'), - (0x24C1, 'M', 'l'), - (0x24C2, 'M', 'm'), - (0x24C3, 'M', 'n'), - (0x24C4, 'M', 'o'), - (0x24C5, 'M', 'p'), - (0x24C6, 'M', 'q'), - (0x24C7, 'M', 'r'), - (0x24C8, 'M', 's'), - (0x24C9, 'M', 't'), - (0x24CA, 'M', 'u'), - (0x24CB, 'M', 'v'), - (0x24CC, 'M', 'w'), - (0x24CD, 'M', 'x'), - (0x24CE, 'M', 'y'), - (0x24CF, 'M', 'z'), - (0x24D0, 'M', 'a'), - (0x24D1, 'M', 'b'), - (0x24D2, 'M', 'c'), - (0x24D3, 'M', 'd'), - (0x24D4, 'M', 'e'), - (0x24D5, 'M', 'f'), - (0x24D6, 'M', 'g'), - (0x24D7, 'M', 'h'), - (0x24D8, 'M', 'i'), - (0x24D9, 'M', 'j'), - (0x24DA, 'M', 'k'), - (0x24DB, 'M', 'l'), - (0x24DC, 'M', 'm'), - (0x24DD, 'M', 'n'), - (0x24DE, 'M', 'o'), - (0x24DF, 'M', 'p'), - (0x24E0, 'M', 'q'), - (0x24E1, 'M', 'r'), - (0x24E2, 'M', 's'), - (0x24E3, 'M', 't'), - (0x24E4, 'M', 'u'), - (0x24E5, 'M', 'v'), - (0x24E6, 'M', 'w'), - (0x24E7, 'M', 'x'), - (0x24E8, 'M', 'y'), - (0x24E9, 'M', 'z'), - (0x24EA, 'M', '0'), - (0x24EB, 'V'), - (0x2A0C, 'M', '∫∫∫∫'), - (0x2A0D, 'V'), - (0x2A74, '3', '::='), - (0x2A75, '3', '=='), - (0x2A76, '3', '==='), - (0x2A77, 'V'), - (0x2ADC, 'M', '⫝̸'), - (0x2ADD, 'V'), - (0x2B74, 'X'), - (0x2B76, 'V'), - (0x2B96, 'X'), - (0x2B97, 'V'), - (0x2C00, 'M', 'ⰰ'), - (0x2C01, 'M', 'ⰱ'), - (0x2C02, 'M', 'ⰲ'), - (0x2C03, 'M', 'ⰳ'), - (0x2C04, 'M', 'ⰴ'), - (0x2C05, 'M', 'ⰵ'), - (0x2C06, 'M', 'ⰶ'), - (0x2C07, 'M', 'ⰷ'), - (0x2C08, 'M', 'ⰸ'), - (0x2C09, 'M', 'ⰹ'), - (0x2C0A, 'M', 'ⰺ'), - (0x2C0B, 'M', 'ⰻ'), - (0x2C0C, 'M', 'ⰼ'), - (0x2C0D, 'M', 'ⰽ'), - (0x2C0E, 'M', 'ⰾ'), - (0x2C0F, 'M', 'ⰿ'), - (0x2C10, 'M', 'ⱀ'), - (0x2C11, 'M', 'ⱁ'), - (0x2C12, 'M', 'ⱂ'), - (0x2C13, 'M', 'ⱃ'), - (0x2C14, 'M', 'ⱄ'), - (0x2C15, 'M', 'ⱅ'), - (0x2C16, 'M', 'ⱆ'), - (0x2C17, 'M', 'ⱇ'), - (0x2C18, 'M', 'ⱈ'), - (0x2C19, 'M', 'ⱉ'), - (0x2C1A, 'M', 'ⱊ'), - (0x2C1B, 'M', 'ⱋ'), - (0x2C1C, 'M', 'ⱌ'), - (0x2C1D, 'M', 'ⱍ'), - (0x2C1E, 'M', 'ⱎ'), - (0x2C1F, 'M', 'ⱏ'), - (0x2C20, 'M', 'ⱐ'), - (0x2C21, 'M', 'ⱑ'), - (0x2C22, 'M', 'ⱒ'), - (0x2C23, 'M', 'ⱓ'), - (0x2C24, 'M', 'ⱔ'), - (0x2C25, 'M', 'ⱕ'), - (0x2C26, 'M', 'ⱖ'), - (0x2C27, 'M', 'ⱗ'), - (0x2C28, 'M', 'ⱘ'), - ] - -def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2C29, 'M', 'ⱙ'), - (0x2C2A, 'M', 'ⱚ'), - (0x2C2B, 'M', 'ⱛ'), - (0x2C2C, 'M', 'ⱜ'), - (0x2C2D, 'M', 'ⱝ'), - (0x2C2E, 'M', 'ⱞ'), - (0x2C2F, 'M', 'ⱟ'), - (0x2C30, 'V'), - (0x2C60, 'M', 'ⱡ'), - (0x2C61, 'V'), - (0x2C62, 'M', 'ɫ'), - (0x2C63, 'M', 'ᵽ'), - (0x2C64, 'M', 'ɽ'), - (0x2C65, 'V'), - (0x2C67, 'M', 'ⱨ'), - (0x2C68, 'V'), - (0x2C69, 'M', 'ⱪ'), - (0x2C6A, 'V'), - (0x2C6B, 'M', 'ⱬ'), - (0x2C6C, 'V'), - (0x2C6D, 'M', 'ɑ'), - (0x2C6E, 'M', 'ɱ'), - (0x2C6F, 'M', 'ɐ'), - (0x2C70, 'M', 'ɒ'), - (0x2C71, 'V'), - (0x2C72, 'M', 'ⱳ'), - (0x2C73, 'V'), - (0x2C75, 'M', 'ⱶ'), - (0x2C76, 'V'), - (0x2C7C, 'M', 'j'), - (0x2C7D, 'M', 'v'), - (0x2C7E, 'M', 'ȿ'), - (0x2C7F, 'M', 'ɀ'), - (0x2C80, 'M', 'ⲁ'), - (0x2C81, 'V'), - (0x2C82, 'M', 'ⲃ'), - (0x2C83, 'V'), - (0x2C84, 'M', 'ⲅ'), - (0x2C85, 'V'), - (0x2C86, 'M', 'ⲇ'), - (0x2C87, 'V'), - (0x2C88, 'M', 'ⲉ'), - (0x2C89, 'V'), - (0x2C8A, 'M', 'ⲋ'), - (0x2C8B, 'V'), - (0x2C8C, 'M', 'ⲍ'), - (0x2C8D, 'V'), - (0x2C8E, 'M', 'ⲏ'), - (0x2C8F, 'V'), - (0x2C90, 'M', 'ⲑ'), - (0x2C91, 'V'), - (0x2C92, 'M', 'ⲓ'), - (0x2C93, 'V'), - (0x2C94, 'M', 'ⲕ'), - (0x2C95, 'V'), - (0x2C96, 'M', 'ⲗ'), - (0x2C97, 'V'), - (0x2C98, 'M', 'ⲙ'), - (0x2C99, 'V'), - (0x2C9A, 'M', 'ⲛ'), - (0x2C9B, 'V'), - (0x2C9C, 'M', 'ⲝ'), - (0x2C9D, 'V'), - (0x2C9E, 'M', 'ⲟ'), - (0x2C9F, 'V'), - (0x2CA0, 'M', 'ⲡ'), - (0x2CA1, 'V'), - (0x2CA2, 'M', 'ⲣ'), - (0x2CA3, 'V'), - (0x2CA4, 'M', 'ⲥ'), - (0x2CA5, 'V'), - (0x2CA6, 'M', 'ⲧ'), - (0x2CA7, 'V'), - (0x2CA8, 'M', 'ⲩ'), - (0x2CA9, 'V'), - (0x2CAA, 'M', 'ⲫ'), - (0x2CAB, 'V'), - (0x2CAC, 'M', 'ⲭ'), - (0x2CAD, 'V'), - (0x2CAE, 'M', 'ⲯ'), - (0x2CAF, 'V'), - (0x2CB0, 'M', 'ⲱ'), - (0x2CB1, 'V'), - (0x2CB2, 'M', 'ⲳ'), - (0x2CB3, 'V'), - (0x2CB4, 'M', 'ⲵ'), - (0x2CB5, 'V'), - (0x2CB6, 'M', 'ⲷ'), - (0x2CB7, 'V'), - (0x2CB8, 'M', 'ⲹ'), - (0x2CB9, 'V'), - (0x2CBA, 'M', 'ⲻ'), - (0x2CBB, 'V'), - (0x2CBC, 'M', 'ⲽ'), - (0x2CBD, 'V'), - (0x2CBE, 'M', 'ⲿ'), - (0x2CBF, 'V'), - (0x2CC0, 'M', 'ⳁ'), - (0x2CC1, 'V'), - (0x2CC2, 'M', 'ⳃ'), - ] - -def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2CC3, 'V'), - (0x2CC4, 'M', 'ⳅ'), - (0x2CC5, 'V'), - (0x2CC6, 'M', 'ⳇ'), - (0x2CC7, 'V'), - (0x2CC8, 'M', 'ⳉ'), - (0x2CC9, 'V'), - (0x2CCA, 'M', 'ⳋ'), - (0x2CCB, 'V'), - (0x2CCC, 'M', 'ⳍ'), - (0x2CCD, 'V'), - (0x2CCE, 'M', 'ⳏ'), - (0x2CCF, 'V'), - (0x2CD0, 'M', 'ⳑ'), - (0x2CD1, 'V'), - (0x2CD2, 'M', 'ⳓ'), - (0x2CD3, 'V'), - (0x2CD4, 'M', 'ⳕ'), - (0x2CD5, 'V'), - (0x2CD6, 'M', 'ⳗ'), - (0x2CD7, 'V'), - (0x2CD8, 'M', 'ⳙ'), - (0x2CD9, 'V'), - (0x2CDA, 'M', 'ⳛ'), - (0x2CDB, 'V'), - (0x2CDC, 'M', 'ⳝ'), - (0x2CDD, 'V'), - (0x2CDE, 'M', 'ⳟ'), - (0x2CDF, 'V'), - (0x2CE0, 'M', 'ⳡ'), - (0x2CE1, 'V'), - (0x2CE2, 'M', 'ⳣ'), - (0x2CE3, 'V'), - (0x2CEB, 'M', 'ⳬ'), - (0x2CEC, 'V'), - (0x2CED, 'M', 'ⳮ'), - (0x2CEE, 'V'), - (0x2CF2, 'M', 'ⳳ'), - (0x2CF3, 'V'), - (0x2CF4, 'X'), - (0x2CF9, 'V'), - (0x2D26, 'X'), - (0x2D27, 'V'), - (0x2D28, 'X'), - (0x2D2D, 'V'), - (0x2D2E, 'X'), - (0x2D30, 'V'), - (0x2D68, 'X'), - (0x2D6F, 'M', 'ⵡ'), - (0x2D70, 'V'), - (0x2D71, 'X'), - (0x2D7F, 'V'), - (0x2D97, 'X'), - (0x2DA0, 'V'), - (0x2DA7, 'X'), - (0x2DA8, 'V'), - (0x2DAF, 'X'), - (0x2DB0, 'V'), - (0x2DB7, 'X'), - (0x2DB8, 'V'), - (0x2DBF, 'X'), - (0x2DC0, 'V'), - (0x2DC7, 'X'), - (0x2DC8, 'V'), - (0x2DCF, 'X'), - (0x2DD0, 'V'), - (0x2DD7, 'X'), - (0x2DD8, 'V'), - (0x2DDF, 'X'), - (0x2DE0, 'V'), - (0x2E5E, 'X'), - (0x2E80, 'V'), - (0x2E9A, 'X'), - (0x2E9B, 'V'), - (0x2E9F, 'M', '母'), - (0x2EA0, 'V'), - (0x2EF3, 'M', '龟'), - (0x2EF4, 'X'), - (0x2F00, 'M', '一'), - (0x2F01, 'M', '丨'), - (0x2F02, 'M', '丶'), - (0x2F03, 'M', '丿'), - (0x2F04, 'M', '乙'), - (0x2F05, 'M', '亅'), - (0x2F06, 'M', '二'), - (0x2F07, 'M', '亠'), - (0x2F08, 'M', '人'), - (0x2F09, 'M', '儿'), - (0x2F0A, 'M', '入'), - (0x2F0B, 'M', '八'), - (0x2F0C, 'M', '冂'), - (0x2F0D, 'M', '冖'), - (0x2F0E, 'M', '冫'), - (0x2F0F, 'M', '几'), - (0x2F10, 'M', '凵'), - (0x2F11, 'M', '刀'), - (0x2F12, 'M', '力'), - (0x2F13, 'M', '勹'), - (0x2F14, 'M', '匕'), - (0x2F15, 'M', '匚'), - ] - -def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F16, 'M', '匸'), - (0x2F17, 'M', '十'), - (0x2F18, 'M', '卜'), - (0x2F19, 'M', '卩'), - (0x2F1A, 'M', '厂'), - (0x2F1B, 'M', '厶'), - (0x2F1C, 'M', '又'), - (0x2F1D, 'M', '口'), - (0x2F1E, 'M', '囗'), - (0x2F1F, 'M', '土'), - (0x2F20, 'M', '士'), - (0x2F21, 'M', '夂'), - (0x2F22, 'M', '夊'), - (0x2F23, 'M', '夕'), - (0x2F24, 'M', '大'), - (0x2F25, 'M', '女'), - (0x2F26, 'M', '子'), - (0x2F27, 'M', '宀'), - (0x2F28, 'M', '寸'), - (0x2F29, 'M', '小'), - (0x2F2A, 'M', '尢'), - (0x2F2B, 'M', '尸'), - (0x2F2C, 'M', '屮'), - (0x2F2D, 'M', '山'), - (0x2F2E, 'M', '巛'), - (0x2F2F, 'M', '工'), - (0x2F30, 'M', '己'), - (0x2F31, 'M', '巾'), - (0x2F32, 'M', '干'), - (0x2F33, 'M', '幺'), - (0x2F34, 'M', '广'), - (0x2F35, 'M', '廴'), - (0x2F36, 'M', '廾'), - (0x2F37, 'M', '弋'), - (0x2F38, 'M', '弓'), - (0x2F39, 'M', '彐'), - (0x2F3A, 'M', '彡'), - (0x2F3B, 'M', '彳'), - (0x2F3C, 'M', '心'), - (0x2F3D, 'M', '戈'), - (0x2F3E, 'M', '戶'), - (0x2F3F, 'M', '手'), - (0x2F40, 'M', '支'), - (0x2F41, 'M', '攴'), - (0x2F42, 'M', '文'), - (0x2F43, 'M', '斗'), - (0x2F44, 'M', '斤'), - (0x2F45, 'M', '方'), - (0x2F46, 'M', '无'), - (0x2F47, 'M', '日'), - (0x2F48, 'M', '曰'), - (0x2F49, 'M', '月'), - (0x2F4A, 'M', '木'), - (0x2F4B, 'M', '欠'), - (0x2F4C, 'M', '止'), - (0x2F4D, 'M', '歹'), - (0x2F4E, 'M', '殳'), - (0x2F4F, 'M', '毋'), - (0x2F50, 'M', '比'), - (0x2F51, 'M', '毛'), - (0x2F52, 'M', '氏'), - (0x2F53, 'M', '气'), - (0x2F54, 'M', '水'), - (0x2F55, 'M', '火'), - (0x2F56, 'M', '爪'), - (0x2F57, 'M', '父'), - (0x2F58, 'M', '爻'), - (0x2F59, 'M', '爿'), - (0x2F5A, 'M', '片'), - (0x2F5B, 'M', '牙'), - (0x2F5C, 'M', '牛'), - (0x2F5D, 'M', '犬'), - (0x2F5E, 'M', '玄'), - (0x2F5F, 'M', '玉'), - (0x2F60, 'M', '瓜'), - (0x2F61, 'M', '瓦'), - (0x2F62, 'M', '甘'), - (0x2F63, 'M', '生'), - (0x2F64, 'M', '用'), - (0x2F65, 'M', '田'), - (0x2F66, 'M', '疋'), - (0x2F67, 'M', '疒'), - (0x2F68, 'M', '癶'), - (0x2F69, 'M', '白'), - (0x2F6A, 'M', '皮'), - (0x2F6B, 'M', '皿'), - (0x2F6C, 'M', '目'), - (0x2F6D, 'M', '矛'), - (0x2F6E, 'M', '矢'), - (0x2F6F, 'M', '石'), - (0x2F70, 'M', '示'), - (0x2F71, 'M', '禸'), - (0x2F72, 'M', '禾'), - (0x2F73, 'M', '穴'), - (0x2F74, 'M', '立'), - (0x2F75, 'M', '竹'), - (0x2F76, 'M', '米'), - (0x2F77, 'M', '糸'), - (0x2F78, 'M', '缶'), - (0x2F79, 'M', '网'), - ] - -def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F7A, 'M', '羊'), - (0x2F7B, 'M', '羽'), - (0x2F7C, 'M', '老'), - (0x2F7D, 'M', '而'), - (0x2F7E, 'M', '耒'), - (0x2F7F, 'M', '耳'), - (0x2F80, 'M', '聿'), - (0x2F81, 'M', '肉'), - (0x2F82, 'M', '臣'), - (0x2F83, 'M', '自'), - (0x2F84, 'M', '至'), - (0x2F85, 'M', '臼'), - (0x2F86, 'M', '舌'), - (0x2F87, 'M', '舛'), - (0x2F88, 'M', '舟'), - (0x2F89, 'M', '艮'), - (0x2F8A, 'M', '色'), - (0x2F8B, 'M', '艸'), - (0x2F8C, 'M', '虍'), - (0x2F8D, 'M', '虫'), - (0x2F8E, 'M', '血'), - (0x2F8F, 'M', '行'), - (0x2F90, 'M', '衣'), - (0x2F91, 'M', '襾'), - (0x2F92, 'M', '見'), - (0x2F93, 'M', '角'), - (0x2F94, 'M', '言'), - (0x2F95, 'M', '谷'), - (0x2F96, 'M', '豆'), - (0x2F97, 'M', '豕'), - (0x2F98, 'M', '豸'), - (0x2F99, 'M', '貝'), - (0x2F9A, 'M', '赤'), - (0x2F9B, 'M', '走'), - (0x2F9C, 'M', '足'), - (0x2F9D, 'M', '身'), - (0x2F9E, 'M', '車'), - (0x2F9F, 'M', '辛'), - (0x2FA0, 'M', '辰'), - (0x2FA1, 'M', '辵'), - (0x2FA2, 'M', '邑'), - (0x2FA3, 'M', '酉'), - (0x2FA4, 'M', '釆'), - (0x2FA5, 'M', '里'), - (0x2FA6, 'M', '金'), - (0x2FA7, 'M', '長'), - (0x2FA8, 'M', '門'), - (0x2FA9, 'M', '阜'), - (0x2FAA, 'M', '隶'), - (0x2FAB, 'M', '隹'), - (0x2FAC, 'M', '雨'), - (0x2FAD, 'M', '靑'), - (0x2FAE, 'M', '非'), - (0x2FAF, 'M', '面'), - (0x2FB0, 'M', '革'), - (0x2FB1, 'M', '韋'), - (0x2FB2, 'M', '韭'), - (0x2FB3, 'M', '音'), - (0x2FB4, 'M', '頁'), - (0x2FB5, 'M', '風'), - (0x2FB6, 'M', '飛'), - (0x2FB7, 'M', '食'), - (0x2FB8, 'M', '首'), - (0x2FB9, 'M', '香'), - (0x2FBA, 'M', '馬'), - (0x2FBB, 'M', '骨'), - (0x2FBC, 'M', '高'), - (0x2FBD, 'M', '髟'), - (0x2FBE, 'M', '鬥'), - (0x2FBF, 'M', '鬯'), - (0x2FC0, 'M', '鬲'), - (0x2FC1, 'M', '鬼'), - (0x2FC2, 'M', '魚'), - (0x2FC3, 'M', '鳥'), - (0x2FC4, 'M', '鹵'), - (0x2FC5, 'M', '鹿'), - (0x2FC6, 'M', '麥'), - (0x2FC7, 'M', '麻'), - (0x2FC8, 'M', '黃'), - (0x2FC9, 'M', '黍'), - (0x2FCA, 'M', '黑'), - (0x2FCB, 'M', '黹'), - (0x2FCC, 'M', '黽'), - (0x2FCD, 'M', '鼎'), - (0x2FCE, 'M', '鼓'), - (0x2FCF, 'M', '鼠'), - (0x2FD0, 'M', '鼻'), - (0x2FD1, 'M', '齊'), - (0x2FD2, 'M', '齒'), - (0x2FD3, 'M', '龍'), - (0x2FD4, 'M', '龜'), - (0x2FD5, 'M', '龠'), - (0x2FD6, 'X'), - (0x3000, '3', ' '), - (0x3001, 'V'), - (0x3002, 'M', '.'), - (0x3003, 'V'), - (0x3036, 'M', '〒'), - (0x3037, 'V'), - (0x3038, 'M', '十'), - ] - -def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3039, 'M', '卄'), - (0x303A, 'M', '卅'), - (0x303B, 'V'), - (0x3040, 'X'), - (0x3041, 'V'), - (0x3097, 'X'), - (0x3099, 'V'), - (0x309B, '3', ' ゙'), - (0x309C, '3', ' ゚'), - (0x309D, 'V'), - (0x309F, 'M', 'より'), - (0x30A0, 'V'), - (0x30FF, 'M', 'コト'), - (0x3100, 'X'), - (0x3105, 'V'), - (0x3130, 'X'), - (0x3131, 'M', 'ᄀ'), - (0x3132, 'M', 'ᄁ'), - (0x3133, 'M', 'ᆪ'), - (0x3134, 'M', 'ᄂ'), - (0x3135, 'M', 'ᆬ'), - (0x3136, 'M', 'ᆭ'), - (0x3137, 'M', 'ᄃ'), - (0x3138, 'M', 'ᄄ'), - (0x3139, 'M', 'ᄅ'), - (0x313A, 'M', 'ᆰ'), - (0x313B, 'M', 'ᆱ'), - (0x313C, 'M', 'ᆲ'), - (0x313D, 'M', 'ᆳ'), - (0x313E, 'M', 'ᆴ'), - (0x313F, 'M', 'ᆵ'), - (0x3140, 'M', 'ᄚ'), - (0x3141, 'M', 'ᄆ'), - (0x3142, 'M', 'ᄇ'), - (0x3143, 'M', 'ᄈ'), - (0x3144, 'M', 'ᄡ'), - (0x3145, 'M', 'ᄉ'), - (0x3146, 'M', 'ᄊ'), - (0x3147, 'M', 'ᄋ'), - (0x3148, 'M', 'ᄌ'), - (0x3149, 'M', 'ᄍ'), - (0x314A, 'M', 'ᄎ'), - (0x314B, 'M', 'ᄏ'), - (0x314C, 'M', 'ᄐ'), - (0x314D, 'M', 'ᄑ'), - (0x314E, 'M', 'ᄒ'), - (0x314F, 'M', 'ᅡ'), - (0x3150, 'M', 'ᅢ'), - (0x3151, 'M', 'ᅣ'), - (0x3152, 'M', 'ᅤ'), - (0x3153, 'M', 'ᅥ'), - (0x3154, 'M', 'ᅦ'), - (0x3155, 'M', 'ᅧ'), - (0x3156, 'M', 'ᅨ'), - (0x3157, 'M', 'ᅩ'), - (0x3158, 'M', 'ᅪ'), - (0x3159, 'M', 'ᅫ'), - (0x315A, 'M', 'ᅬ'), - (0x315B, 'M', 'ᅭ'), - (0x315C, 'M', 'ᅮ'), - (0x315D, 'M', 'ᅯ'), - (0x315E, 'M', 'ᅰ'), - (0x315F, 'M', 'ᅱ'), - (0x3160, 'M', 'ᅲ'), - (0x3161, 'M', 'ᅳ'), - (0x3162, 'M', 'ᅴ'), - (0x3163, 'M', 'ᅵ'), - (0x3164, 'X'), - (0x3165, 'M', 'ᄔ'), - (0x3166, 'M', 'ᄕ'), - (0x3167, 'M', 'ᇇ'), - (0x3168, 'M', 'ᇈ'), - (0x3169, 'M', 'ᇌ'), - (0x316A, 'M', 'ᇎ'), - (0x316B, 'M', 'ᇓ'), - (0x316C, 'M', 'ᇗ'), - (0x316D, 'M', 'ᇙ'), - (0x316E, 'M', 'ᄜ'), - (0x316F, 'M', 'ᇝ'), - (0x3170, 'M', 'ᇟ'), - (0x3171, 'M', 'ᄝ'), - (0x3172, 'M', 'ᄞ'), - (0x3173, 'M', 'ᄠ'), - (0x3174, 'M', 'ᄢ'), - (0x3175, 'M', 'ᄣ'), - (0x3176, 'M', 'ᄧ'), - (0x3177, 'M', 'ᄩ'), - (0x3178, 'M', 'ᄫ'), - (0x3179, 'M', 'ᄬ'), - (0x317A, 'M', 'ᄭ'), - (0x317B, 'M', 'ᄮ'), - (0x317C, 'M', 'ᄯ'), - (0x317D, 'M', 'ᄲ'), - (0x317E, 'M', 'ᄶ'), - (0x317F, 'M', 'ᅀ'), - (0x3180, 'M', 'ᅇ'), - (0x3181, 'M', 'ᅌ'), - (0x3182, 'M', 'ᇱ'), - (0x3183, 'M', 'ᇲ'), - (0x3184, 'M', 'ᅗ'), - ] - -def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3185, 'M', 'ᅘ'), - (0x3186, 'M', 'ᅙ'), - (0x3187, 'M', 'ᆄ'), - (0x3188, 'M', 'ᆅ'), - (0x3189, 'M', 'ᆈ'), - (0x318A, 'M', 'ᆑ'), - (0x318B, 'M', 'ᆒ'), - (0x318C, 'M', 'ᆔ'), - (0x318D, 'M', 'ᆞ'), - (0x318E, 'M', 'ᆡ'), - (0x318F, 'X'), - (0x3190, 'V'), - (0x3192, 'M', '一'), - (0x3193, 'M', '二'), - (0x3194, 'M', '三'), - (0x3195, 'M', '四'), - (0x3196, 'M', '上'), - (0x3197, 'M', '中'), - (0x3198, 'M', '下'), - (0x3199, 'M', '甲'), - (0x319A, 'M', '乙'), - (0x319B, 'M', '丙'), - (0x319C, 'M', '丁'), - (0x319D, 'M', '天'), - (0x319E, 'M', '地'), - (0x319F, 'M', '人'), - (0x31A0, 'V'), - (0x31E4, 'X'), - (0x31F0, 'V'), - (0x3200, '3', '(ᄀ)'), - (0x3201, '3', '(ᄂ)'), - (0x3202, '3', '(ᄃ)'), - (0x3203, '3', '(ᄅ)'), - (0x3204, '3', '(ᄆ)'), - (0x3205, '3', '(ᄇ)'), - (0x3206, '3', '(ᄉ)'), - (0x3207, '3', '(ᄋ)'), - (0x3208, '3', '(ᄌ)'), - (0x3209, '3', '(ᄎ)'), - (0x320A, '3', '(ᄏ)'), - (0x320B, '3', '(ᄐ)'), - (0x320C, '3', '(ᄑ)'), - (0x320D, '3', '(ᄒ)'), - (0x320E, '3', '(가)'), - (0x320F, '3', '(나)'), - (0x3210, '3', '(다)'), - (0x3211, '3', '(라)'), - (0x3212, '3', '(마)'), - (0x3213, '3', '(바)'), - (0x3214, '3', '(사)'), - (0x3215, '3', '(아)'), - (0x3216, '3', '(자)'), - (0x3217, '3', '(차)'), - (0x3218, '3', '(카)'), - (0x3219, '3', '(타)'), - (0x321A, '3', '(파)'), - (0x321B, '3', '(하)'), - (0x321C, '3', '(주)'), - (0x321D, '3', '(오전)'), - (0x321E, '3', '(오후)'), - (0x321F, 'X'), - (0x3220, '3', '(一)'), - (0x3221, '3', '(二)'), - (0x3222, '3', '(三)'), - (0x3223, '3', '(四)'), - (0x3224, '3', '(五)'), - (0x3225, '3', '(六)'), - (0x3226, '3', '(七)'), - (0x3227, '3', '(八)'), - (0x3228, '3', '(九)'), - (0x3229, '3', '(十)'), - (0x322A, '3', '(月)'), - (0x322B, '3', '(火)'), - (0x322C, '3', '(水)'), - (0x322D, '3', '(木)'), - (0x322E, '3', '(金)'), - (0x322F, '3', '(土)'), - (0x3230, '3', '(日)'), - (0x3231, '3', '(株)'), - (0x3232, '3', '(有)'), - (0x3233, '3', '(社)'), - (0x3234, '3', '(名)'), - (0x3235, '3', '(特)'), - (0x3236, '3', '(財)'), - (0x3237, '3', '(祝)'), - (0x3238, '3', '(労)'), - (0x3239, '3', '(代)'), - (0x323A, '3', '(呼)'), - (0x323B, '3', '(学)'), - (0x323C, '3', '(監)'), - (0x323D, '3', '(企)'), - (0x323E, '3', '(資)'), - (0x323F, '3', '(協)'), - (0x3240, '3', '(祭)'), - (0x3241, '3', '(休)'), - (0x3242, '3', '(自)'), - (0x3243, '3', '(至)'), - (0x3244, 'M', '問'), - (0x3245, 'M', '幼'), - (0x3246, 'M', '文'), - ] - -def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3247, 'M', '箏'), - (0x3248, 'V'), - (0x3250, 'M', 'pte'), - (0x3251, 'M', '21'), - (0x3252, 'M', '22'), - (0x3253, 'M', '23'), - (0x3254, 'M', '24'), - (0x3255, 'M', '25'), - (0x3256, 'M', '26'), - (0x3257, 'M', '27'), - (0x3258, 'M', '28'), - (0x3259, 'M', '29'), - (0x325A, 'M', '30'), - (0x325B, 'M', '31'), - (0x325C, 'M', '32'), - (0x325D, 'M', '33'), - (0x325E, 'M', '34'), - (0x325F, 'M', '35'), - (0x3260, 'M', 'ᄀ'), - (0x3261, 'M', 'ᄂ'), - (0x3262, 'M', 'ᄃ'), - (0x3263, 'M', 'ᄅ'), - (0x3264, 'M', 'ᄆ'), - (0x3265, 'M', 'ᄇ'), - (0x3266, 'M', 'ᄉ'), - (0x3267, 'M', 'ᄋ'), - (0x3268, 'M', 'ᄌ'), - (0x3269, 'M', 'ᄎ'), - (0x326A, 'M', 'ᄏ'), - (0x326B, 'M', 'ᄐ'), - (0x326C, 'M', 'ᄑ'), - (0x326D, 'M', 'ᄒ'), - (0x326E, 'M', '가'), - (0x326F, 'M', '나'), - (0x3270, 'M', '다'), - (0x3271, 'M', '라'), - (0x3272, 'M', '마'), - (0x3273, 'M', '바'), - (0x3274, 'M', '사'), - (0x3275, 'M', '아'), - (0x3276, 'M', '자'), - (0x3277, 'M', '차'), - (0x3278, 'M', '카'), - (0x3279, 'M', '타'), - (0x327A, 'M', '파'), - (0x327B, 'M', '하'), - (0x327C, 'M', '참고'), - (0x327D, 'M', '주의'), - (0x327E, 'M', '우'), - (0x327F, 'V'), - (0x3280, 'M', '一'), - (0x3281, 'M', '二'), - (0x3282, 'M', '三'), - (0x3283, 'M', '四'), - (0x3284, 'M', '五'), - (0x3285, 'M', '六'), - (0x3286, 'M', '七'), - (0x3287, 'M', '八'), - (0x3288, 'M', '九'), - (0x3289, 'M', '十'), - (0x328A, 'M', '月'), - (0x328B, 'M', '火'), - (0x328C, 'M', '水'), - (0x328D, 'M', '木'), - (0x328E, 'M', '金'), - (0x328F, 'M', '土'), - (0x3290, 'M', '日'), - (0x3291, 'M', '株'), - (0x3292, 'M', '有'), - (0x3293, 'M', '社'), - (0x3294, 'M', '名'), - (0x3295, 'M', '特'), - (0x3296, 'M', '財'), - (0x3297, 'M', '祝'), - (0x3298, 'M', '労'), - (0x3299, 'M', '秘'), - (0x329A, 'M', '男'), - (0x329B, 'M', '女'), - (0x329C, 'M', '適'), - (0x329D, 'M', '優'), - (0x329E, 'M', '印'), - (0x329F, 'M', '注'), - (0x32A0, 'M', '項'), - (0x32A1, 'M', '休'), - (0x32A2, 'M', '写'), - (0x32A3, 'M', '正'), - (0x32A4, 'M', '上'), - (0x32A5, 'M', '中'), - (0x32A6, 'M', '下'), - (0x32A7, 'M', '左'), - (0x32A8, 'M', '右'), - (0x32A9, 'M', '医'), - (0x32AA, 'M', '宗'), - (0x32AB, 'M', '学'), - (0x32AC, 'M', '監'), - (0x32AD, 'M', '企'), - (0x32AE, 'M', '資'), - (0x32AF, 'M', '協'), - (0x32B0, 'M', '夜'), - (0x32B1, 'M', '36'), - ] - -def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x32B2, 'M', '37'), - (0x32B3, 'M', '38'), - (0x32B4, 'M', '39'), - (0x32B5, 'M', '40'), - (0x32B6, 'M', '41'), - (0x32B7, 'M', '42'), - (0x32B8, 'M', '43'), - (0x32B9, 'M', '44'), - (0x32BA, 'M', '45'), - (0x32BB, 'M', '46'), - (0x32BC, 'M', '47'), - (0x32BD, 'M', '48'), - (0x32BE, 'M', '49'), - (0x32BF, 'M', '50'), - (0x32C0, 'M', '1月'), - (0x32C1, 'M', '2月'), - (0x32C2, 'M', '3月'), - (0x32C3, 'M', '4月'), - (0x32C4, 'M', '5月'), - (0x32C5, 'M', '6月'), - (0x32C6, 'M', '7月'), - (0x32C7, 'M', '8月'), - (0x32C8, 'M', '9月'), - (0x32C9, 'M', '10月'), - (0x32CA, 'M', '11月'), - (0x32CB, 'M', '12月'), - (0x32CC, 'M', 'hg'), - (0x32CD, 'M', 'erg'), - (0x32CE, 'M', 'ev'), - (0x32CF, 'M', 'ltd'), - (0x32D0, 'M', 'ア'), - (0x32D1, 'M', 'イ'), - (0x32D2, 'M', 'ウ'), - (0x32D3, 'M', 'エ'), - (0x32D4, 'M', 'オ'), - (0x32D5, 'M', 'カ'), - (0x32D6, 'M', 'キ'), - (0x32D7, 'M', 'ク'), - (0x32D8, 'M', 'ケ'), - (0x32D9, 'M', 'コ'), - (0x32DA, 'M', 'サ'), - (0x32DB, 'M', 'シ'), - (0x32DC, 'M', 'ス'), - (0x32DD, 'M', 'セ'), - (0x32DE, 'M', 'ソ'), - (0x32DF, 'M', 'タ'), - (0x32E0, 'M', 'チ'), - (0x32E1, 'M', 'ツ'), - (0x32E2, 'M', 'テ'), - (0x32E3, 'M', 'ト'), - (0x32E4, 'M', 'ナ'), - (0x32E5, 'M', 'ニ'), - (0x32E6, 'M', 'ヌ'), - (0x32E7, 'M', 'ネ'), - (0x32E8, 'M', 'ノ'), - (0x32E9, 'M', 'ハ'), - (0x32EA, 'M', 'ヒ'), - (0x32EB, 'M', 'フ'), - (0x32EC, 'M', 'ヘ'), - (0x32ED, 'M', 'ホ'), - (0x32EE, 'M', 'マ'), - (0x32EF, 'M', 'ミ'), - (0x32F0, 'M', 'ム'), - (0x32F1, 'M', 'メ'), - (0x32F2, 'M', 'モ'), - (0x32F3, 'M', 'ヤ'), - (0x32F4, 'M', 'ユ'), - (0x32F5, 'M', 'ヨ'), - (0x32F6, 'M', 'ラ'), - (0x32F7, 'M', 'リ'), - (0x32F8, 'M', 'ル'), - (0x32F9, 'M', 'レ'), - (0x32FA, 'M', 'ロ'), - (0x32FB, 'M', 'ワ'), - (0x32FC, 'M', 'ヰ'), - (0x32FD, 'M', 'ヱ'), - (0x32FE, 'M', 'ヲ'), - (0x32FF, 'M', '令和'), - (0x3300, 'M', 'アパート'), - (0x3301, 'M', 'アルファ'), - (0x3302, 'M', 'アンペア'), - (0x3303, 'M', 'アール'), - (0x3304, 'M', 'イニング'), - (0x3305, 'M', 'インチ'), - (0x3306, 'M', 'ウォン'), - (0x3307, 'M', 'エスクード'), - (0x3308, 'M', 'エーカー'), - (0x3309, 'M', 'オンス'), - (0x330A, 'M', 'オーム'), - (0x330B, 'M', 'カイリ'), - (0x330C, 'M', 'カラット'), - (0x330D, 'M', 'カロリー'), - (0x330E, 'M', 'ガロン'), - (0x330F, 'M', 'ガンマ'), - (0x3310, 'M', 'ギガ'), - (0x3311, 'M', 'ギニー'), - (0x3312, 'M', 'キュリー'), - (0x3313, 'M', 'ギルダー'), - (0x3314, 'M', 'キロ'), - (0x3315, 'M', 'キログラム'), - ] - -def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3316, 'M', 'キロメートル'), - (0x3317, 'M', 'キロワット'), - (0x3318, 'M', 'グラム'), - (0x3319, 'M', 'グラムトン'), - (0x331A, 'M', 'クルゼイロ'), - (0x331B, 'M', 'クローネ'), - (0x331C, 'M', 'ケース'), - (0x331D, 'M', 'コルナ'), - (0x331E, 'M', 'コーポ'), - (0x331F, 'M', 'サイクル'), - (0x3320, 'M', 'サンチーム'), - (0x3321, 'M', 'シリング'), - (0x3322, 'M', 'センチ'), - (0x3323, 'M', 'セント'), - (0x3324, 'M', 'ダース'), - (0x3325, 'M', 'デシ'), - (0x3326, 'M', 'ドル'), - (0x3327, 'M', 'トン'), - (0x3328, 'M', 'ナノ'), - (0x3329, 'M', 'ノット'), - (0x332A, 'M', 'ハイツ'), - (0x332B, 'M', 'パーセント'), - (0x332C, 'M', 'パーツ'), - (0x332D, 'M', 'バーレル'), - (0x332E, 'M', 'ピアストル'), - (0x332F, 'M', 'ピクル'), - (0x3330, 'M', 'ピコ'), - (0x3331, 'M', 'ビル'), - (0x3332, 'M', 'ファラッド'), - (0x3333, 'M', 'フィート'), - (0x3334, 'M', 'ブッシェル'), - (0x3335, 'M', 'フラン'), - (0x3336, 'M', 'ヘクタール'), - (0x3337, 'M', 'ペソ'), - (0x3338, 'M', 'ペニヒ'), - (0x3339, 'M', 'ヘルツ'), - (0x333A, 'M', 'ペンス'), - (0x333B, 'M', 'ページ'), - (0x333C, 'M', 'ベータ'), - (0x333D, 'M', 'ポイント'), - (0x333E, 'M', 'ボルト'), - (0x333F, 'M', 'ホン'), - (0x3340, 'M', 'ポンド'), - (0x3341, 'M', 'ホール'), - (0x3342, 'M', 'ホーン'), - (0x3343, 'M', 'マイクロ'), - (0x3344, 'M', 'マイル'), - (0x3345, 'M', 'マッハ'), - (0x3346, 'M', 'マルク'), - (0x3347, 'M', 'マンション'), - (0x3348, 'M', 'ミクロン'), - (0x3349, 'M', 'ミリ'), - (0x334A, 'M', 'ミリバール'), - (0x334B, 'M', 'メガ'), - (0x334C, 'M', 'メガトン'), - (0x334D, 'M', 'メートル'), - (0x334E, 'M', 'ヤード'), - (0x334F, 'M', 'ヤール'), - (0x3350, 'M', 'ユアン'), - (0x3351, 'M', 'リットル'), - (0x3352, 'M', 'リラ'), - (0x3353, 'M', 'ルピー'), - (0x3354, 'M', 'ルーブル'), - (0x3355, 'M', 'レム'), - (0x3356, 'M', 'レントゲン'), - (0x3357, 'M', 'ワット'), - (0x3358, 'M', '0点'), - (0x3359, 'M', '1点'), - (0x335A, 'M', '2点'), - (0x335B, 'M', '3点'), - (0x335C, 'M', '4点'), - (0x335D, 'M', '5点'), - (0x335E, 'M', '6点'), - (0x335F, 'M', '7点'), - (0x3360, 'M', '8点'), - (0x3361, 'M', '9点'), - (0x3362, 'M', '10点'), - (0x3363, 'M', '11点'), - (0x3364, 'M', '12点'), - (0x3365, 'M', '13点'), - (0x3366, 'M', '14点'), - (0x3367, 'M', '15点'), - (0x3368, 'M', '16点'), - (0x3369, 'M', '17点'), - (0x336A, 'M', '18点'), - (0x336B, 'M', '19点'), - (0x336C, 'M', '20点'), - (0x336D, 'M', '21点'), - (0x336E, 'M', '22点'), - (0x336F, 'M', '23点'), - (0x3370, 'M', '24点'), - (0x3371, 'M', 'hpa'), - (0x3372, 'M', 'da'), - (0x3373, 'M', 'au'), - (0x3374, 'M', 'bar'), - (0x3375, 'M', 'ov'), - (0x3376, 'M', 'pc'), - (0x3377, 'M', 'dm'), - (0x3378, 'M', 'dm2'), - (0x3379, 'M', 'dm3'), - ] - -def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x337A, 'M', 'iu'), - (0x337B, 'M', '平成'), - (0x337C, 'M', '昭和'), - (0x337D, 'M', '大正'), - (0x337E, 'M', '明治'), - (0x337F, 'M', '株式会社'), - (0x3380, 'M', 'pa'), - (0x3381, 'M', 'na'), - (0x3382, 'M', 'μa'), - (0x3383, 'M', 'ma'), - (0x3384, 'M', 'ka'), - (0x3385, 'M', 'kb'), - (0x3386, 'M', 'mb'), - (0x3387, 'M', 'gb'), - (0x3388, 'M', 'cal'), - (0x3389, 'M', 'kcal'), - (0x338A, 'M', 'pf'), - (0x338B, 'M', 'nf'), - (0x338C, 'M', 'μf'), - (0x338D, 'M', 'μg'), - (0x338E, 'M', 'mg'), - (0x338F, 'M', 'kg'), - (0x3390, 'M', 'hz'), - (0x3391, 'M', 'khz'), - (0x3392, 'M', 'mhz'), - (0x3393, 'M', 'ghz'), - (0x3394, 'M', 'thz'), - (0x3395, 'M', 'μl'), - (0x3396, 'M', 'ml'), - (0x3397, 'M', 'dl'), - (0x3398, 'M', 'kl'), - (0x3399, 'M', 'fm'), - (0x339A, 'M', 'nm'), - (0x339B, 'M', 'μm'), - (0x339C, 'M', 'mm'), - (0x339D, 'M', 'cm'), - (0x339E, 'M', 'km'), - (0x339F, 'M', 'mm2'), - (0x33A0, 'M', 'cm2'), - (0x33A1, 'M', 'm2'), - (0x33A2, 'M', 'km2'), - (0x33A3, 'M', 'mm3'), - (0x33A4, 'M', 'cm3'), - (0x33A5, 'M', 'm3'), - (0x33A6, 'M', 'km3'), - (0x33A7, 'M', 'm∕s'), - (0x33A8, 'M', 'm∕s2'), - (0x33A9, 'M', 'pa'), - (0x33AA, 'M', 'kpa'), - (0x33AB, 'M', 'mpa'), - (0x33AC, 'M', 'gpa'), - (0x33AD, 'M', 'rad'), - (0x33AE, 'M', 'rad∕s'), - (0x33AF, 'M', 'rad∕s2'), - (0x33B0, 'M', 'ps'), - (0x33B1, 'M', 'ns'), - (0x33B2, 'M', 'μs'), - (0x33B3, 'M', 'ms'), - (0x33B4, 'M', 'pv'), - (0x33B5, 'M', 'nv'), - (0x33B6, 'M', 'μv'), - (0x33B7, 'M', 'mv'), - (0x33B8, 'M', 'kv'), - (0x33B9, 'M', 'mv'), - (0x33BA, 'M', 'pw'), - (0x33BB, 'M', 'nw'), - (0x33BC, 'M', 'μw'), - (0x33BD, 'M', 'mw'), - (0x33BE, 'M', 'kw'), - (0x33BF, 'M', 'mw'), - (0x33C0, 'M', 'kω'), - (0x33C1, 'M', 'mω'), - (0x33C2, 'X'), - (0x33C3, 'M', 'bq'), - (0x33C4, 'M', 'cc'), - (0x33C5, 'M', 'cd'), - (0x33C6, 'M', 'c∕kg'), - (0x33C7, 'X'), - (0x33C8, 'M', 'db'), - (0x33C9, 'M', 'gy'), - (0x33CA, 'M', 'ha'), - (0x33CB, 'M', 'hp'), - (0x33CC, 'M', 'in'), - (0x33CD, 'M', 'kk'), - (0x33CE, 'M', 'km'), - (0x33CF, 'M', 'kt'), - (0x33D0, 'M', 'lm'), - (0x33D1, 'M', 'ln'), - (0x33D2, 'M', 'log'), - (0x33D3, 'M', 'lx'), - (0x33D4, 'M', 'mb'), - (0x33D5, 'M', 'mil'), - (0x33D6, 'M', 'mol'), - (0x33D7, 'M', 'ph'), - (0x33D8, 'X'), - (0x33D9, 'M', 'ppm'), - (0x33DA, 'M', 'pr'), - (0x33DB, 'M', 'sr'), - (0x33DC, 'M', 'sv'), - (0x33DD, 'M', 'wb'), - ] - -def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x33DE, 'M', 'v∕m'), - (0x33DF, 'M', 'a∕m'), - (0x33E0, 'M', '1日'), - (0x33E1, 'M', '2日'), - (0x33E2, 'M', '3日'), - (0x33E3, 'M', '4日'), - (0x33E4, 'M', '5日'), - (0x33E5, 'M', '6日'), - (0x33E6, 'M', '7日'), - (0x33E7, 'M', '8日'), - (0x33E8, 'M', '9日'), - (0x33E9, 'M', '10日'), - (0x33EA, 'M', '11日'), - (0x33EB, 'M', '12日'), - (0x33EC, 'M', '13日'), - (0x33ED, 'M', '14日'), - (0x33EE, 'M', '15日'), - (0x33EF, 'M', '16日'), - (0x33F0, 'M', '17日'), - (0x33F1, 'M', '18日'), - (0x33F2, 'M', '19日'), - (0x33F3, 'M', '20日'), - (0x33F4, 'M', '21日'), - (0x33F5, 'M', '22日'), - (0x33F6, 'M', '23日'), - (0x33F7, 'M', '24日'), - (0x33F8, 'M', '25日'), - (0x33F9, 'M', '26日'), - (0x33FA, 'M', '27日'), - (0x33FB, 'M', '28日'), - (0x33FC, 'M', '29日'), - (0x33FD, 'M', '30日'), - (0x33FE, 'M', '31日'), - (0x33FF, 'M', 'gal'), - (0x3400, 'V'), - (0xA48D, 'X'), - (0xA490, 'V'), - (0xA4C7, 'X'), - (0xA4D0, 'V'), - (0xA62C, 'X'), - (0xA640, 'M', 'ꙁ'), - (0xA641, 'V'), - (0xA642, 'M', 'ꙃ'), - (0xA643, 'V'), - (0xA644, 'M', 'ꙅ'), - (0xA645, 'V'), - (0xA646, 'M', 'ꙇ'), - (0xA647, 'V'), - (0xA648, 'M', 'ꙉ'), - (0xA649, 'V'), - (0xA64A, 'M', 'ꙋ'), - (0xA64B, 'V'), - (0xA64C, 'M', 'ꙍ'), - (0xA64D, 'V'), - (0xA64E, 'M', 'ꙏ'), - (0xA64F, 'V'), - (0xA650, 'M', 'ꙑ'), - (0xA651, 'V'), - (0xA652, 'M', 'ꙓ'), - (0xA653, 'V'), - (0xA654, 'M', 'ꙕ'), - (0xA655, 'V'), - (0xA656, 'M', 'ꙗ'), - (0xA657, 'V'), - (0xA658, 'M', 'ꙙ'), - (0xA659, 'V'), - (0xA65A, 'M', 'ꙛ'), - (0xA65B, 'V'), - (0xA65C, 'M', 'ꙝ'), - (0xA65D, 'V'), - (0xA65E, 'M', 'ꙟ'), - (0xA65F, 'V'), - (0xA660, 'M', 'ꙡ'), - (0xA661, 'V'), - (0xA662, 'M', 'ꙣ'), - (0xA663, 'V'), - (0xA664, 'M', 'ꙥ'), - (0xA665, 'V'), - (0xA666, 'M', 'ꙧ'), - (0xA667, 'V'), - (0xA668, 'M', 'ꙩ'), - (0xA669, 'V'), - (0xA66A, 'M', 'ꙫ'), - (0xA66B, 'V'), - (0xA66C, 'M', 'ꙭ'), - (0xA66D, 'V'), - (0xA680, 'M', 'ꚁ'), - (0xA681, 'V'), - (0xA682, 'M', 'ꚃ'), - (0xA683, 'V'), - (0xA684, 'M', 'ꚅ'), - (0xA685, 'V'), - (0xA686, 'M', 'ꚇ'), - (0xA687, 'V'), - (0xA688, 'M', 'ꚉ'), - (0xA689, 'V'), - (0xA68A, 'M', 'ꚋ'), - (0xA68B, 'V'), - (0xA68C, 'M', 'ꚍ'), - (0xA68D, 'V'), - ] - -def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA68E, 'M', 'ꚏ'), - (0xA68F, 'V'), - (0xA690, 'M', 'ꚑ'), - (0xA691, 'V'), - (0xA692, 'M', 'ꚓ'), - (0xA693, 'V'), - (0xA694, 'M', 'ꚕ'), - (0xA695, 'V'), - (0xA696, 'M', 'ꚗ'), - (0xA697, 'V'), - (0xA698, 'M', 'ꚙ'), - (0xA699, 'V'), - (0xA69A, 'M', 'ꚛ'), - (0xA69B, 'V'), - (0xA69C, 'M', 'ъ'), - (0xA69D, 'M', 'ь'), - (0xA69E, 'V'), - (0xA6F8, 'X'), - (0xA700, 'V'), - (0xA722, 'M', 'ꜣ'), - (0xA723, 'V'), - (0xA724, 'M', 'ꜥ'), - (0xA725, 'V'), - (0xA726, 'M', 'ꜧ'), - (0xA727, 'V'), - (0xA728, 'M', 'ꜩ'), - (0xA729, 'V'), - (0xA72A, 'M', 'ꜫ'), - (0xA72B, 'V'), - (0xA72C, 'M', 'ꜭ'), - (0xA72D, 'V'), - (0xA72E, 'M', 'ꜯ'), - (0xA72F, 'V'), - (0xA732, 'M', 'ꜳ'), - (0xA733, 'V'), - (0xA734, 'M', 'ꜵ'), - (0xA735, 'V'), - (0xA736, 'M', 'ꜷ'), - (0xA737, 'V'), - (0xA738, 'M', 'ꜹ'), - (0xA739, 'V'), - (0xA73A, 'M', 'ꜻ'), - (0xA73B, 'V'), - (0xA73C, 'M', 'ꜽ'), - (0xA73D, 'V'), - (0xA73E, 'M', 'ꜿ'), - (0xA73F, 'V'), - (0xA740, 'M', 'ꝁ'), - (0xA741, 'V'), - (0xA742, 'M', 'ꝃ'), - (0xA743, 'V'), - (0xA744, 'M', 'ꝅ'), - (0xA745, 'V'), - (0xA746, 'M', 'ꝇ'), - (0xA747, 'V'), - (0xA748, 'M', 'ꝉ'), - (0xA749, 'V'), - (0xA74A, 'M', 'ꝋ'), - (0xA74B, 'V'), - (0xA74C, 'M', 'ꝍ'), - (0xA74D, 'V'), - (0xA74E, 'M', 'ꝏ'), - (0xA74F, 'V'), - (0xA750, 'M', 'ꝑ'), - (0xA751, 'V'), - (0xA752, 'M', 'ꝓ'), - (0xA753, 'V'), - (0xA754, 'M', 'ꝕ'), - (0xA755, 'V'), - (0xA756, 'M', 'ꝗ'), - (0xA757, 'V'), - (0xA758, 'M', 'ꝙ'), - (0xA759, 'V'), - (0xA75A, 'M', 'ꝛ'), - (0xA75B, 'V'), - (0xA75C, 'M', 'ꝝ'), - (0xA75D, 'V'), - (0xA75E, 'M', 'ꝟ'), - (0xA75F, 'V'), - (0xA760, 'M', 'ꝡ'), - (0xA761, 'V'), - (0xA762, 'M', 'ꝣ'), - (0xA763, 'V'), - (0xA764, 'M', 'ꝥ'), - (0xA765, 'V'), - (0xA766, 'M', 'ꝧ'), - (0xA767, 'V'), - (0xA768, 'M', 'ꝩ'), - (0xA769, 'V'), - (0xA76A, 'M', 'ꝫ'), - (0xA76B, 'V'), - (0xA76C, 'M', 'ꝭ'), - (0xA76D, 'V'), - (0xA76E, 'M', 'ꝯ'), - (0xA76F, 'V'), - (0xA770, 'M', 'ꝯ'), - (0xA771, 'V'), - (0xA779, 'M', 'ꝺ'), - (0xA77A, 'V'), - (0xA77B, 'M', 'ꝼ'), - ] - -def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA77C, 'V'), - (0xA77D, 'M', 'ᵹ'), - (0xA77E, 'M', 'ꝿ'), - (0xA77F, 'V'), - (0xA780, 'M', 'ꞁ'), - (0xA781, 'V'), - (0xA782, 'M', 'ꞃ'), - (0xA783, 'V'), - (0xA784, 'M', 'ꞅ'), - (0xA785, 'V'), - (0xA786, 'M', 'ꞇ'), - (0xA787, 'V'), - (0xA78B, 'M', 'ꞌ'), - (0xA78C, 'V'), - (0xA78D, 'M', 'ɥ'), - (0xA78E, 'V'), - (0xA790, 'M', 'ꞑ'), - (0xA791, 'V'), - (0xA792, 'M', 'ꞓ'), - (0xA793, 'V'), - (0xA796, 'M', 'ꞗ'), - (0xA797, 'V'), - (0xA798, 'M', 'ꞙ'), - (0xA799, 'V'), - (0xA79A, 'M', 'ꞛ'), - (0xA79B, 'V'), - (0xA79C, 'M', 'ꞝ'), - (0xA79D, 'V'), - (0xA79E, 'M', 'ꞟ'), - (0xA79F, 'V'), - (0xA7A0, 'M', 'ꞡ'), - (0xA7A1, 'V'), - (0xA7A2, 'M', 'ꞣ'), - (0xA7A3, 'V'), - (0xA7A4, 'M', 'ꞥ'), - (0xA7A5, 'V'), - (0xA7A6, 'M', 'ꞧ'), - (0xA7A7, 'V'), - (0xA7A8, 'M', 'ꞩ'), - (0xA7A9, 'V'), - (0xA7AA, 'M', 'ɦ'), - (0xA7AB, 'M', 'ɜ'), - (0xA7AC, 'M', 'ɡ'), - (0xA7AD, 'M', 'ɬ'), - (0xA7AE, 'M', 'ɪ'), - (0xA7AF, 'V'), - (0xA7B0, 'M', 'ʞ'), - (0xA7B1, 'M', 'ʇ'), - (0xA7B2, 'M', 'ʝ'), - (0xA7B3, 'M', 'ꭓ'), - (0xA7B4, 'M', 'ꞵ'), - (0xA7B5, 'V'), - (0xA7B6, 'M', 'ꞷ'), - (0xA7B7, 'V'), - (0xA7B8, 'M', 'ꞹ'), - (0xA7B9, 'V'), - (0xA7BA, 'M', 'ꞻ'), - (0xA7BB, 'V'), - (0xA7BC, 'M', 'ꞽ'), - (0xA7BD, 'V'), - (0xA7BE, 'M', 'ꞿ'), - (0xA7BF, 'V'), - (0xA7C0, 'M', 'ꟁ'), - (0xA7C1, 'V'), - (0xA7C2, 'M', 'ꟃ'), - (0xA7C3, 'V'), - (0xA7C4, 'M', 'ꞔ'), - (0xA7C5, 'M', 'ʂ'), - (0xA7C6, 'M', 'ᶎ'), - (0xA7C7, 'M', 'ꟈ'), - (0xA7C8, 'V'), - (0xA7C9, 'M', 'ꟊ'), - (0xA7CA, 'V'), - (0xA7CB, 'X'), - (0xA7D0, 'M', 'ꟑ'), - (0xA7D1, 'V'), - (0xA7D2, 'X'), - (0xA7D3, 'V'), - (0xA7D4, 'X'), - (0xA7D5, 'V'), - (0xA7D6, 'M', 'ꟗ'), - (0xA7D7, 'V'), - (0xA7D8, 'M', 'ꟙ'), - (0xA7D9, 'V'), - (0xA7DA, 'X'), - (0xA7F2, 'M', 'c'), - (0xA7F3, 'M', 'f'), - (0xA7F4, 'M', 'q'), - (0xA7F5, 'M', 'ꟶ'), - (0xA7F6, 'V'), - (0xA7F8, 'M', 'ħ'), - (0xA7F9, 'M', 'œ'), - (0xA7FA, 'V'), - (0xA82D, 'X'), - (0xA830, 'V'), - (0xA83A, 'X'), - (0xA840, 'V'), - (0xA878, 'X'), - (0xA880, 'V'), - (0xA8C6, 'X'), - ] - -def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA8CE, 'V'), - (0xA8DA, 'X'), - (0xA8E0, 'V'), - (0xA954, 'X'), - (0xA95F, 'V'), - (0xA97D, 'X'), - (0xA980, 'V'), - (0xA9CE, 'X'), - (0xA9CF, 'V'), - (0xA9DA, 'X'), - (0xA9DE, 'V'), - (0xA9FF, 'X'), - (0xAA00, 'V'), - (0xAA37, 'X'), - (0xAA40, 'V'), - (0xAA4E, 'X'), - (0xAA50, 'V'), - (0xAA5A, 'X'), - (0xAA5C, 'V'), - (0xAAC3, 'X'), - (0xAADB, 'V'), - (0xAAF7, 'X'), - (0xAB01, 'V'), - (0xAB07, 'X'), - (0xAB09, 'V'), - (0xAB0F, 'X'), - (0xAB11, 'V'), - (0xAB17, 'X'), - (0xAB20, 'V'), - (0xAB27, 'X'), - (0xAB28, 'V'), - (0xAB2F, 'X'), - (0xAB30, 'V'), - (0xAB5C, 'M', 'ꜧ'), - (0xAB5D, 'M', 'ꬷ'), - (0xAB5E, 'M', 'ɫ'), - (0xAB5F, 'M', 'ꭒ'), - (0xAB60, 'V'), - (0xAB69, 'M', 'ʍ'), - (0xAB6A, 'V'), - (0xAB6C, 'X'), - (0xAB70, 'M', 'Ꭰ'), - (0xAB71, 'M', 'Ꭱ'), - (0xAB72, 'M', 'Ꭲ'), - (0xAB73, 'M', 'Ꭳ'), - (0xAB74, 'M', 'Ꭴ'), - (0xAB75, 'M', 'Ꭵ'), - (0xAB76, 'M', 'Ꭶ'), - (0xAB77, 'M', 'Ꭷ'), - (0xAB78, 'M', 'Ꭸ'), - (0xAB79, 'M', 'Ꭹ'), - (0xAB7A, 'M', 'Ꭺ'), - (0xAB7B, 'M', 'Ꭻ'), - (0xAB7C, 'M', 'Ꭼ'), - (0xAB7D, 'M', 'Ꭽ'), - (0xAB7E, 'M', 'Ꭾ'), - (0xAB7F, 'M', 'Ꭿ'), - (0xAB80, 'M', 'Ꮀ'), - (0xAB81, 'M', 'Ꮁ'), - (0xAB82, 'M', 'Ꮂ'), - (0xAB83, 'M', 'Ꮃ'), - (0xAB84, 'M', 'Ꮄ'), - (0xAB85, 'M', 'Ꮅ'), - (0xAB86, 'M', 'Ꮆ'), - (0xAB87, 'M', 'Ꮇ'), - (0xAB88, 'M', 'Ꮈ'), - (0xAB89, 'M', 'Ꮉ'), - (0xAB8A, 'M', 'Ꮊ'), - (0xAB8B, 'M', 'Ꮋ'), - (0xAB8C, 'M', 'Ꮌ'), - (0xAB8D, 'M', 'Ꮍ'), - (0xAB8E, 'M', 'Ꮎ'), - (0xAB8F, 'M', 'Ꮏ'), - (0xAB90, 'M', 'Ꮐ'), - (0xAB91, 'M', 'Ꮑ'), - (0xAB92, 'M', 'Ꮒ'), - (0xAB93, 'M', 'Ꮓ'), - (0xAB94, 'M', 'Ꮔ'), - (0xAB95, 'M', 'Ꮕ'), - (0xAB96, 'M', 'Ꮖ'), - (0xAB97, 'M', 'Ꮗ'), - (0xAB98, 'M', 'Ꮘ'), - (0xAB99, 'M', 'Ꮙ'), - (0xAB9A, 'M', 'Ꮚ'), - (0xAB9B, 'M', 'Ꮛ'), - (0xAB9C, 'M', 'Ꮜ'), - (0xAB9D, 'M', 'Ꮝ'), - (0xAB9E, 'M', 'Ꮞ'), - (0xAB9F, 'M', 'Ꮟ'), - (0xABA0, 'M', 'Ꮠ'), - (0xABA1, 'M', 'Ꮡ'), - (0xABA2, 'M', 'Ꮢ'), - (0xABA3, 'M', 'Ꮣ'), - (0xABA4, 'M', 'Ꮤ'), - (0xABA5, 'M', 'Ꮥ'), - (0xABA6, 'M', 'Ꮦ'), - (0xABA7, 'M', 'Ꮧ'), - (0xABA8, 'M', 'Ꮨ'), - (0xABA9, 'M', 'Ꮩ'), - (0xABAA, 'M', 'Ꮪ'), - ] - -def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xABAB, 'M', 'Ꮫ'), - (0xABAC, 'M', 'Ꮬ'), - (0xABAD, 'M', 'Ꮭ'), - (0xABAE, 'M', 'Ꮮ'), - (0xABAF, 'M', 'Ꮯ'), - (0xABB0, 'M', 'Ꮰ'), - (0xABB1, 'M', 'Ꮱ'), - (0xABB2, 'M', 'Ꮲ'), - (0xABB3, 'M', 'Ꮳ'), - (0xABB4, 'M', 'Ꮴ'), - (0xABB5, 'M', 'Ꮵ'), - (0xABB6, 'M', 'Ꮶ'), - (0xABB7, 'M', 'Ꮷ'), - (0xABB8, 'M', 'Ꮸ'), - (0xABB9, 'M', 'Ꮹ'), - (0xABBA, 'M', 'Ꮺ'), - (0xABBB, 'M', 'Ꮻ'), - (0xABBC, 'M', 'Ꮼ'), - (0xABBD, 'M', 'Ꮽ'), - (0xABBE, 'M', 'Ꮾ'), - (0xABBF, 'M', 'Ꮿ'), - (0xABC0, 'V'), - (0xABEE, 'X'), - (0xABF0, 'V'), - (0xABFA, 'X'), - (0xAC00, 'V'), - (0xD7A4, 'X'), - (0xD7B0, 'V'), - (0xD7C7, 'X'), - (0xD7CB, 'V'), - (0xD7FC, 'X'), - (0xF900, 'M', '豈'), - (0xF901, 'M', '更'), - (0xF902, 'M', '車'), - (0xF903, 'M', '賈'), - (0xF904, 'M', '滑'), - (0xF905, 'M', '串'), - (0xF906, 'M', '句'), - (0xF907, 'M', '龜'), - (0xF909, 'M', '契'), - (0xF90A, 'M', '金'), - (0xF90B, 'M', '喇'), - (0xF90C, 'M', '奈'), - (0xF90D, 'M', '懶'), - (0xF90E, 'M', '癩'), - (0xF90F, 'M', '羅'), - (0xF910, 'M', '蘿'), - (0xF911, 'M', '螺'), - (0xF912, 'M', '裸'), - (0xF913, 'M', '邏'), - (0xF914, 'M', '樂'), - (0xF915, 'M', '洛'), - (0xF916, 'M', '烙'), - (0xF917, 'M', '珞'), - (0xF918, 'M', '落'), - (0xF919, 'M', '酪'), - (0xF91A, 'M', '駱'), - (0xF91B, 'M', '亂'), - (0xF91C, 'M', '卵'), - (0xF91D, 'M', '欄'), - (0xF91E, 'M', '爛'), - (0xF91F, 'M', '蘭'), - (0xF920, 'M', '鸞'), - (0xF921, 'M', '嵐'), - (0xF922, 'M', '濫'), - (0xF923, 'M', '藍'), - (0xF924, 'M', '襤'), - (0xF925, 'M', '拉'), - (0xF926, 'M', '臘'), - (0xF927, 'M', '蠟'), - (0xF928, 'M', '廊'), - (0xF929, 'M', '朗'), - (0xF92A, 'M', '浪'), - (0xF92B, 'M', '狼'), - (0xF92C, 'M', '郎'), - (0xF92D, 'M', '來'), - (0xF92E, 'M', '冷'), - (0xF92F, 'M', '勞'), - (0xF930, 'M', '擄'), - (0xF931, 'M', '櫓'), - (0xF932, 'M', '爐'), - (0xF933, 'M', '盧'), - (0xF934, 'M', '老'), - (0xF935, 'M', '蘆'), - (0xF936, 'M', '虜'), - (0xF937, 'M', '路'), - (0xF938, 'M', '露'), - (0xF939, 'M', '魯'), - (0xF93A, 'M', '鷺'), - (0xF93B, 'M', '碌'), - (0xF93C, 'M', '祿'), - (0xF93D, 'M', '綠'), - (0xF93E, 'M', '菉'), - (0xF93F, 'M', '錄'), - (0xF940, 'M', '鹿'), - (0xF941, 'M', '論'), - (0xF942, 'M', '壟'), - (0xF943, 'M', '弄'), - (0xF944, 'M', '籠'), - (0xF945, 'M', '聾'), - ] - -def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF946, 'M', '牢'), - (0xF947, 'M', '磊'), - (0xF948, 'M', '賂'), - (0xF949, 'M', '雷'), - (0xF94A, 'M', '壘'), - (0xF94B, 'M', '屢'), - (0xF94C, 'M', '樓'), - (0xF94D, 'M', '淚'), - (0xF94E, 'M', '漏'), - (0xF94F, 'M', '累'), - (0xF950, 'M', '縷'), - (0xF951, 'M', '陋'), - (0xF952, 'M', '勒'), - (0xF953, 'M', '肋'), - (0xF954, 'M', '凜'), - (0xF955, 'M', '凌'), - (0xF956, 'M', '稜'), - (0xF957, 'M', '綾'), - (0xF958, 'M', '菱'), - (0xF959, 'M', '陵'), - (0xF95A, 'M', '讀'), - (0xF95B, 'M', '拏'), - (0xF95C, 'M', '樂'), - (0xF95D, 'M', '諾'), - (0xF95E, 'M', '丹'), - (0xF95F, 'M', '寧'), - (0xF960, 'M', '怒'), - (0xF961, 'M', '率'), - (0xF962, 'M', '異'), - (0xF963, 'M', '北'), - (0xF964, 'M', '磻'), - (0xF965, 'M', '便'), - (0xF966, 'M', '復'), - (0xF967, 'M', '不'), - (0xF968, 'M', '泌'), - (0xF969, 'M', '數'), - (0xF96A, 'M', '索'), - (0xF96B, 'M', '參'), - (0xF96C, 'M', '塞'), - (0xF96D, 'M', '省'), - (0xF96E, 'M', '葉'), - (0xF96F, 'M', '說'), - (0xF970, 'M', '殺'), - (0xF971, 'M', '辰'), - (0xF972, 'M', '沈'), - (0xF973, 'M', '拾'), - (0xF974, 'M', '若'), - (0xF975, 'M', '掠'), - (0xF976, 'M', '略'), - (0xF977, 'M', '亮'), - (0xF978, 'M', '兩'), - (0xF979, 'M', '凉'), - (0xF97A, 'M', '梁'), - (0xF97B, 'M', '糧'), - (0xF97C, 'M', '良'), - (0xF97D, 'M', '諒'), - (0xF97E, 'M', '量'), - (0xF97F, 'M', '勵'), - (0xF980, 'M', '呂'), - (0xF981, 'M', '女'), - (0xF982, 'M', '廬'), - (0xF983, 'M', '旅'), - (0xF984, 'M', '濾'), - (0xF985, 'M', '礪'), - (0xF986, 'M', '閭'), - (0xF987, 'M', '驪'), - (0xF988, 'M', '麗'), - (0xF989, 'M', '黎'), - (0xF98A, 'M', '力'), - (0xF98B, 'M', '曆'), - (0xF98C, 'M', '歷'), - (0xF98D, 'M', '轢'), - (0xF98E, 'M', '年'), - (0xF98F, 'M', '憐'), - (0xF990, 'M', '戀'), - (0xF991, 'M', '撚'), - (0xF992, 'M', '漣'), - (0xF993, 'M', '煉'), - (0xF994, 'M', '璉'), - (0xF995, 'M', '秊'), - (0xF996, 'M', '練'), - (0xF997, 'M', '聯'), - (0xF998, 'M', '輦'), - (0xF999, 'M', '蓮'), - (0xF99A, 'M', '連'), - (0xF99B, 'M', '鍊'), - (0xF99C, 'M', '列'), - (0xF99D, 'M', '劣'), - (0xF99E, 'M', '咽'), - (0xF99F, 'M', '烈'), - (0xF9A0, 'M', '裂'), - (0xF9A1, 'M', '說'), - (0xF9A2, 'M', '廉'), - (0xF9A3, 'M', '念'), - (0xF9A4, 'M', '捻'), - (0xF9A5, 'M', '殮'), - (0xF9A6, 'M', '簾'), - (0xF9A7, 'M', '獵'), - (0xF9A8, 'M', '令'), - (0xF9A9, 'M', '囹'), - ] - -def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF9AA, 'M', '寧'), - (0xF9AB, 'M', '嶺'), - (0xF9AC, 'M', '怜'), - (0xF9AD, 'M', '玲'), - (0xF9AE, 'M', '瑩'), - (0xF9AF, 'M', '羚'), - (0xF9B0, 'M', '聆'), - (0xF9B1, 'M', '鈴'), - (0xF9B2, 'M', '零'), - (0xF9B3, 'M', '靈'), - (0xF9B4, 'M', '領'), - (0xF9B5, 'M', '例'), - (0xF9B6, 'M', '禮'), - (0xF9B7, 'M', '醴'), - (0xF9B8, 'M', '隸'), - (0xF9B9, 'M', '惡'), - (0xF9BA, 'M', '了'), - (0xF9BB, 'M', '僚'), - (0xF9BC, 'M', '寮'), - (0xF9BD, 'M', '尿'), - (0xF9BE, 'M', '料'), - (0xF9BF, 'M', '樂'), - (0xF9C0, 'M', '燎'), - (0xF9C1, 'M', '療'), - (0xF9C2, 'M', '蓼'), - (0xF9C3, 'M', '遼'), - (0xF9C4, 'M', '龍'), - (0xF9C5, 'M', '暈'), - (0xF9C6, 'M', '阮'), - (0xF9C7, 'M', '劉'), - (0xF9C8, 'M', '杻'), - (0xF9C9, 'M', '柳'), - (0xF9CA, 'M', '流'), - (0xF9CB, 'M', '溜'), - (0xF9CC, 'M', '琉'), - (0xF9CD, 'M', '留'), - (0xF9CE, 'M', '硫'), - (0xF9CF, 'M', '紐'), - (0xF9D0, 'M', '類'), - (0xF9D1, 'M', '六'), - (0xF9D2, 'M', '戮'), - (0xF9D3, 'M', '陸'), - (0xF9D4, 'M', '倫'), - (0xF9D5, 'M', '崙'), - (0xF9D6, 'M', '淪'), - (0xF9D7, 'M', '輪'), - (0xF9D8, 'M', '律'), - (0xF9D9, 'M', '慄'), - (0xF9DA, 'M', '栗'), - (0xF9DB, 'M', '率'), - (0xF9DC, 'M', '隆'), - (0xF9DD, 'M', '利'), - (0xF9DE, 'M', '吏'), - (0xF9DF, 'M', '履'), - (0xF9E0, 'M', '易'), - (0xF9E1, 'M', '李'), - (0xF9E2, 'M', '梨'), - (0xF9E3, 'M', '泥'), - (0xF9E4, 'M', '理'), - (0xF9E5, 'M', '痢'), - (0xF9E6, 'M', '罹'), - (0xF9E7, 'M', '裏'), - (0xF9E8, 'M', '裡'), - (0xF9E9, 'M', '里'), - (0xF9EA, 'M', '離'), - (0xF9EB, 'M', '匿'), - (0xF9EC, 'M', '溺'), - (0xF9ED, 'M', '吝'), - (0xF9EE, 'M', '燐'), - (0xF9EF, 'M', '璘'), - (0xF9F0, 'M', '藺'), - (0xF9F1, 'M', '隣'), - (0xF9F2, 'M', '鱗'), - (0xF9F3, 'M', '麟'), - (0xF9F4, 'M', '林'), - (0xF9F5, 'M', '淋'), - (0xF9F6, 'M', '臨'), - (0xF9F7, 'M', '立'), - (0xF9F8, 'M', '笠'), - (0xF9F9, 'M', '粒'), - (0xF9FA, 'M', '狀'), - (0xF9FB, 'M', '炙'), - (0xF9FC, 'M', '識'), - (0xF9FD, 'M', '什'), - (0xF9FE, 'M', '茶'), - (0xF9FF, 'M', '刺'), - (0xFA00, 'M', '切'), - (0xFA01, 'M', '度'), - (0xFA02, 'M', '拓'), - (0xFA03, 'M', '糖'), - (0xFA04, 'M', '宅'), - (0xFA05, 'M', '洞'), - (0xFA06, 'M', '暴'), - (0xFA07, 'M', '輻'), - (0xFA08, 'M', '行'), - (0xFA09, 'M', '降'), - (0xFA0A, 'M', '見'), - (0xFA0B, 'M', '廓'), - (0xFA0C, 'M', '兀'), - (0xFA0D, 'M', '嗀'), - ] - -def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA0E, 'V'), - (0xFA10, 'M', '塚'), - (0xFA11, 'V'), - (0xFA12, 'M', '晴'), - (0xFA13, 'V'), - (0xFA15, 'M', '凞'), - (0xFA16, 'M', '猪'), - (0xFA17, 'M', '益'), - (0xFA18, 'M', '礼'), - (0xFA19, 'M', '神'), - (0xFA1A, 'M', '祥'), - (0xFA1B, 'M', '福'), - (0xFA1C, 'M', '靖'), - (0xFA1D, 'M', '精'), - (0xFA1E, 'M', '羽'), - (0xFA1F, 'V'), - (0xFA20, 'M', '蘒'), - (0xFA21, 'V'), - (0xFA22, 'M', '諸'), - (0xFA23, 'V'), - (0xFA25, 'M', '逸'), - (0xFA26, 'M', '都'), - (0xFA27, 'V'), - (0xFA2A, 'M', '飯'), - (0xFA2B, 'M', '飼'), - (0xFA2C, 'M', '館'), - (0xFA2D, 'M', '鶴'), - (0xFA2E, 'M', '郞'), - (0xFA2F, 'M', '隷'), - (0xFA30, 'M', '侮'), - (0xFA31, 'M', '僧'), - (0xFA32, 'M', '免'), - (0xFA33, 'M', '勉'), - (0xFA34, 'M', '勤'), - (0xFA35, 'M', '卑'), - (0xFA36, 'M', '喝'), - (0xFA37, 'M', '嘆'), - (0xFA38, 'M', '器'), - (0xFA39, 'M', '塀'), - (0xFA3A, 'M', '墨'), - (0xFA3B, 'M', '層'), - (0xFA3C, 'M', '屮'), - (0xFA3D, 'M', '悔'), - (0xFA3E, 'M', '慨'), - (0xFA3F, 'M', '憎'), - (0xFA40, 'M', '懲'), - (0xFA41, 'M', '敏'), - (0xFA42, 'M', '既'), - (0xFA43, 'M', '暑'), - (0xFA44, 'M', '梅'), - (0xFA45, 'M', '海'), - (0xFA46, 'M', '渚'), - (0xFA47, 'M', '漢'), - (0xFA48, 'M', '煮'), - (0xFA49, 'M', '爫'), - (0xFA4A, 'M', '琢'), - (0xFA4B, 'M', '碑'), - (0xFA4C, 'M', '社'), - (0xFA4D, 'M', '祉'), - (0xFA4E, 'M', '祈'), - (0xFA4F, 'M', '祐'), - (0xFA50, 'M', '祖'), - (0xFA51, 'M', '祝'), - (0xFA52, 'M', '禍'), - (0xFA53, 'M', '禎'), - (0xFA54, 'M', '穀'), - (0xFA55, 'M', '突'), - (0xFA56, 'M', '節'), - (0xFA57, 'M', '練'), - (0xFA58, 'M', '縉'), - (0xFA59, 'M', '繁'), - (0xFA5A, 'M', '署'), - (0xFA5B, 'M', '者'), - (0xFA5C, 'M', '臭'), - (0xFA5D, 'M', '艹'), - (0xFA5F, 'M', '著'), - (0xFA60, 'M', '褐'), - (0xFA61, 'M', '視'), - (0xFA62, 'M', '謁'), - (0xFA63, 'M', '謹'), - (0xFA64, 'M', '賓'), - (0xFA65, 'M', '贈'), - (0xFA66, 'M', '辶'), - (0xFA67, 'M', '逸'), - (0xFA68, 'M', '難'), - (0xFA69, 'M', '響'), - (0xFA6A, 'M', '頻'), - (0xFA6B, 'M', '恵'), - (0xFA6C, 'M', '𤋮'), - (0xFA6D, 'M', '舘'), - (0xFA6E, 'X'), - (0xFA70, 'M', '並'), - (0xFA71, 'M', '况'), - (0xFA72, 'M', '全'), - (0xFA73, 'M', '侀'), - (0xFA74, 'M', '充'), - (0xFA75, 'M', '冀'), - (0xFA76, 'M', '勇'), - (0xFA77, 'M', '勺'), - (0xFA78, 'M', '喝'), - ] - -def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA79, 'M', '啕'), - (0xFA7A, 'M', '喙'), - (0xFA7B, 'M', '嗢'), - (0xFA7C, 'M', '塚'), - (0xFA7D, 'M', '墳'), - (0xFA7E, 'M', '奄'), - (0xFA7F, 'M', '奔'), - (0xFA80, 'M', '婢'), - (0xFA81, 'M', '嬨'), - (0xFA82, 'M', '廒'), - (0xFA83, 'M', '廙'), - (0xFA84, 'M', '彩'), - (0xFA85, 'M', '徭'), - (0xFA86, 'M', '惘'), - (0xFA87, 'M', '慎'), - (0xFA88, 'M', '愈'), - (0xFA89, 'M', '憎'), - (0xFA8A, 'M', '慠'), - (0xFA8B, 'M', '懲'), - (0xFA8C, 'M', '戴'), - (0xFA8D, 'M', '揄'), - (0xFA8E, 'M', '搜'), - (0xFA8F, 'M', '摒'), - (0xFA90, 'M', '敖'), - (0xFA91, 'M', '晴'), - (0xFA92, 'M', '朗'), - (0xFA93, 'M', '望'), - (0xFA94, 'M', '杖'), - (0xFA95, 'M', '歹'), - (0xFA96, 'M', '殺'), - (0xFA97, 'M', '流'), - (0xFA98, 'M', '滛'), - (0xFA99, 'M', '滋'), - (0xFA9A, 'M', '漢'), - (0xFA9B, 'M', '瀞'), - (0xFA9C, 'M', '煮'), - (0xFA9D, 'M', '瞧'), - (0xFA9E, 'M', '爵'), - (0xFA9F, 'M', '犯'), - (0xFAA0, 'M', '猪'), - (0xFAA1, 'M', '瑱'), - (0xFAA2, 'M', '甆'), - (0xFAA3, 'M', '画'), - (0xFAA4, 'M', '瘝'), - (0xFAA5, 'M', '瘟'), - (0xFAA6, 'M', '益'), - (0xFAA7, 'M', '盛'), - (0xFAA8, 'M', '直'), - (0xFAA9, 'M', '睊'), - (0xFAAA, 'M', '着'), - (0xFAAB, 'M', '磌'), - (0xFAAC, 'M', '窱'), - (0xFAAD, 'M', '節'), - (0xFAAE, 'M', '类'), - (0xFAAF, 'M', '絛'), - (0xFAB0, 'M', '練'), - (0xFAB1, 'M', '缾'), - (0xFAB2, 'M', '者'), - (0xFAB3, 'M', '荒'), - (0xFAB4, 'M', '華'), - (0xFAB5, 'M', '蝹'), - (0xFAB6, 'M', '襁'), - (0xFAB7, 'M', '覆'), - (0xFAB8, 'M', '視'), - (0xFAB9, 'M', '調'), - (0xFABA, 'M', '諸'), - (0xFABB, 'M', '請'), - (0xFABC, 'M', '謁'), - (0xFABD, 'M', '諾'), - (0xFABE, 'M', '諭'), - (0xFABF, 'M', '謹'), - (0xFAC0, 'M', '變'), - (0xFAC1, 'M', '贈'), - (0xFAC2, 'M', '輸'), - (0xFAC3, 'M', '遲'), - (0xFAC4, 'M', '醙'), - (0xFAC5, 'M', '鉶'), - (0xFAC6, 'M', '陼'), - (0xFAC7, 'M', '難'), - (0xFAC8, 'M', '靖'), - (0xFAC9, 'M', '韛'), - (0xFACA, 'M', '響'), - (0xFACB, 'M', '頋'), - (0xFACC, 'M', '頻'), - (0xFACD, 'M', '鬒'), - (0xFACE, 'M', '龜'), - (0xFACF, 'M', '𢡊'), - (0xFAD0, 'M', '𢡄'), - (0xFAD1, 'M', '𣏕'), - (0xFAD2, 'M', '㮝'), - (0xFAD3, 'M', '䀘'), - (0xFAD4, 'M', '䀹'), - (0xFAD5, 'M', '𥉉'), - (0xFAD6, 'M', '𥳐'), - (0xFAD7, 'M', '𧻓'), - (0xFAD8, 'M', '齃'), - (0xFAD9, 'M', '龎'), - (0xFADA, 'X'), - (0xFB00, 'M', 'ff'), - (0xFB01, 'M', 'fi'), - ] - -def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFB02, 'M', 'fl'), - (0xFB03, 'M', 'ffi'), - (0xFB04, 'M', 'ffl'), - (0xFB05, 'M', 'st'), - (0xFB07, 'X'), - (0xFB13, 'M', 'մն'), - (0xFB14, 'M', 'մե'), - (0xFB15, 'M', 'մի'), - (0xFB16, 'M', 'վն'), - (0xFB17, 'M', 'մխ'), - (0xFB18, 'X'), - (0xFB1D, 'M', 'יִ'), - (0xFB1E, 'V'), - (0xFB1F, 'M', 'ײַ'), - (0xFB20, 'M', 'ע'), - (0xFB21, 'M', 'א'), - (0xFB22, 'M', 'ד'), - (0xFB23, 'M', 'ה'), - (0xFB24, 'M', 'כ'), - (0xFB25, 'M', 'ל'), - (0xFB26, 'M', 'ם'), - (0xFB27, 'M', 'ר'), - (0xFB28, 'M', 'ת'), - (0xFB29, '3', '+'), - (0xFB2A, 'M', 'שׁ'), - (0xFB2B, 'M', 'שׂ'), - (0xFB2C, 'M', 'שּׁ'), - (0xFB2D, 'M', 'שּׂ'), - (0xFB2E, 'M', 'אַ'), - (0xFB2F, 'M', 'אָ'), - (0xFB30, 'M', 'אּ'), - (0xFB31, 'M', 'בּ'), - (0xFB32, 'M', 'גּ'), - (0xFB33, 'M', 'דּ'), - (0xFB34, 'M', 'הּ'), - (0xFB35, 'M', 'וּ'), - (0xFB36, 'M', 'זּ'), - (0xFB37, 'X'), - (0xFB38, 'M', 'טּ'), - (0xFB39, 'M', 'יּ'), - (0xFB3A, 'M', 'ךּ'), - (0xFB3B, 'M', 'כּ'), - (0xFB3C, 'M', 'לּ'), - (0xFB3D, 'X'), - (0xFB3E, 'M', 'מּ'), - (0xFB3F, 'X'), - (0xFB40, 'M', 'נּ'), - (0xFB41, 'M', 'סּ'), - (0xFB42, 'X'), - (0xFB43, 'M', 'ףּ'), - (0xFB44, 'M', 'פּ'), - (0xFB45, 'X'), - (0xFB46, 'M', 'צּ'), - (0xFB47, 'M', 'קּ'), - (0xFB48, 'M', 'רּ'), - (0xFB49, 'M', 'שּ'), - (0xFB4A, 'M', 'תּ'), - (0xFB4B, 'M', 'וֹ'), - (0xFB4C, 'M', 'בֿ'), - (0xFB4D, 'M', 'כֿ'), - (0xFB4E, 'M', 'פֿ'), - (0xFB4F, 'M', 'אל'), - (0xFB50, 'M', 'ٱ'), - (0xFB52, 'M', 'ٻ'), - (0xFB56, 'M', 'پ'), - (0xFB5A, 'M', 'ڀ'), - (0xFB5E, 'M', 'ٺ'), - (0xFB62, 'M', 'ٿ'), - (0xFB66, 'M', 'ٹ'), - (0xFB6A, 'M', 'ڤ'), - (0xFB6E, 'M', 'ڦ'), - (0xFB72, 'M', 'ڄ'), - (0xFB76, 'M', 'ڃ'), - (0xFB7A, 'M', 'چ'), - (0xFB7E, 'M', 'ڇ'), - (0xFB82, 'M', 'ڍ'), - (0xFB84, 'M', 'ڌ'), - (0xFB86, 'M', 'ڎ'), - (0xFB88, 'M', 'ڈ'), - (0xFB8A, 'M', 'ژ'), - (0xFB8C, 'M', 'ڑ'), - (0xFB8E, 'M', 'ک'), - (0xFB92, 'M', 'گ'), - (0xFB96, 'M', 'ڳ'), - (0xFB9A, 'M', 'ڱ'), - (0xFB9E, 'M', 'ں'), - (0xFBA0, 'M', 'ڻ'), - (0xFBA4, 'M', 'ۀ'), - (0xFBA6, 'M', 'ہ'), - (0xFBAA, 'M', 'ھ'), - (0xFBAE, 'M', 'ے'), - (0xFBB0, 'M', 'ۓ'), - (0xFBB2, 'V'), - (0xFBC3, 'X'), - (0xFBD3, 'M', 'ڭ'), - (0xFBD7, 'M', 'ۇ'), - (0xFBD9, 'M', 'ۆ'), - (0xFBDB, 'M', 'ۈ'), - (0xFBDD, 'M', 'ۇٴ'), - (0xFBDE, 'M', 'ۋ'), - ] - -def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFBE0, 'M', 'ۅ'), - (0xFBE2, 'M', 'ۉ'), - (0xFBE4, 'M', 'ې'), - (0xFBE8, 'M', 'ى'), - (0xFBEA, 'M', 'ئا'), - (0xFBEC, 'M', 'ئە'), - (0xFBEE, 'M', 'ئو'), - (0xFBF0, 'M', 'ئۇ'), - (0xFBF2, 'M', 'ئۆ'), - (0xFBF4, 'M', 'ئۈ'), - (0xFBF6, 'M', 'ئې'), - (0xFBF9, 'M', 'ئى'), - (0xFBFC, 'M', 'ی'), - (0xFC00, 'M', 'ئج'), - (0xFC01, 'M', 'ئح'), - (0xFC02, 'M', 'ئم'), - (0xFC03, 'M', 'ئى'), - (0xFC04, 'M', 'ئي'), - (0xFC05, 'M', 'بج'), - (0xFC06, 'M', 'بح'), - (0xFC07, 'M', 'بخ'), - (0xFC08, 'M', 'بم'), - (0xFC09, 'M', 'بى'), - (0xFC0A, 'M', 'بي'), - (0xFC0B, 'M', 'تج'), - (0xFC0C, 'M', 'تح'), - (0xFC0D, 'M', 'تخ'), - (0xFC0E, 'M', 'تم'), - (0xFC0F, 'M', 'تى'), - (0xFC10, 'M', 'تي'), - (0xFC11, 'M', 'ثج'), - (0xFC12, 'M', 'ثم'), - (0xFC13, 'M', 'ثى'), - (0xFC14, 'M', 'ثي'), - (0xFC15, 'M', 'جح'), - (0xFC16, 'M', 'جم'), - (0xFC17, 'M', 'حج'), - (0xFC18, 'M', 'حم'), - (0xFC19, 'M', 'خج'), - (0xFC1A, 'M', 'خح'), - (0xFC1B, 'M', 'خم'), - (0xFC1C, 'M', 'سج'), - (0xFC1D, 'M', 'سح'), - (0xFC1E, 'M', 'سخ'), - (0xFC1F, 'M', 'سم'), - (0xFC20, 'M', 'صح'), - (0xFC21, 'M', 'صم'), - (0xFC22, 'M', 'ضج'), - (0xFC23, 'M', 'ضح'), - (0xFC24, 'M', 'ضخ'), - (0xFC25, 'M', 'ضم'), - (0xFC26, 'M', 'طح'), - (0xFC27, 'M', 'طم'), - (0xFC28, 'M', 'ظم'), - (0xFC29, 'M', 'عج'), - (0xFC2A, 'M', 'عم'), - (0xFC2B, 'M', 'غج'), - (0xFC2C, 'M', 'غم'), - (0xFC2D, 'M', 'فج'), - (0xFC2E, 'M', 'فح'), - (0xFC2F, 'M', 'فخ'), - (0xFC30, 'M', 'فم'), - (0xFC31, 'M', 'فى'), - (0xFC32, 'M', 'في'), - (0xFC33, 'M', 'قح'), - (0xFC34, 'M', 'قم'), - (0xFC35, 'M', 'قى'), - (0xFC36, 'M', 'قي'), - (0xFC37, 'M', 'كا'), - (0xFC38, 'M', 'كج'), - (0xFC39, 'M', 'كح'), - (0xFC3A, 'M', 'كخ'), - (0xFC3B, 'M', 'كل'), - (0xFC3C, 'M', 'كم'), - (0xFC3D, 'M', 'كى'), - (0xFC3E, 'M', 'كي'), - (0xFC3F, 'M', 'لج'), - (0xFC40, 'M', 'لح'), - (0xFC41, 'M', 'لخ'), - (0xFC42, 'M', 'لم'), - (0xFC43, 'M', 'لى'), - (0xFC44, 'M', 'لي'), - (0xFC45, 'M', 'مج'), - (0xFC46, 'M', 'مح'), - (0xFC47, 'M', 'مخ'), - (0xFC48, 'M', 'مم'), - (0xFC49, 'M', 'مى'), - (0xFC4A, 'M', 'مي'), - (0xFC4B, 'M', 'نج'), - (0xFC4C, 'M', 'نح'), - (0xFC4D, 'M', 'نخ'), - (0xFC4E, 'M', 'نم'), - (0xFC4F, 'M', 'نى'), - (0xFC50, 'M', 'ني'), - (0xFC51, 'M', 'هج'), - (0xFC52, 'M', 'هم'), - (0xFC53, 'M', 'هى'), - (0xFC54, 'M', 'هي'), - (0xFC55, 'M', 'يج'), - (0xFC56, 'M', 'يح'), - ] - -def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFC57, 'M', 'يخ'), - (0xFC58, 'M', 'يم'), - (0xFC59, 'M', 'يى'), - (0xFC5A, 'M', 'يي'), - (0xFC5B, 'M', 'ذٰ'), - (0xFC5C, 'M', 'رٰ'), - (0xFC5D, 'M', 'ىٰ'), - (0xFC5E, '3', ' ٌّ'), - (0xFC5F, '3', ' ٍّ'), - (0xFC60, '3', ' َّ'), - (0xFC61, '3', ' ُّ'), - (0xFC62, '3', ' ِّ'), - (0xFC63, '3', ' ّٰ'), - (0xFC64, 'M', 'ئر'), - (0xFC65, 'M', 'ئز'), - (0xFC66, 'M', 'ئم'), - (0xFC67, 'M', 'ئن'), - (0xFC68, 'M', 'ئى'), - (0xFC69, 'M', 'ئي'), - (0xFC6A, 'M', 'بر'), - (0xFC6B, 'M', 'بز'), - (0xFC6C, 'M', 'بم'), - (0xFC6D, 'M', 'بن'), - (0xFC6E, 'M', 'بى'), - (0xFC6F, 'M', 'بي'), - (0xFC70, 'M', 'تر'), - (0xFC71, 'M', 'تز'), - (0xFC72, 'M', 'تم'), - (0xFC73, 'M', 'تن'), - (0xFC74, 'M', 'تى'), - (0xFC75, 'M', 'تي'), - (0xFC76, 'M', 'ثر'), - (0xFC77, 'M', 'ثز'), - (0xFC78, 'M', 'ثم'), - (0xFC79, 'M', 'ثن'), - (0xFC7A, 'M', 'ثى'), - (0xFC7B, 'M', 'ثي'), - (0xFC7C, 'M', 'فى'), - (0xFC7D, 'M', 'في'), - (0xFC7E, 'M', 'قى'), - (0xFC7F, 'M', 'قي'), - (0xFC80, 'M', 'كا'), - (0xFC81, 'M', 'كل'), - (0xFC82, 'M', 'كم'), - (0xFC83, 'M', 'كى'), - (0xFC84, 'M', 'كي'), - (0xFC85, 'M', 'لم'), - (0xFC86, 'M', 'لى'), - (0xFC87, 'M', 'لي'), - (0xFC88, 'M', 'ما'), - (0xFC89, 'M', 'مم'), - (0xFC8A, 'M', 'نر'), - (0xFC8B, 'M', 'نز'), - (0xFC8C, 'M', 'نم'), - (0xFC8D, 'M', 'نن'), - (0xFC8E, 'M', 'نى'), - (0xFC8F, 'M', 'ني'), - (0xFC90, 'M', 'ىٰ'), - (0xFC91, 'M', 'ير'), - (0xFC92, 'M', 'يز'), - (0xFC93, 'M', 'يم'), - (0xFC94, 'M', 'ين'), - (0xFC95, 'M', 'يى'), - (0xFC96, 'M', 'يي'), - (0xFC97, 'M', 'ئج'), - (0xFC98, 'M', 'ئح'), - (0xFC99, 'M', 'ئخ'), - (0xFC9A, 'M', 'ئم'), - (0xFC9B, 'M', 'ئه'), - (0xFC9C, 'M', 'بج'), - (0xFC9D, 'M', 'بح'), - (0xFC9E, 'M', 'بخ'), - (0xFC9F, 'M', 'بم'), - (0xFCA0, 'M', 'به'), - (0xFCA1, 'M', 'تج'), - (0xFCA2, 'M', 'تح'), - (0xFCA3, 'M', 'تخ'), - (0xFCA4, 'M', 'تم'), - (0xFCA5, 'M', 'ته'), - (0xFCA6, 'M', 'ثم'), - (0xFCA7, 'M', 'جح'), - (0xFCA8, 'M', 'جم'), - (0xFCA9, 'M', 'حج'), - (0xFCAA, 'M', 'حم'), - (0xFCAB, 'M', 'خج'), - (0xFCAC, 'M', 'خم'), - (0xFCAD, 'M', 'سج'), - (0xFCAE, 'M', 'سح'), - (0xFCAF, 'M', 'سخ'), - (0xFCB0, 'M', 'سم'), - (0xFCB1, 'M', 'صح'), - (0xFCB2, 'M', 'صخ'), - (0xFCB3, 'M', 'صم'), - (0xFCB4, 'M', 'ضج'), - (0xFCB5, 'M', 'ضح'), - (0xFCB6, 'M', 'ضخ'), - (0xFCB7, 'M', 'ضم'), - (0xFCB8, 'M', 'طح'), - (0xFCB9, 'M', 'ظم'), - (0xFCBA, 'M', 'عج'), - ] - -def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFCBB, 'M', 'عم'), - (0xFCBC, 'M', 'غج'), - (0xFCBD, 'M', 'غم'), - (0xFCBE, 'M', 'فج'), - (0xFCBF, 'M', 'فح'), - (0xFCC0, 'M', 'فخ'), - (0xFCC1, 'M', 'فم'), - (0xFCC2, 'M', 'قح'), - (0xFCC3, 'M', 'قم'), - (0xFCC4, 'M', 'كج'), - (0xFCC5, 'M', 'كح'), - (0xFCC6, 'M', 'كخ'), - (0xFCC7, 'M', 'كل'), - (0xFCC8, 'M', 'كم'), - (0xFCC9, 'M', 'لج'), - (0xFCCA, 'M', 'لح'), - (0xFCCB, 'M', 'لخ'), - (0xFCCC, 'M', 'لم'), - (0xFCCD, 'M', 'له'), - (0xFCCE, 'M', 'مج'), - (0xFCCF, 'M', 'مح'), - (0xFCD0, 'M', 'مخ'), - (0xFCD1, 'M', 'مم'), - (0xFCD2, 'M', 'نج'), - (0xFCD3, 'M', 'نح'), - (0xFCD4, 'M', 'نخ'), - (0xFCD5, 'M', 'نم'), - (0xFCD6, 'M', 'نه'), - (0xFCD7, 'M', 'هج'), - (0xFCD8, 'M', 'هم'), - (0xFCD9, 'M', 'هٰ'), - (0xFCDA, 'M', 'يج'), - (0xFCDB, 'M', 'يح'), - (0xFCDC, 'M', 'يخ'), - (0xFCDD, 'M', 'يم'), - (0xFCDE, 'M', 'يه'), - (0xFCDF, 'M', 'ئم'), - (0xFCE0, 'M', 'ئه'), - (0xFCE1, 'M', 'بم'), - (0xFCE2, 'M', 'به'), - (0xFCE3, 'M', 'تم'), - (0xFCE4, 'M', 'ته'), - (0xFCE5, 'M', 'ثم'), - (0xFCE6, 'M', 'ثه'), - (0xFCE7, 'M', 'سم'), - (0xFCE8, 'M', 'سه'), - (0xFCE9, 'M', 'شم'), - (0xFCEA, 'M', 'شه'), - (0xFCEB, 'M', 'كل'), - (0xFCEC, 'M', 'كم'), - (0xFCED, 'M', 'لم'), - (0xFCEE, 'M', 'نم'), - (0xFCEF, 'M', 'نه'), - (0xFCF0, 'M', 'يم'), - (0xFCF1, 'M', 'يه'), - (0xFCF2, 'M', 'ـَّ'), - (0xFCF3, 'M', 'ـُّ'), - (0xFCF4, 'M', 'ـِّ'), - (0xFCF5, 'M', 'طى'), - (0xFCF6, 'M', 'طي'), - (0xFCF7, 'M', 'عى'), - (0xFCF8, 'M', 'عي'), - (0xFCF9, 'M', 'غى'), - (0xFCFA, 'M', 'غي'), - (0xFCFB, 'M', 'سى'), - (0xFCFC, 'M', 'سي'), - (0xFCFD, 'M', 'شى'), - (0xFCFE, 'M', 'شي'), - (0xFCFF, 'M', 'حى'), - (0xFD00, 'M', 'حي'), - (0xFD01, 'M', 'جى'), - (0xFD02, 'M', 'جي'), - (0xFD03, 'M', 'خى'), - (0xFD04, 'M', 'خي'), - (0xFD05, 'M', 'صى'), - (0xFD06, 'M', 'صي'), - (0xFD07, 'M', 'ضى'), - (0xFD08, 'M', 'ضي'), - (0xFD09, 'M', 'شج'), - (0xFD0A, 'M', 'شح'), - (0xFD0B, 'M', 'شخ'), - (0xFD0C, 'M', 'شم'), - (0xFD0D, 'M', 'شر'), - (0xFD0E, 'M', 'سر'), - (0xFD0F, 'M', 'صر'), - (0xFD10, 'M', 'ضر'), - (0xFD11, 'M', 'طى'), - (0xFD12, 'M', 'طي'), - (0xFD13, 'M', 'عى'), - (0xFD14, 'M', 'عي'), - (0xFD15, 'M', 'غى'), - (0xFD16, 'M', 'غي'), - (0xFD17, 'M', 'سى'), - (0xFD18, 'M', 'سي'), - (0xFD19, 'M', 'شى'), - (0xFD1A, 'M', 'شي'), - (0xFD1B, 'M', 'حى'), - (0xFD1C, 'M', 'حي'), - (0xFD1D, 'M', 'جى'), - (0xFD1E, 'M', 'جي'), - ] - -def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFD1F, 'M', 'خى'), - (0xFD20, 'M', 'خي'), - (0xFD21, 'M', 'صى'), - (0xFD22, 'M', 'صي'), - (0xFD23, 'M', 'ضى'), - (0xFD24, 'M', 'ضي'), - (0xFD25, 'M', 'شج'), - (0xFD26, 'M', 'شح'), - (0xFD27, 'M', 'شخ'), - (0xFD28, 'M', 'شم'), - (0xFD29, 'M', 'شر'), - (0xFD2A, 'M', 'سر'), - (0xFD2B, 'M', 'صر'), - (0xFD2C, 'M', 'ضر'), - (0xFD2D, 'M', 'شج'), - (0xFD2E, 'M', 'شح'), - (0xFD2F, 'M', 'شخ'), - (0xFD30, 'M', 'شم'), - (0xFD31, 'M', 'سه'), - (0xFD32, 'M', 'شه'), - (0xFD33, 'M', 'طم'), - (0xFD34, 'M', 'سج'), - (0xFD35, 'M', 'سح'), - (0xFD36, 'M', 'سخ'), - (0xFD37, 'M', 'شج'), - (0xFD38, 'M', 'شح'), - (0xFD39, 'M', 'شخ'), - (0xFD3A, 'M', 'طم'), - (0xFD3B, 'M', 'ظم'), - (0xFD3C, 'M', 'اً'), - (0xFD3E, 'V'), - (0xFD50, 'M', 'تجم'), - (0xFD51, 'M', 'تحج'), - (0xFD53, 'M', 'تحم'), - (0xFD54, 'M', 'تخم'), - (0xFD55, 'M', 'تمج'), - (0xFD56, 'M', 'تمح'), - (0xFD57, 'M', 'تمخ'), - (0xFD58, 'M', 'جمح'), - (0xFD5A, 'M', 'حمي'), - (0xFD5B, 'M', 'حمى'), - (0xFD5C, 'M', 'سحج'), - (0xFD5D, 'M', 'سجح'), - (0xFD5E, 'M', 'سجى'), - (0xFD5F, 'M', 'سمح'), - (0xFD61, 'M', 'سمج'), - (0xFD62, 'M', 'سمم'), - (0xFD64, 'M', 'صحح'), - (0xFD66, 'M', 'صمم'), - (0xFD67, 'M', 'شحم'), - (0xFD69, 'M', 'شجي'), - (0xFD6A, 'M', 'شمخ'), - (0xFD6C, 'M', 'شمم'), - (0xFD6E, 'M', 'ضحى'), - (0xFD6F, 'M', 'ضخم'), - (0xFD71, 'M', 'طمح'), - (0xFD73, 'M', 'طمم'), - (0xFD74, 'M', 'طمي'), - (0xFD75, 'M', 'عجم'), - (0xFD76, 'M', 'عمم'), - (0xFD78, 'M', 'عمى'), - (0xFD79, 'M', 'غمم'), - (0xFD7A, 'M', 'غمي'), - (0xFD7B, 'M', 'غمى'), - (0xFD7C, 'M', 'فخم'), - (0xFD7E, 'M', 'قمح'), - (0xFD7F, 'M', 'قمم'), - (0xFD80, 'M', 'لحم'), - (0xFD81, 'M', 'لحي'), - (0xFD82, 'M', 'لحى'), - (0xFD83, 'M', 'لجج'), - (0xFD85, 'M', 'لخم'), - (0xFD87, 'M', 'لمح'), - (0xFD89, 'M', 'محج'), - (0xFD8A, 'M', 'محم'), - (0xFD8B, 'M', 'محي'), - (0xFD8C, 'M', 'مجح'), - (0xFD8D, 'M', 'مجم'), - (0xFD8E, 'M', 'مخج'), - (0xFD8F, 'M', 'مخم'), - (0xFD90, 'X'), - (0xFD92, 'M', 'مجخ'), - (0xFD93, 'M', 'همج'), - (0xFD94, 'M', 'همم'), - (0xFD95, 'M', 'نحم'), - (0xFD96, 'M', 'نحى'), - (0xFD97, 'M', 'نجم'), - (0xFD99, 'M', 'نجى'), - (0xFD9A, 'M', 'نمي'), - (0xFD9B, 'M', 'نمى'), - (0xFD9C, 'M', 'يمم'), - (0xFD9E, 'M', 'بخي'), - (0xFD9F, 'M', 'تجي'), - (0xFDA0, 'M', 'تجى'), - (0xFDA1, 'M', 'تخي'), - (0xFDA2, 'M', 'تخى'), - (0xFDA3, 'M', 'تمي'), - (0xFDA4, 'M', 'تمى'), - (0xFDA5, 'M', 'جمي'), - (0xFDA6, 'M', 'جحى'), - ] - -def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFDA7, 'M', 'جمى'), - (0xFDA8, 'M', 'سخى'), - (0xFDA9, 'M', 'صحي'), - (0xFDAA, 'M', 'شحي'), - (0xFDAB, 'M', 'ضحي'), - (0xFDAC, 'M', 'لجي'), - (0xFDAD, 'M', 'لمي'), - (0xFDAE, 'M', 'يحي'), - (0xFDAF, 'M', 'يجي'), - (0xFDB0, 'M', 'يمي'), - (0xFDB1, 'M', 'ممي'), - (0xFDB2, 'M', 'قمي'), - (0xFDB3, 'M', 'نحي'), - (0xFDB4, 'M', 'قمح'), - (0xFDB5, 'M', 'لحم'), - (0xFDB6, 'M', 'عمي'), - (0xFDB7, 'M', 'كمي'), - (0xFDB8, 'M', 'نجح'), - (0xFDB9, 'M', 'مخي'), - (0xFDBA, 'M', 'لجم'), - (0xFDBB, 'M', 'كمم'), - (0xFDBC, 'M', 'لجم'), - (0xFDBD, 'M', 'نجح'), - (0xFDBE, 'M', 'جحي'), - (0xFDBF, 'M', 'حجي'), - (0xFDC0, 'M', 'مجي'), - (0xFDC1, 'M', 'فمي'), - (0xFDC2, 'M', 'بحي'), - (0xFDC3, 'M', 'كمم'), - (0xFDC4, 'M', 'عجم'), - (0xFDC5, 'M', 'صمم'), - (0xFDC6, 'M', 'سخي'), - (0xFDC7, 'M', 'نجي'), - (0xFDC8, 'X'), - (0xFDCF, 'V'), - (0xFDD0, 'X'), - (0xFDF0, 'M', 'صلے'), - (0xFDF1, 'M', 'قلے'), - (0xFDF2, 'M', 'الله'), - (0xFDF3, 'M', 'اكبر'), - (0xFDF4, 'M', 'محمد'), - (0xFDF5, 'M', 'صلعم'), - (0xFDF6, 'M', 'رسول'), - (0xFDF7, 'M', 'عليه'), - (0xFDF8, 'M', 'وسلم'), - (0xFDF9, 'M', 'صلى'), - (0xFDFA, '3', 'صلى الله عليه وسلم'), - (0xFDFB, '3', 'جل جلاله'), - (0xFDFC, 'M', 'ریال'), - (0xFDFD, 'V'), - (0xFE00, 'I'), - (0xFE10, '3', ','), - (0xFE11, 'M', '、'), - (0xFE12, 'X'), - (0xFE13, '3', ':'), - (0xFE14, '3', ';'), - (0xFE15, '3', '!'), - (0xFE16, '3', '?'), - (0xFE17, 'M', '〖'), - (0xFE18, 'M', '〗'), - (0xFE19, 'X'), - (0xFE20, 'V'), - (0xFE30, 'X'), - (0xFE31, 'M', '—'), - (0xFE32, 'M', '–'), - (0xFE33, '3', '_'), - (0xFE35, '3', '('), - (0xFE36, '3', ')'), - (0xFE37, '3', '{'), - (0xFE38, '3', '}'), - (0xFE39, 'M', '〔'), - (0xFE3A, 'M', '〕'), - (0xFE3B, 'M', '【'), - (0xFE3C, 'M', '】'), - (0xFE3D, 'M', '《'), - (0xFE3E, 'M', '》'), - (0xFE3F, 'M', '〈'), - (0xFE40, 'M', '〉'), - (0xFE41, 'M', '「'), - (0xFE42, 'M', '」'), - (0xFE43, 'M', '『'), - (0xFE44, 'M', '』'), - (0xFE45, 'V'), - (0xFE47, '3', '['), - (0xFE48, '3', ']'), - (0xFE49, '3', ' ̅'), - (0xFE4D, '3', '_'), - (0xFE50, '3', ','), - (0xFE51, 'M', '、'), - (0xFE52, 'X'), - (0xFE54, '3', ';'), - (0xFE55, '3', ':'), - (0xFE56, '3', '?'), - (0xFE57, '3', '!'), - (0xFE58, 'M', '—'), - (0xFE59, '3', '('), - (0xFE5A, '3', ')'), - (0xFE5B, '3', '{'), - (0xFE5C, '3', '}'), - (0xFE5D, 'M', '〔'), - ] - -def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFE5E, 'M', '〕'), - (0xFE5F, '3', '#'), - (0xFE60, '3', '&'), - (0xFE61, '3', '*'), - (0xFE62, '3', '+'), - (0xFE63, 'M', '-'), - (0xFE64, '3', '<'), - (0xFE65, '3', '>'), - (0xFE66, '3', '='), - (0xFE67, 'X'), - (0xFE68, '3', '\\'), - (0xFE69, '3', '$'), - (0xFE6A, '3', '%'), - (0xFE6B, '3', '@'), - (0xFE6C, 'X'), - (0xFE70, '3', ' ً'), - (0xFE71, 'M', 'ـً'), - (0xFE72, '3', ' ٌ'), - (0xFE73, 'V'), - (0xFE74, '3', ' ٍ'), - (0xFE75, 'X'), - (0xFE76, '3', ' َ'), - (0xFE77, 'M', 'ـَ'), - (0xFE78, '3', ' ُ'), - (0xFE79, 'M', 'ـُ'), - (0xFE7A, '3', ' ِ'), - (0xFE7B, 'M', 'ـِ'), - (0xFE7C, '3', ' ّ'), - (0xFE7D, 'M', 'ـّ'), - (0xFE7E, '3', ' ْ'), - (0xFE7F, 'M', 'ـْ'), - (0xFE80, 'M', 'ء'), - (0xFE81, 'M', 'آ'), - (0xFE83, 'M', 'أ'), - (0xFE85, 'M', 'ؤ'), - (0xFE87, 'M', 'إ'), - (0xFE89, 'M', 'ئ'), - (0xFE8D, 'M', 'ا'), - (0xFE8F, 'M', 'ب'), - (0xFE93, 'M', 'ة'), - (0xFE95, 'M', 'ت'), - (0xFE99, 'M', 'ث'), - (0xFE9D, 'M', 'ج'), - (0xFEA1, 'M', 'ح'), - (0xFEA5, 'M', 'خ'), - (0xFEA9, 'M', 'د'), - (0xFEAB, 'M', 'ذ'), - (0xFEAD, 'M', 'ر'), - (0xFEAF, 'M', 'ز'), - (0xFEB1, 'M', 'س'), - (0xFEB5, 'M', 'ش'), - (0xFEB9, 'M', 'ص'), - (0xFEBD, 'M', 'ض'), - (0xFEC1, 'M', 'ط'), - (0xFEC5, 'M', 'ظ'), - (0xFEC9, 'M', 'ع'), - (0xFECD, 'M', 'غ'), - (0xFED1, 'M', 'ف'), - (0xFED5, 'M', 'ق'), - (0xFED9, 'M', 'ك'), - (0xFEDD, 'M', 'ل'), - (0xFEE1, 'M', 'م'), - (0xFEE5, 'M', 'ن'), - (0xFEE9, 'M', 'ه'), - (0xFEED, 'M', 'و'), - (0xFEEF, 'M', 'ى'), - (0xFEF1, 'M', 'ي'), - (0xFEF5, 'M', 'لآ'), - (0xFEF7, 'M', 'لأ'), - (0xFEF9, 'M', 'لإ'), - (0xFEFB, 'M', 'لا'), - (0xFEFD, 'X'), - (0xFEFF, 'I'), - (0xFF00, 'X'), - (0xFF01, '3', '!'), - (0xFF02, '3', '"'), - (0xFF03, '3', '#'), - (0xFF04, '3', '$'), - (0xFF05, '3', '%'), - (0xFF06, '3', '&'), - (0xFF07, '3', '\''), - (0xFF08, '3', '('), - (0xFF09, '3', ')'), - (0xFF0A, '3', '*'), - (0xFF0B, '3', '+'), - (0xFF0C, '3', ','), - (0xFF0D, 'M', '-'), - (0xFF0E, 'M', '.'), - (0xFF0F, '3', '/'), - (0xFF10, 'M', '0'), - (0xFF11, 'M', '1'), - (0xFF12, 'M', '2'), - (0xFF13, 'M', '3'), - (0xFF14, 'M', '4'), - (0xFF15, 'M', '5'), - (0xFF16, 'M', '6'), - (0xFF17, 'M', '7'), - (0xFF18, 'M', '8'), - (0xFF19, 'M', '9'), - (0xFF1A, '3', ':'), - ] - -def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF1B, '3', ';'), - (0xFF1C, '3', '<'), - (0xFF1D, '3', '='), - (0xFF1E, '3', '>'), - (0xFF1F, '3', '?'), - (0xFF20, '3', '@'), - (0xFF21, 'M', 'a'), - (0xFF22, 'M', 'b'), - (0xFF23, 'M', 'c'), - (0xFF24, 'M', 'd'), - (0xFF25, 'M', 'e'), - (0xFF26, 'M', 'f'), - (0xFF27, 'M', 'g'), - (0xFF28, 'M', 'h'), - (0xFF29, 'M', 'i'), - (0xFF2A, 'M', 'j'), - (0xFF2B, 'M', 'k'), - (0xFF2C, 'M', 'l'), - (0xFF2D, 'M', 'm'), - (0xFF2E, 'M', 'n'), - (0xFF2F, 'M', 'o'), - (0xFF30, 'M', 'p'), - (0xFF31, 'M', 'q'), - (0xFF32, 'M', 'r'), - (0xFF33, 'M', 's'), - (0xFF34, 'M', 't'), - (0xFF35, 'M', 'u'), - (0xFF36, 'M', 'v'), - (0xFF37, 'M', 'w'), - (0xFF38, 'M', 'x'), - (0xFF39, 'M', 'y'), - (0xFF3A, 'M', 'z'), - (0xFF3B, '3', '['), - (0xFF3C, '3', '\\'), - (0xFF3D, '3', ']'), - (0xFF3E, '3', '^'), - (0xFF3F, '3', '_'), - (0xFF40, '3', '`'), - (0xFF41, 'M', 'a'), - (0xFF42, 'M', 'b'), - (0xFF43, 'M', 'c'), - (0xFF44, 'M', 'd'), - (0xFF45, 'M', 'e'), - (0xFF46, 'M', 'f'), - (0xFF47, 'M', 'g'), - (0xFF48, 'M', 'h'), - (0xFF49, 'M', 'i'), - (0xFF4A, 'M', 'j'), - (0xFF4B, 'M', 'k'), - (0xFF4C, 'M', 'l'), - (0xFF4D, 'M', 'm'), - (0xFF4E, 'M', 'n'), - (0xFF4F, 'M', 'o'), - (0xFF50, 'M', 'p'), - (0xFF51, 'M', 'q'), - (0xFF52, 'M', 'r'), - (0xFF53, 'M', 's'), - (0xFF54, 'M', 't'), - (0xFF55, 'M', 'u'), - (0xFF56, 'M', 'v'), - (0xFF57, 'M', 'w'), - (0xFF58, 'M', 'x'), - (0xFF59, 'M', 'y'), - (0xFF5A, 'M', 'z'), - (0xFF5B, '3', '{'), - (0xFF5C, '3', '|'), - (0xFF5D, '3', '}'), - (0xFF5E, '3', '~'), - (0xFF5F, 'M', '⦅'), - (0xFF60, 'M', '⦆'), - (0xFF61, 'M', '.'), - (0xFF62, 'M', '「'), - (0xFF63, 'M', '」'), - (0xFF64, 'M', '、'), - (0xFF65, 'M', '・'), - (0xFF66, 'M', 'ヲ'), - (0xFF67, 'M', 'ァ'), - (0xFF68, 'M', 'ィ'), - (0xFF69, 'M', 'ゥ'), - (0xFF6A, 'M', 'ェ'), - (0xFF6B, 'M', 'ォ'), - (0xFF6C, 'M', 'ャ'), - (0xFF6D, 'M', 'ュ'), - (0xFF6E, 'M', 'ョ'), - (0xFF6F, 'M', 'ッ'), - (0xFF70, 'M', 'ー'), - (0xFF71, 'M', 'ア'), - (0xFF72, 'M', 'イ'), - (0xFF73, 'M', 'ウ'), - (0xFF74, 'M', 'エ'), - (0xFF75, 'M', 'オ'), - (0xFF76, 'M', 'カ'), - (0xFF77, 'M', 'キ'), - (0xFF78, 'M', 'ク'), - (0xFF79, 'M', 'ケ'), - (0xFF7A, 'M', 'コ'), - (0xFF7B, 'M', 'サ'), - (0xFF7C, 'M', 'シ'), - (0xFF7D, 'M', 'ス'), - (0xFF7E, 'M', 'セ'), - ] - -def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF7F, 'M', 'ソ'), - (0xFF80, 'M', 'タ'), - (0xFF81, 'M', 'チ'), - (0xFF82, 'M', 'ツ'), - (0xFF83, 'M', 'テ'), - (0xFF84, 'M', 'ト'), - (0xFF85, 'M', 'ナ'), - (0xFF86, 'M', 'ニ'), - (0xFF87, 'M', 'ヌ'), - (0xFF88, 'M', 'ネ'), - (0xFF89, 'M', 'ノ'), - (0xFF8A, 'M', 'ハ'), - (0xFF8B, 'M', 'ヒ'), - (0xFF8C, 'M', 'フ'), - (0xFF8D, 'M', 'ヘ'), - (0xFF8E, 'M', 'ホ'), - (0xFF8F, 'M', 'マ'), - (0xFF90, 'M', 'ミ'), - (0xFF91, 'M', 'ム'), - (0xFF92, 'M', 'メ'), - (0xFF93, 'M', 'モ'), - (0xFF94, 'M', 'ヤ'), - (0xFF95, 'M', 'ユ'), - (0xFF96, 'M', 'ヨ'), - (0xFF97, 'M', 'ラ'), - (0xFF98, 'M', 'リ'), - (0xFF99, 'M', 'ル'), - (0xFF9A, 'M', 'レ'), - (0xFF9B, 'M', 'ロ'), - (0xFF9C, 'M', 'ワ'), - (0xFF9D, 'M', 'ン'), - (0xFF9E, 'M', '゙'), - (0xFF9F, 'M', '゚'), - (0xFFA0, 'X'), - (0xFFA1, 'M', 'ᄀ'), - (0xFFA2, 'M', 'ᄁ'), - (0xFFA3, 'M', 'ᆪ'), - (0xFFA4, 'M', 'ᄂ'), - (0xFFA5, 'M', 'ᆬ'), - (0xFFA6, 'M', 'ᆭ'), - (0xFFA7, 'M', 'ᄃ'), - (0xFFA8, 'M', 'ᄄ'), - (0xFFA9, 'M', 'ᄅ'), - (0xFFAA, 'M', 'ᆰ'), - (0xFFAB, 'M', 'ᆱ'), - (0xFFAC, 'M', 'ᆲ'), - (0xFFAD, 'M', 'ᆳ'), - (0xFFAE, 'M', 'ᆴ'), - (0xFFAF, 'M', 'ᆵ'), - (0xFFB0, 'M', 'ᄚ'), - (0xFFB1, 'M', 'ᄆ'), - (0xFFB2, 'M', 'ᄇ'), - (0xFFB3, 'M', 'ᄈ'), - (0xFFB4, 'M', 'ᄡ'), - (0xFFB5, 'M', 'ᄉ'), - (0xFFB6, 'M', 'ᄊ'), - (0xFFB7, 'M', 'ᄋ'), - (0xFFB8, 'M', 'ᄌ'), - (0xFFB9, 'M', 'ᄍ'), - (0xFFBA, 'M', 'ᄎ'), - (0xFFBB, 'M', 'ᄏ'), - (0xFFBC, 'M', 'ᄐ'), - (0xFFBD, 'M', 'ᄑ'), - (0xFFBE, 'M', 'ᄒ'), - (0xFFBF, 'X'), - (0xFFC2, 'M', 'ᅡ'), - (0xFFC3, 'M', 'ᅢ'), - (0xFFC4, 'M', 'ᅣ'), - (0xFFC5, 'M', 'ᅤ'), - (0xFFC6, 'M', 'ᅥ'), - (0xFFC7, 'M', 'ᅦ'), - (0xFFC8, 'X'), - (0xFFCA, 'M', 'ᅧ'), - (0xFFCB, 'M', 'ᅨ'), - (0xFFCC, 'M', 'ᅩ'), - (0xFFCD, 'M', 'ᅪ'), - (0xFFCE, 'M', 'ᅫ'), - (0xFFCF, 'M', 'ᅬ'), - (0xFFD0, 'X'), - (0xFFD2, 'M', 'ᅭ'), - (0xFFD3, 'M', 'ᅮ'), - (0xFFD4, 'M', 'ᅯ'), - (0xFFD5, 'M', 'ᅰ'), - (0xFFD6, 'M', 'ᅱ'), - (0xFFD7, 'M', 'ᅲ'), - (0xFFD8, 'X'), - (0xFFDA, 'M', 'ᅳ'), - (0xFFDB, 'M', 'ᅴ'), - (0xFFDC, 'M', 'ᅵ'), - (0xFFDD, 'X'), - (0xFFE0, 'M', '¢'), - (0xFFE1, 'M', '£'), - (0xFFE2, 'M', '¬'), - (0xFFE3, '3', ' ̄'), - (0xFFE4, 'M', '¦'), - (0xFFE5, 'M', '¥'), - (0xFFE6, 'M', '₩'), - (0xFFE7, 'X'), - (0xFFE8, 'M', '│'), - (0xFFE9, 'M', '←'), - ] - -def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFFEA, 'M', '↑'), - (0xFFEB, 'M', '→'), - (0xFFEC, 'M', '↓'), - (0xFFED, 'M', '■'), - (0xFFEE, 'M', '○'), - (0xFFEF, 'X'), - (0x10000, 'V'), - (0x1000C, 'X'), - (0x1000D, 'V'), - (0x10027, 'X'), - (0x10028, 'V'), - (0x1003B, 'X'), - (0x1003C, 'V'), - (0x1003E, 'X'), - (0x1003F, 'V'), - (0x1004E, 'X'), - (0x10050, 'V'), - (0x1005E, 'X'), - (0x10080, 'V'), - (0x100FB, 'X'), - (0x10100, 'V'), - (0x10103, 'X'), - (0x10107, 'V'), - (0x10134, 'X'), - (0x10137, 'V'), - (0x1018F, 'X'), - (0x10190, 'V'), - (0x1019D, 'X'), - (0x101A0, 'V'), - (0x101A1, 'X'), - (0x101D0, 'V'), - (0x101FE, 'X'), - (0x10280, 'V'), - (0x1029D, 'X'), - (0x102A0, 'V'), - (0x102D1, 'X'), - (0x102E0, 'V'), - (0x102FC, 'X'), - (0x10300, 'V'), - (0x10324, 'X'), - (0x1032D, 'V'), - (0x1034B, 'X'), - (0x10350, 'V'), - (0x1037B, 'X'), - (0x10380, 'V'), - (0x1039E, 'X'), - (0x1039F, 'V'), - (0x103C4, 'X'), - (0x103C8, 'V'), - (0x103D6, 'X'), - (0x10400, 'M', '𐐨'), - (0x10401, 'M', '𐐩'), - (0x10402, 'M', '𐐪'), - (0x10403, 'M', '𐐫'), - (0x10404, 'M', '𐐬'), - (0x10405, 'M', '𐐭'), - (0x10406, 'M', '𐐮'), - (0x10407, 'M', '𐐯'), - (0x10408, 'M', '𐐰'), - (0x10409, 'M', '𐐱'), - (0x1040A, 'M', '𐐲'), - (0x1040B, 'M', '𐐳'), - (0x1040C, 'M', '𐐴'), - (0x1040D, 'M', '𐐵'), - (0x1040E, 'M', '𐐶'), - (0x1040F, 'M', '𐐷'), - (0x10410, 'M', '𐐸'), - (0x10411, 'M', '𐐹'), - (0x10412, 'M', '𐐺'), - (0x10413, 'M', '𐐻'), - (0x10414, 'M', '𐐼'), - (0x10415, 'M', '𐐽'), - (0x10416, 'M', '𐐾'), - (0x10417, 'M', '𐐿'), - (0x10418, 'M', '𐑀'), - (0x10419, 'M', '𐑁'), - (0x1041A, 'M', '𐑂'), - (0x1041B, 'M', '𐑃'), - (0x1041C, 'M', '𐑄'), - (0x1041D, 'M', '𐑅'), - (0x1041E, 'M', '𐑆'), - (0x1041F, 'M', '𐑇'), - (0x10420, 'M', '𐑈'), - (0x10421, 'M', '𐑉'), - (0x10422, 'M', '𐑊'), - (0x10423, 'M', '𐑋'), - (0x10424, 'M', '𐑌'), - (0x10425, 'M', '𐑍'), - (0x10426, 'M', '𐑎'), - (0x10427, 'M', '𐑏'), - (0x10428, 'V'), - (0x1049E, 'X'), - (0x104A0, 'V'), - (0x104AA, 'X'), - (0x104B0, 'M', '𐓘'), - (0x104B1, 'M', '𐓙'), - (0x104B2, 'M', '𐓚'), - (0x104B3, 'M', '𐓛'), - (0x104B4, 'M', '𐓜'), - (0x104B5, 'M', '𐓝'), - ] - -def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x104B6, 'M', '𐓞'), - (0x104B7, 'M', '𐓟'), - (0x104B8, 'M', '𐓠'), - (0x104B9, 'M', '𐓡'), - (0x104BA, 'M', '𐓢'), - (0x104BB, 'M', '𐓣'), - (0x104BC, 'M', '𐓤'), - (0x104BD, 'M', '𐓥'), - (0x104BE, 'M', '𐓦'), - (0x104BF, 'M', '𐓧'), - (0x104C0, 'M', '𐓨'), - (0x104C1, 'M', '𐓩'), - (0x104C2, 'M', '𐓪'), - (0x104C3, 'M', '𐓫'), - (0x104C4, 'M', '𐓬'), - (0x104C5, 'M', '𐓭'), - (0x104C6, 'M', '𐓮'), - (0x104C7, 'M', '𐓯'), - (0x104C8, 'M', '𐓰'), - (0x104C9, 'M', '𐓱'), - (0x104CA, 'M', '𐓲'), - (0x104CB, 'M', '𐓳'), - (0x104CC, 'M', '𐓴'), - (0x104CD, 'M', '𐓵'), - (0x104CE, 'M', '𐓶'), - (0x104CF, 'M', '𐓷'), - (0x104D0, 'M', '𐓸'), - (0x104D1, 'M', '𐓹'), - (0x104D2, 'M', '𐓺'), - (0x104D3, 'M', '𐓻'), - (0x104D4, 'X'), - (0x104D8, 'V'), - (0x104FC, 'X'), - (0x10500, 'V'), - (0x10528, 'X'), - (0x10530, 'V'), - (0x10564, 'X'), - (0x1056F, 'V'), - (0x10570, 'M', '𐖗'), - (0x10571, 'M', '𐖘'), - (0x10572, 'M', '𐖙'), - (0x10573, 'M', '𐖚'), - (0x10574, 'M', '𐖛'), - (0x10575, 'M', '𐖜'), - (0x10576, 'M', '𐖝'), - (0x10577, 'M', '𐖞'), - (0x10578, 'M', '𐖟'), - (0x10579, 'M', '𐖠'), - (0x1057A, 'M', '𐖡'), - (0x1057B, 'X'), - (0x1057C, 'M', '𐖣'), - (0x1057D, 'M', '𐖤'), - (0x1057E, 'M', '𐖥'), - (0x1057F, 'M', '𐖦'), - (0x10580, 'M', '𐖧'), - (0x10581, 'M', '𐖨'), - (0x10582, 'M', '𐖩'), - (0x10583, 'M', '𐖪'), - (0x10584, 'M', '𐖫'), - (0x10585, 'M', '𐖬'), - (0x10586, 'M', '𐖭'), - (0x10587, 'M', '𐖮'), - (0x10588, 'M', '𐖯'), - (0x10589, 'M', '𐖰'), - (0x1058A, 'M', '𐖱'), - (0x1058B, 'X'), - (0x1058C, 'M', '𐖳'), - (0x1058D, 'M', '𐖴'), - (0x1058E, 'M', '𐖵'), - (0x1058F, 'M', '𐖶'), - (0x10590, 'M', '𐖷'), - (0x10591, 'M', '𐖸'), - (0x10592, 'M', '𐖹'), - (0x10593, 'X'), - (0x10594, 'M', '𐖻'), - (0x10595, 'M', '𐖼'), - (0x10596, 'X'), - (0x10597, 'V'), - (0x105A2, 'X'), - (0x105A3, 'V'), - (0x105B2, 'X'), - (0x105B3, 'V'), - (0x105BA, 'X'), - (0x105BB, 'V'), - (0x105BD, 'X'), - (0x10600, 'V'), - (0x10737, 'X'), - (0x10740, 'V'), - (0x10756, 'X'), - (0x10760, 'V'), - (0x10768, 'X'), - (0x10780, 'V'), - (0x10781, 'M', 'ː'), - (0x10782, 'M', 'ˑ'), - (0x10783, 'M', 'æ'), - (0x10784, 'M', 'ʙ'), - (0x10785, 'M', 'ɓ'), - (0x10786, 'X'), - (0x10787, 'M', 'ʣ'), - (0x10788, 'M', 'ꭦ'), - ] - -def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10789, 'M', 'ʥ'), - (0x1078A, 'M', 'ʤ'), - (0x1078B, 'M', 'ɖ'), - (0x1078C, 'M', 'ɗ'), - (0x1078D, 'M', 'ᶑ'), - (0x1078E, 'M', 'ɘ'), - (0x1078F, 'M', 'ɞ'), - (0x10790, 'M', 'ʩ'), - (0x10791, 'M', 'ɤ'), - (0x10792, 'M', 'ɢ'), - (0x10793, 'M', 'ɠ'), - (0x10794, 'M', 'ʛ'), - (0x10795, 'M', 'ħ'), - (0x10796, 'M', 'ʜ'), - (0x10797, 'M', 'ɧ'), - (0x10798, 'M', 'ʄ'), - (0x10799, 'M', 'ʪ'), - (0x1079A, 'M', 'ʫ'), - (0x1079B, 'M', 'ɬ'), - (0x1079C, 'M', '𝼄'), - (0x1079D, 'M', 'ꞎ'), - (0x1079E, 'M', 'ɮ'), - (0x1079F, 'M', '𝼅'), - (0x107A0, 'M', 'ʎ'), - (0x107A1, 'M', '𝼆'), - (0x107A2, 'M', 'ø'), - (0x107A3, 'M', 'ɶ'), - (0x107A4, 'M', 'ɷ'), - (0x107A5, 'M', 'q'), - (0x107A6, 'M', 'ɺ'), - (0x107A7, 'M', '𝼈'), - (0x107A8, 'M', 'ɽ'), - (0x107A9, 'M', 'ɾ'), - (0x107AA, 'M', 'ʀ'), - (0x107AB, 'M', 'ʨ'), - (0x107AC, 'M', 'ʦ'), - (0x107AD, 'M', 'ꭧ'), - (0x107AE, 'M', 'ʧ'), - (0x107AF, 'M', 'ʈ'), - (0x107B0, 'M', 'ⱱ'), - (0x107B1, 'X'), - (0x107B2, 'M', 'ʏ'), - (0x107B3, 'M', 'ʡ'), - (0x107B4, 'M', 'ʢ'), - (0x107B5, 'M', 'ʘ'), - (0x107B6, 'M', 'ǀ'), - (0x107B7, 'M', 'ǁ'), - (0x107B8, 'M', 'ǂ'), - (0x107B9, 'M', '𝼊'), - (0x107BA, 'M', '𝼞'), - (0x107BB, 'X'), - (0x10800, 'V'), - (0x10806, 'X'), - (0x10808, 'V'), - (0x10809, 'X'), - (0x1080A, 'V'), - (0x10836, 'X'), - (0x10837, 'V'), - (0x10839, 'X'), - (0x1083C, 'V'), - (0x1083D, 'X'), - (0x1083F, 'V'), - (0x10856, 'X'), - (0x10857, 'V'), - (0x1089F, 'X'), - (0x108A7, 'V'), - (0x108B0, 'X'), - (0x108E0, 'V'), - (0x108F3, 'X'), - (0x108F4, 'V'), - (0x108F6, 'X'), - (0x108FB, 'V'), - (0x1091C, 'X'), - (0x1091F, 'V'), - (0x1093A, 'X'), - (0x1093F, 'V'), - (0x10940, 'X'), - (0x10980, 'V'), - (0x109B8, 'X'), - (0x109BC, 'V'), - (0x109D0, 'X'), - (0x109D2, 'V'), - (0x10A04, 'X'), - (0x10A05, 'V'), - (0x10A07, 'X'), - (0x10A0C, 'V'), - (0x10A14, 'X'), - (0x10A15, 'V'), - (0x10A18, 'X'), - (0x10A19, 'V'), - (0x10A36, 'X'), - (0x10A38, 'V'), - (0x10A3B, 'X'), - (0x10A3F, 'V'), - (0x10A49, 'X'), - (0x10A50, 'V'), - (0x10A59, 'X'), - (0x10A60, 'V'), - (0x10AA0, 'X'), - (0x10AC0, 'V'), - ] - -def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10AE7, 'X'), - (0x10AEB, 'V'), - (0x10AF7, 'X'), - (0x10B00, 'V'), - (0x10B36, 'X'), - (0x10B39, 'V'), - (0x10B56, 'X'), - (0x10B58, 'V'), - (0x10B73, 'X'), - (0x10B78, 'V'), - (0x10B92, 'X'), - (0x10B99, 'V'), - (0x10B9D, 'X'), - (0x10BA9, 'V'), - (0x10BB0, 'X'), - (0x10C00, 'V'), - (0x10C49, 'X'), - (0x10C80, 'M', '𐳀'), - (0x10C81, 'M', '𐳁'), - (0x10C82, 'M', '𐳂'), - (0x10C83, 'M', '𐳃'), - (0x10C84, 'M', '𐳄'), - (0x10C85, 'M', '𐳅'), - (0x10C86, 'M', '𐳆'), - (0x10C87, 'M', '𐳇'), - (0x10C88, 'M', '𐳈'), - (0x10C89, 'M', '𐳉'), - (0x10C8A, 'M', '𐳊'), - (0x10C8B, 'M', '𐳋'), - (0x10C8C, 'M', '𐳌'), - (0x10C8D, 'M', '𐳍'), - (0x10C8E, 'M', '𐳎'), - (0x10C8F, 'M', '𐳏'), - (0x10C90, 'M', '𐳐'), - (0x10C91, 'M', '𐳑'), - (0x10C92, 'M', '𐳒'), - (0x10C93, 'M', '𐳓'), - (0x10C94, 'M', '𐳔'), - (0x10C95, 'M', '𐳕'), - (0x10C96, 'M', '𐳖'), - (0x10C97, 'M', '𐳗'), - (0x10C98, 'M', '𐳘'), - (0x10C99, 'M', '𐳙'), - (0x10C9A, 'M', '𐳚'), - (0x10C9B, 'M', '𐳛'), - (0x10C9C, 'M', '𐳜'), - (0x10C9D, 'M', '𐳝'), - (0x10C9E, 'M', '𐳞'), - (0x10C9F, 'M', '𐳟'), - (0x10CA0, 'M', '𐳠'), - (0x10CA1, 'M', '𐳡'), - (0x10CA2, 'M', '𐳢'), - (0x10CA3, 'M', '𐳣'), - (0x10CA4, 'M', '𐳤'), - (0x10CA5, 'M', '𐳥'), - (0x10CA6, 'M', '𐳦'), - (0x10CA7, 'M', '𐳧'), - (0x10CA8, 'M', '𐳨'), - (0x10CA9, 'M', '𐳩'), - (0x10CAA, 'M', '𐳪'), - (0x10CAB, 'M', '𐳫'), - (0x10CAC, 'M', '𐳬'), - (0x10CAD, 'M', '𐳭'), - (0x10CAE, 'M', '𐳮'), - (0x10CAF, 'M', '𐳯'), - (0x10CB0, 'M', '𐳰'), - (0x10CB1, 'M', '𐳱'), - (0x10CB2, 'M', '𐳲'), - (0x10CB3, 'X'), - (0x10CC0, 'V'), - (0x10CF3, 'X'), - (0x10CFA, 'V'), - (0x10D28, 'X'), - (0x10D30, 'V'), - (0x10D3A, 'X'), - (0x10E60, 'V'), - (0x10E7F, 'X'), - (0x10E80, 'V'), - (0x10EAA, 'X'), - (0x10EAB, 'V'), - (0x10EAE, 'X'), - (0x10EB0, 'V'), - (0x10EB2, 'X'), - (0x10EFD, 'V'), - (0x10F28, 'X'), - (0x10F30, 'V'), - (0x10F5A, 'X'), - (0x10F70, 'V'), - (0x10F8A, 'X'), - (0x10FB0, 'V'), - (0x10FCC, 'X'), - (0x10FE0, 'V'), - (0x10FF7, 'X'), - (0x11000, 'V'), - (0x1104E, 'X'), - (0x11052, 'V'), - (0x11076, 'X'), - (0x1107F, 'V'), - (0x110BD, 'X'), - (0x110BE, 'V'), - ] - -def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x110C3, 'X'), - (0x110D0, 'V'), - (0x110E9, 'X'), - (0x110F0, 'V'), - (0x110FA, 'X'), - (0x11100, 'V'), - (0x11135, 'X'), - (0x11136, 'V'), - (0x11148, 'X'), - (0x11150, 'V'), - (0x11177, 'X'), - (0x11180, 'V'), - (0x111E0, 'X'), - (0x111E1, 'V'), - (0x111F5, 'X'), - (0x11200, 'V'), - (0x11212, 'X'), - (0x11213, 'V'), - (0x11242, 'X'), - (0x11280, 'V'), - (0x11287, 'X'), - (0x11288, 'V'), - (0x11289, 'X'), - (0x1128A, 'V'), - (0x1128E, 'X'), - (0x1128F, 'V'), - (0x1129E, 'X'), - (0x1129F, 'V'), - (0x112AA, 'X'), - (0x112B0, 'V'), - (0x112EB, 'X'), - (0x112F0, 'V'), - (0x112FA, 'X'), - (0x11300, 'V'), - (0x11304, 'X'), - (0x11305, 'V'), - (0x1130D, 'X'), - (0x1130F, 'V'), - (0x11311, 'X'), - (0x11313, 'V'), - (0x11329, 'X'), - (0x1132A, 'V'), - (0x11331, 'X'), - (0x11332, 'V'), - (0x11334, 'X'), - (0x11335, 'V'), - (0x1133A, 'X'), - (0x1133B, 'V'), - (0x11345, 'X'), - (0x11347, 'V'), - (0x11349, 'X'), - (0x1134B, 'V'), - (0x1134E, 'X'), - (0x11350, 'V'), - (0x11351, 'X'), - (0x11357, 'V'), - (0x11358, 'X'), - (0x1135D, 'V'), - (0x11364, 'X'), - (0x11366, 'V'), - (0x1136D, 'X'), - (0x11370, 'V'), - (0x11375, 'X'), - (0x11400, 'V'), - (0x1145C, 'X'), - (0x1145D, 'V'), - (0x11462, 'X'), - (0x11480, 'V'), - (0x114C8, 'X'), - (0x114D0, 'V'), - (0x114DA, 'X'), - (0x11580, 'V'), - (0x115B6, 'X'), - (0x115B8, 'V'), - (0x115DE, 'X'), - (0x11600, 'V'), - (0x11645, 'X'), - (0x11650, 'V'), - (0x1165A, 'X'), - (0x11660, 'V'), - (0x1166D, 'X'), - (0x11680, 'V'), - (0x116BA, 'X'), - (0x116C0, 'V'), - (0x116CA, 'X'), - (0x11700, 'V'), - (0x1171B, 'X'), - (0x1171D, 'V'), - (0x1172C, 'X'), - (0x11730, 'V'), - (0x11747, 'X'), - (0x11800, 'V'), - (0x1183C, 'X'), - (0x118A0, 'M', '𑣀'), - (0x118A1, 'M', '𑣁'), - (0x118A2, 'M', '𑣂'), - (0x118A3, 'M', '𑣃'), - (0x118A4, 'M', '𑣄'), - (0x118A5, 'M', '𑣅'), - (0x118A6, 'M', '𑣆'), - ] - -def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x118A7, 'M', '𑣇'), - (0x118A8, 'M', '𑣈'), - (0x118A9, 'M', '𑣉'), - (0x118AA, 'M', '𑣊'), - (0x118AB, 'M', '𑣋'), - (0x118AC, 'M', '𑣌'), - (0x118AD, 'M', '𑣍'), - (0x118AE, 'M', '𑣎'), - (0x118AF, 'M', '𑣏'), - (0x118B0, 'M', '𑣐'), - (0x118B1, 'M', '𑣑'), - (0x118B2, 'M', '𑣒'), - (0x118B3, 'M', '𑣓'), - (0x118B4, 'M', '𑣔'), - (0x118B5, 'M', '𑣕'), - (0x118B6, 'M', '𑣖'), - (0x118B7, 'M', '𑣗'), - (0x118B8, 'M', '𑣘'), - (0x118B9, 'M', '𑣙'), - (0x118BA, 'M', '𑣚'), - (0x118BB, 'M', '𑣛'), - (0x118BC, 'M', '𑣜'), - (0x118BD, 'M', '𑣝'), - (0x118BE, 'M', '𑣞'), - (0x118BF, 'M', '𑣟'), - (0x118C0, 'V'), - (0x118F3, 'X'), - (0x118FF, 'V'), - (0x11907, 'X'), - (0x11909, 'V'), - (0x1190A, 'X'), - (0x1190C, 'V'), - (0x11914, 'X'), - (0x11915, 'V'), - (0x11917, 'X'), - (0x11918, 'V'), - (0x11936, 'X'), - (0x11937, 'V'), - (0x11939, 'X'), - (0x1193B, 'V'), - (0x11947, 'X'), - (0x11950, 'V'), - (0x1195A, 'X'), - (0x119A0, 'V'), - (0x119A8, 'X'), - (0x119AA, 'V'), - (0x119D8, 'X'), - (0x119DA, 'V'), - (0x119E5, 'X'), - (0x11A00, 'V'), - (0x11A48, 'X'), - (0x11A50, 'V'), - (0x11AA3, 'X'), - (0x11AB0, 'V'), - (0x11AF9, 'X'), - (0x11B00, 'V'), - (0x11B0A, 'X'), - (0x11C00, 'V'), - (0x11C09, 'X'), - (0x11C0A, 'V'), - (0x11C37, 'X'), - (0x11C38, 'V'), - (0x11C46, 'X'), - (0x11C50, 'V'), - (0x11C6D, 'X'), - (0x11C70, 'V'), - (0x11C90, 'X'), - (0x11C92, 'V'), - (0x11CA8, 'X'), - (0x11CA9, 'V'), - (0x11CB7, 'X'), - (0x11D00, 'V'), - (0x11D07, 'X'), - (0x11D08, 'V'), - (0x11D0A, 'X'), - (0x11D0B, 'V'), - (0x11D37, 'X'), - (0x11D3A, 'V'), - (0x11D3B, 'X'), - (0x11D3C, 'V'), - (0x11D3E, 'X'), - (0x11D3F, 'V'), - (0x11D48, 'X'), - (0x11D50, 'V'), - (0x11D5A, 'X'), - (0x11D60, 'V'), - (0x11D66, 'X'), - (0x11D67, 'V'), - (0x11D69, 'X'), - (0x11D6A, 'V'), - (0x11D8F, 'X'), - (0x11D90, 'V'), - (0x11D92, 'X'), - (0x11D93, 'V'), - (0x11D99, 'X'), - (0x11DA0, 'V'), - (0x11DAA, 'X'), - (0x11EE0, 'V'), - (0x11EF9, 'X'), - (0x11F00, 'V'), - ] - -def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x11F11, 'X'), - (0x11F12, 'V'), - (0x11F3B, 'X'), - (0x11F3E, 'V'), - (0x11F5A, 'X'), - (0x11FB0, 'V'), - (0x11FB1, 'X'), - (0x11FC0, 'V'), - (0x11FF2, 'X'), - (0x11FFF, 'V'), - (0x1239A, 'X'), - (0x12400, 'V'), - (0x1246F, 'X'), - (0x12470, 'V'), - (0x12475, 'X'), - (0x12480, 'V'), - (0x12544, 'X'), - (0x12F90, 'V'), - (0x12FF3, 'X'), - (0x13000, 'V'), - (0x13430, 'X'), - (0x13440, 'V'), - (0x13456, 'X'), - (0x14400, 'V'), - (0x14647, 'X'), - (0x16800, 'V'), - (0x16A39, 'X'), - (0x16A40, 'V'), - (0x16A5F, 'X'), - (0x16A60, 'V'), - (0x16A6A, 'X'), - (0x16A6E, 'V'), - (0x16ABF, 'X'), - (0x16AC0, 'V'), - (0x16ACA, 'X'), - (0x16AD0, 'V'), - (0x16AEE, 'X'), - (0x16AF0, 'V'), - (0x16AF6, 'X'), - (0x16B00, 'V'), - (0x16B46, 'X'), - (0x16B50, 'V'), - (0x16B5A, 'X'), - (0x16B5B, 'V'), - (0x16B62, 'X'), - (0x16B63, 'V'), - (0x16B78, 'X'), - (0x16B7D, 'V'), - (0x16B90, 'X'), - (0x16E40, 'M', '𖹠'), - (0x16E41, 'M', '𖹡'), - (0x16E42, 'M', '𖹢'), - (0x16E43, 'M', '𖹣'), - (0x16E44, 'M', '𖹤'), - (0x16E45, 'M', '𖹥'), - (0x16E46, 'M', '𖹦'), - (0x16E47, 'M', '𖹧'), - (0x16E48, 'M', '𖹨'), - (0x16E49, 'M', '𖹩'), - (0x16E4A, 'M', '𖹪'), - (0x16E4B, 'M', '𖹫'), - (0x16E4C, 'M', '𖹬'), - (0x16E4D, 'M', '𖹭'), - (0x16E4E, 'M', '𖹮'), - (0x16E4F, 'M', '𖹯'), - (0x16E50, 'M', '𖹰'), - (0x16E51, 'M', '𖹱'), - (0x16E52, 'M', '𖹲'), - (0x16E53, 'M', '𖹳'), - (0x16E54, 'M', '𖹴'), - (0x16E55, 'M', '𖹵'), - (0x16E56, 'M', '𖹶'), - (0x16E57, 'M', '𖹷'), - (0x16E58, 'M', '𖹸'), - (0x16E59, 'M', '𖹹'), - (0x16E5A, 'M', '𖹺'), - (0x16E5B, 'M', '𖹻'), - (0x16E5C, 'M', '𖹼'), - (0x16E5D, 'M', '𖹽'), - (0x16E5E, 'M', '𖹾'), - (0x16E5F, 'M', '𖹿'), - (0x16E60, 'V'), - (0x16E9B, 'X'), - (0x16F00, 'V'), - (0x16F4B, 'X'), - (0x16F4F, 'V'), - (0x16F88, 'X'), - (0x16F8F, 'V'), - (0x16FA0, 'X'), - (0x16FE0, 'V'), - (0x16FE5, 'X'), - (0x16FF0, 'V'), - (0x16FF2, 'X'), - (0x17000, 'V'), - (0x187F8, 'X'), - (0x18800, 'V'), - (0x18CD6, 'X'), - (0x18D00, 'V'), - (0x18D09, 'X'), - (0x1AFF0, 'V'), - ] - -def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1AFF4, 'X'), - (0x1AFF5, 'V'), - (0x1AFFC, 'X'), - (0x1AFFD, 'V'), - (0x1AFFF, 'X'), - (0x1B000, 'V'), - (0x1B123, 'X'), - (0x1B132, 'V'), - (0x1B133, 'X'), - (0x1B150, 'V'), - (0x1B153, 'X'), - (0x1B155, 'V'), - (0x1B156, 'X'), - (0x1B164, 'V'), - (0x1B168, 'X'), - (0x1B170, 'V'), - (0x1B2FC, 'X'), - (0x1BC00, 'V'), - (0x1BC6B, 'X'), - (0x1BC70, 'V'), - (0x1BC7D, 'X'), - (0x1BC80, 'V'), - (0x1BC89, 'X'), - (0x1BC90, 'V'), - (0x1BC9A, 'X'), - (0x1BC9C, 'V'), - (0x1BCA0, 'I'), - (0x1BCA4, 'X'), - (0x1CF00, 'V'), - (0x1CF2E, 'X'), - (0x1CF30, 'V'), - (0x1CF47, 'X'), - (0x1CF50, 'V'), - (0x1CFC4, 'X'), - (0x1D000, 'V'), - (0x1D0F6, 'X'), - (0x1D100, 'V'), - (0x1D127, 'X'), - (0x1D129, 'V'), - (0x1D15E, 'M', '𝅗𝅥'), - (0x1D15F, 'M', '𝅘𝅥'), - (0x1D160, 'M', '𝅘𝅥𝅮'), - (0x1D161, 'M', '𝅘𝅥𝅯'), - (0x1D162, 'M', '𝅘𝅥𝅰'), - (0x1D163, 'M', '𝅘𝅥𝅱'), - (0x1D164, 'M', '𝅘𝅥𝅲'), - (0x1D165, 'V'), - (0x1D173, 'X'), - (0x1D17B, 'V'), - (0x1D1BB, 'M', '𝆹𝅥'), - (0x1D1BC, 'M', '𝆺𝅥'), - (0x1D1BD, 'M', '𝆹𝅥𝅮'), - (0x1D1BE, 'M', '𝆺𝅥𝅮'), - (0x1D1BF, 'M', '𝆹𝅥𝅯'), - (0x1D1C0, 'M', '𝆺𝅥𝅯'), - (0x1D1C1, 'V'), - (0x1D1EB, 'X'), - (0x1D200, 'V'), - (0x1D246, 'X'), - (0x1D2C0, 'V'), - (0x1D2D4, 'X'), - (0x1D2E0, 'V'), - (0x1D2F4, 'X'), - (0x1D300, 'V'), - (0x1D357, 'X'), - (0x1D360, 'V'), - (0x1D379, 'X'), - (0x1D400, 'M', 'a'), - (0x1D401, 'M', 'b'), - (0x1D402, 'M', 'c'), - (0x1D403, 'M', 'd'), - (0x1D404, 'M', 'e'), - (0x1D405, 'M', 'f'), - (0x1D406, 'M', 'g'), - (0x1D407, 'M', 'h'), - (0x1D408, 'M', 'i'), - (0x1D409, 'M', 'j'), - (0x1D40A, 'M', 'k'), - (0x1D40B, 'M', 'l'), - (0x1D40C, 'M', 'm'), - (0x1D40D, 'M', 'n'), - (0x1D40E, 'M', 'o'), - (0x1D40F, 'M', 'p'), - (0x1D410, 'M', 'q'), - (0x1D411, 'M', 'r'), - (0x1D412, 'M', 's'), - (0x1D413, 'M', 't'), - (0x1D414, 'M', 'u'), - (0x1D415, 'M', 'v'), - (0x1D416, 'M', 'w'), - (0x1D417, 'M', 'x'), - (0x1D418, 'M', 'y'), - (0x1D419, 'M', 'z'), - (0x1D41A, 'M', 'a'), - (0x1D41B, 'M', 'b'), - (0x1D41C, 'M', 'c'), - (0x1D41D, 'M', 'd'), - (0x1D41E, 'M', 'e'), - (0x1D41F, 'M', 'f'), - (0x1D420, 'M', 'g'), - ] - -def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D421, 'M', 'h'), - (0x1D422, 'M', 'i'), - (0x1D423, 'M', 'j'), - (0x1D424, 'M', 'k'), - (0x1D425, 'M', 'l'), - (0x1D426, 'M', 'm'), - (0x1D427, 'M', 'n'), - (0x1D428, 'M', 'o'), - (0x1D429, 'M', 'p'), - (0x1D42A, 'M', 'q'), - (0x1D42B, 'M', 'r'), - (0x1D42C, 'M', 's'), - (0x1D42D, 'M', 't'), - (0x1D42E, 'M', 'u'), - (0x1D42F, 'M', 'v'), - (0x1D430, 'M', 'w'), - (0x1D431, 'M', 'x'), - (0x1D432, 'M', 'y'), - (0x1D433, 'M', 'z'), - (0x1D434, 'M', 'a'), - (0x1D435, 'M', 'b'), - (0x1D436, 'M', 'c'), - (0x1D437, 'M', 'd'), - (0x1D438, 'M', 'e'), - (0x1D439, 'M', 'f'), - (0x1D43A, 'M', 'g'), - (0x1D43B, 'M', 'h'), - (0x1D43C, 'M', 'i'), - (0x1D43D, 'M', 'j'), - (0x1D43E, 'M', 'k'), - (0x1D43F, 'M', 'l'), - (0x1D440, 'M', 'm'), - (0x1D441, 'M', 'n'), - (0x1D442, 'M', 'o'), - (0x1D443, 'M', 'p'), - (0x1D444, 'M', 'q'), - (0x1D445, 'M', 'r'), - (0x1D446, 'M', 's'), - (0x1D447, 'M', 't'), - (0x1D448, 'M', 'u'), - (0x1D449, 'M', 'v'), - (0x1D44A, 'M', 'w'), - (0x1D44B, 'M', 'x'), - (0x1D44C, 'M', 'y'), - (0x1D44D, 'M', 'z'), - (0x1D44E, 'M', 'a'), - (0x1D44F, 'M', 'b'), - (0x1D450, 'M', 'c'), - (0x1D451, 'M', 'd'), - (0x1D452, 'M', 'e'), - (0x1D453, 'M', 'f'), - (0x1D454, 'M', 'g'), - (0x1D455, 'X'), - (0x1D456, 'M', 'i'), - (0x1D457, 'M', 'j'), - (0x1D458, 'M', 'k'), - (0x1D459, 'M', 'l'), - (0x1D45A, 'M', 'm'), - (0x1D45B, 'M', 'n'), - (0x1D45C, 'M', 'o'), - (0x1D45D, 'M', 'p'), - (0x1D45E, 'M', 'q'), - (0x1D45F, 'M', 'r'), - (0x1D460, 'M', 's'), - (0x1D461, 'M', 't'), - (0x1D462, 'M', 'u'), - (0x1D463, 'M', 'v'), - (0x1D464, 'M', 'w'), - (0x1D465, 'M', 'x'), - (0x1D466, 'M', 'y'), - (0x1D467, 'M', 'z'), - (0x1D468, 'M', 'a'), - (0x1D469, 'M', 'b'), - (0x1D46A, 'M', 'c'), - (0x1D46B, 'M', 'd'), - (0x1D46C, 'M', 'e'), - (0x1D46D, 'M', 'f'), - (0x1D46E, 'M', 'g'), - (0x1D46F, 'M', 'h'), - (0x1D470, 'M', 'i'), - (0x1D471, 'M', 'j'), - (0x1D472, 'M', 'k'), - (0x1D473, 'M', 'l'), - (0x1D474, 'M', 'm'), - (0x1D475, 'M', 'n'), - (0x1D476, 'M', 'o'), - (0x1D477, 'M', 'p'), - (0x1D478, 'M', 'q'), - (0x1D479, 'M', 'r'), - (0x1D47A, 'M', 's'), - (0x1D47B, 'M', 't'), - (0x1D47C, 'M', 'u'), - (0x1D47D, 'M', 'v'), - (0x1D47E, 'M', 'w'), - (0x1D47F, 'M', 'x'), - (0x1D480, 'M', 'y'), - (0x1D481, 'M', 'z'), - (0x1D482, 'M', 'a'), - (0x1D483, 'M', 'b'), - (0x1D484, 'M', 'c'), - ] - -def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D485, 'M', 'd'), - (0x1D486, 'M', 'e'), - (0x1D487, 'M', 'f'), - (0x1D488, 'M', 'g'), - (0x1D489, 'M', 'h'), - (0x1D48A, 'M', 'i'), - (0x1D48B, 'M', 'j'), - (0x1D48C, 'M', 'k'), - (0x1D48D, 'M', 'l'), - (0x1D48E, 'M', 'm'), - (0x1D48F, 'M', 'n'), - (0x1D490, 'M', 'o'), - (0x1D491, 'M', 'p'), - (0x1D492, 'M', 'q'), - (0x1D493, 'M', 'r'), - (0x1D494, 'M', 's'), - (0x1D495, 'M', 't'), - (0x1D496, 'M', 'u'), - (0x1D497, 'M', 'v'), - (0x1D498, 'M', 'w'), - (0x1D499, 'M', 'x'), - (0x1D49A, 'M', 'y'), - (0x1D49B, 'M', 'z'), - (0x1D49C, 'M', 'a'), - (0x1D49D, 'X'), - (0x1D49E, 'M', 'c'), - (0x1D49F, 'M', 'd'), - (0x1D4A0, 'X'), - (0x1D4A2, 'M', 'g'), - (0x1D4A3, 'X'), - (0x1D4A5, 'M', 'j'), - (0x1D4A6, 'M', 'k'), - (0x1D4A7, 'X'), - (0x1D4A9, 'M', 'n'), - (0x1D4AA, 'M', 'o'), - (0x1D4AB, 'M', 'p'), - (0x1D4AC, 'M', 'q'), - (0x1D4AD, 'X'), - (0x1D4AE, 'M', 's'), - (0x1D4AF, 'M', 't'), - (0x1D4B0, 'M', 'u'), - (0x1D4B1, 'M', 'v'), - (0x1D4B2, 'M', 'w'), - (0x1D4B3, 'M', 'x'), - (0x1D4B4, 'M', 'y'), - (0x1D4B5, 'M', 'z'), - (0x1D4B6, 'M', 'a'), - (0x1D4B7, 'M', 'b'), - (0x1D4B8, 'M', 'c'), - (0x1D4B9, 'M', 'd'), - (0x1D4BA, 'X'), - (0x1D4BB, 'M', 'f'), - (0x1D4BC, 'X'), - (0x1D4BD, 'M', 'h'), - (0x1D4BE, 'M', 'i'), - (0x1D4BF, 'M', 'j'), - (0x1D4C0, 'M', 'k'), - (0x1D4C1, 'M', 'l'), - (0x1D4C2, 'M', 'm'), - (0x1D4C3, 'M', 'n'), - (0x1D4C4, 'X'), - (0x1D4C5, 'M', 'p'), - (0x1D4C6, 'M', 'q'), - (0x1D4C7, 'M', 'r'), - (0x1D4C8, 'M', 's'), - (0x1D4C9, 'M', 't'), - (0x1D4CA, 'M', 'u'), - (0x1D4CB, 'M', 'v'), - (0x1D4CC, 'M', 'w'), - (0x1D4CD, 'M', 'x'), - (0x1D4CE, 'M', 'y'), - (0x1D4CF, 'M', 'z'), - (0x1D4D0, 'M', 'a'), - (0x1D4D1, 'M', 'b'), - (0x1D4D2, 'M', 'c'), - (0x1D4D3, 'M', 'd'), - (0x1D4D4, 'M', 'e'), - (0x1D4D5, 'M', 'f'), - (0x1D4D6, 'M', 'g'), - (0x1D4D7, 'M', 'h'), - (0x1D4D8, 'M', 'i'), - (0x1D4D9, 'M', 'j'), - (0x1D4DA, 'M', 'k'), - (0x1D4DB, 'M', 'l'), - (0x1D4DC, 'M', 'm'), - (0x1D4DD, 'M', 'n'), - (0x1D4DE, 'M', 'o'), - (0x1D4DF, 'M', 'p'), - (0x1D4E0, 'M', 'q'), - (0x1D4E1, 'M', 'r'), - (0x1D4E2, 'M', 's'), - (0x1D4E3, 'M', 't'), - (0x1D4E4, 'M', 'u'), - (0x1D4E5, 'M', 'v'), - (0x1D4E6, 'M', 'w'), - (0x1D4E7, 'M', 'x'), - (0x1D4E8, 'M', 'y'), - (0x1D4E9, 'M', 'z'), - (0x1D4EA, 'M', 'a'), - (0x1D4EB, 'M', 'b'), - ] - -def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D4EC, 'M', 'c'), - (0x1D4ED, 'M', 'd'), - (0x1D4EE, 'M', 'e'), - (0x1D4EF, 'M', 'f'), - (0x1D4F0, 'M', 'g'), - (0x1D4F1, 'M', 'h'), - (0x1D4F2, 'M', 'i'), - (0x1D4F3, 'M', 'j'), - (0x1D4F4, 'M', 'k'), - (0x1D4F5, 'M', 'l'), - (0x1D4F6, 'M', 'm'), - (0x1D4F7, 'M', 'n'), - (0x1D4F8, 'M', 'o'), - (0x1D4F9, 'M', 'p'), - (0x1D4FA, 'M', 'q'), - (0x1D4FB, 'M', 'r'), - (0x1D4FC, 'M', 's'), - (0x1D4FD, 'M', 't'), - (0x1D4FE, 'M', 'u'), - (0x1D4FF, 'M', 'v'), - (0x1D500, 'M', 'w'), - (0x1D501, 'M', 'x'), - (0x1D502, 'M', 'y'), - (0x1D503, 'M', 'z'), - (0x1D504, 'M', 'a'), - (0x1D505, 'M', 'b'), - (0x1D506, 'X'), - (0x1D507, 'M', 'd'), - (0x1D508, 'M', 'e'), - (0x1D509, 'M', 'f'), - (0x1D50A, 'M', 'g'), - (0x1D50B, 'X'), - (0x1D50D, 'M', 'j'), - (0x1D50E, 'M', 'k'), - (0x1D50F, 'M', 'l'), - (0x1D510, 'M', 'm'), - (0x1D511, 'M', 'n'), - (0x1D512, 'M', 'o'), - (0x1D513, 'M', 'p'), - (0x1D514, 'M', 'q'), - (0x1D515, 'X'), - (0x1D516, 'M', 's'), - (0x1D517, 'M', 't'), - (0x1D518, 'M', 'u'), - (0x1D519, 'M', 'v'), - (0x1D51A, 'M', 'w'), - (0x1D51B, 'M', 'x'), - (0x1D51C, 'M', 'y'), - (0x1D51D, 'X'), - (0x1D51E, 'M', 'a'), - (0x1D51F, 'M', 'b'), - (0x1D520, 'M', 'c'), - (0x1D521, 'M', 'd'), - (0x1D522, 'M', 'e'), - (0x1D523, 'M', 'f'), - (0x1D524, 'M', 'g'), - (0x1D525, 'M', 'h'), - (0x1D526, 'M', 'i'), - (0x1D527, 'M', 'j'), - (0x1D528, 'M', 'k'), - (0x1D529, 'M', 'l'), - (0x1D52A, 'M', 'm'), - (0x1D52B, 'M', 'n'), - (0x1D52C, 'M', 'o'), - (0x1D52D, 'M', 'p'), - (0x1D52E, 'M', 'q'), - (0x1D52F, 'M', 'r'), - (0x1D530, 'M', 's'), - (0x1D531, 'M', 't'), - (0x1D532, 'M', 'u'), - (0x1D533, 'M', 'v'), - (0x1D534, 'M', 'w'), - (0x1D535, 'M', 'x'), - (0x1D536, 'M', 'y'), - (0x1D537, 'M', 'z'), - (0x1D538, 'M', 'a'), - (0x1D539, 'M', 'b'), - (0x1D53A, 'X'), - (0x1D53B, 'M', 'd'), - (0x1D53C, 'M', 'e'), - (0x1D53D, 'M', 'f'), - (0x1D53E, 'M', 'g'), - (0x1D53F, 'X'), - (0x1D540, 'M', 'i'), - (0x1D541, 'M', 'j'), - (0x1D542, 'M', 'k'), - (0x1D543, 'M', 'l'), - (0x1D544, 'M', 'm'), - (0x1D545, 'X'), - (0x1D546, 'M', 'o'), - (0x1D547, 'X'), - (0x1D54A, 'M', 's'), - (0x1D54B, 'M', 't'), - (0x1D54C, 'M', 'u'), - (0x1D54D, 'M', 'v'), - (0x1D54E, 'M', 'w'), - (0x1D54F, 'M', 'x'), - (0x1D550, 'M', 'y'), - (0x1D551, 'X'), - (0x1D552, 'M', 'a'), - ] - -def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D553, 'M', 'b'), - (0x1D554, 'M', 'c'), - (0x1D555, 'M', 'd'), - (0x1D556, 'M', 'e'), - (0x1D557, 'M', 'f'), - (0x1D558, 'M', 'g'), - (0x1D559, 'M', 'h'), - (0x1D55A, 'M', 'i'), - (0x1D55B, 'M', 'j'), - (0x1D55C, 'M', 'k'), - (0x1D55D, 'M', 'l'), - (0x1D55E, 'M', 'm'), - (0x1D55F, 'M', 'n'), - (0x1D560, 'M', 'o'), - (0x1D561, 'M', 'p'), - (0x1D562, 'M', 'q'), - (0x1D563, 'M', 'r'), - (0x1D564, 'M', 's'), - (0x1D565, 'M', 't'), - (0x1D566, 'M', 'u'), - (0x1D567, 'M', 'v'), - (0x1D568, 'M', 'w'), - (0x1D569, 'M', 'x'), - (0x1D56A, 'M', 'y'), - (0x1D56B, 'M', 'z'), - (0x1D56C, 'M', 'a'), - (0x1D56D, 'M', 'b'), - (0x1D56E, 'M', 'c'), - (0x1D56F, 'M', 'd'), - (0x1D570, 'M', 'e'), - (0x1D571, 'M', 'f'), - (0x1D572, 'M', 'g'), - (0x1D573, 'M', 'h'), - (0x1D574, 'M', 'i'), - (0x1D575, 'M', 'j'), - (0x1D576, 'M', 'k'), - (0x1D577, 'M', 'l'), - (0x1D578, 'M', 'm'), - (0x1D579, 'M', 'n'), - (0x1D57A, 'M', 'o'), - (0x1D57B, 'M', 'p'), - (0x1D57C, 'M', 'q'), - (0x1D57D, 'M', 'r'), - (0x1D57E, 'M', 's'), - (0x1D57F, 'M', 't'), - (0x1D580, 'M', 'u'), - (0x1D581, 'M', 'v'), - (0x1D582, 'M', 'w'), - (0x1D583, 'M', 'x'), - (0x1D584, 'M', 'y'), - (0x1D585, 'M', 'z'), - (0x1D586, 'M', 'a'), - (0x1D587, 'M', 'b'), - (0x1D588, 'M', 'c'), - (0x1D589, 'M', 'd'), - (0x1D58A, 'M', 'e'), - (0x1D58B, 'M', 'f'), - (0x1D58C, 'M', 'g'), - (0x1D58D, 'M', 'h'), - (0x1D58E, 'M', 'i'), - (0x1D58F, 'M', 'j'), - (0x1D590, 'M', 'k'), - (0x1D591, 'M', 'l'), - (0x1D592, 'M', 'm'), - (0x1D593, 'M', 'n'), - (0x1D594, 'M', 'o'), - (0x1D595, 'M', 'p'), - (0x1D596, 'M', 'q'), - (0x1D597, 'M', 'r'), - (0x1D598, 'M', 's'), - (0x1D599, 'M', 't'), - (0x1D59A, 'M', 'u'), - (0x1D59B, 'M', 'v'), - (0x1D59C, 'M', 'w'), - (0x1D59D, 'M', 'x'), - (0x1D59E, 'M', 'y'), - (0x1D59F, 'M', 'z'), - (0x1D5A0, 'M', 'a'), - (0x1D5A1, 'M', 'b'), - (0x1D5A2, 'M', 'c'), - (0x1D5A3, 'M', 'd'), - (0x1D5A4, 'M', 'e'), - (0x1D5A5, 'M', 'f'), - (0x1D5A6, 'M', 'g'), - (0x1D5A7, 'M', 'h'), - (0x1D5A8, 'M', 'i'), - (0x1D5A9, 'M', 'j'), - (0x1D5AA, 'M', 'k'), - (0x1D5AB, 'M', 'l'), - (0x1D5AC, 'M', 'm'), - (0x1D5AD, 'M', 'n'), - (0x1D5AE, 'M', 'o'), - (0x1D5AF, 'M', 'p'), - (0x1D5B0, 'M', 'q'), - (0x1D5B1, 'M', 'r'), - (0x1D5B2, 'M', 's'), - (0x1D5B3, 'M', 't'), - (0x1D5B4, 'M', 'u'), - (0x1D5B5, 'M', 'v'), - (0x1D5B6, 'M', 'w'), - ] - -def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D5B7, 'M', 'x'), - (0x1D5B8, 'M', 'y'), - (0x1D5B9, 'M', 'z'), - (0x1D5BA, 'M', 'a'), - (0x1D5BB, 'M', 'b'), - (0x1D5BC, 'M', 'c'), - (0x1D5BD, 'M', 'd'), - (0x1D5BE, 'M', 'e'), - (0x1D5BF, 'M', 'f'), - (0x1D5C0, 'M', 'g'), - (0x1D5C1, 'M', 'h'), - (0x1D5C2, 'M', 'i'), - (0x1D5C3, 'M', 'j'), - (0x1D5C4, 'M', 'k'), - (0x1D5C5, 'M', 'l'), - (0x1D5C6, 'M', 'm'), - (0x1D5C7, 'M', 'n'), - (0x1D5C8, 'M', 'o'), - (0x1D5C9, 'M', 'p'), - (0x1D5CA, 'M', 'q'), - (0x1D5CB, 'M', 'r'), - (0x1D5CC, 'M', 's'), - (0x1D5CD, 'M', 't'), - (0x1D5CE, 'M', 'u'), - (0x1D5CF, 'M', 'v'), - (0x1D5D0, 'M', 'w'), - (0x1D5D1, 'M', 'x'), - (0x1D5D2, 'M', 'y'), - (0x1D5D3, 'M', 'z'), - (0x1D5D4, 'M', 'a'), - (0x1D5D5, 'M', 'b'), - (0x1D5D6, 'M', 'c'), - (0x1D5D7, 'M', 'd'), - (0x1D5D8, 'M', 'e'), - (0x1D5D9, 'M', 'f'), - (0x1D5DA, 'M', 'g'), - (0x1D5DB, 'M', 'h'), - (0x1D5DC, 'M', 'i'), - (0x1D5DD, 'M', 'j'), - (0x1D5DE, 'M', 'k'), - (0x1D5DF, 'M', 'l'), - (0x1D5E0, 'M', 'm'), - (0x1D5E1, 'M', 'n'), - (0x1D5E2, 'M', 'o'), - (0x1D5E3, 'M', 'p'), - (0x1D5E4, 'M', 'q'), - (0x1D5E5, 'M', 'r'), - (0x1D5E6, 'M', 's'), - (0x1D5E7, 'M', 't'), - (0x1D5E8, 'M', 'u'), - (0x1D5E9, 'M', 'v'), - (0x1D5EA, 'M', 'w'), - (0x1D5EB, 'M', 'x'), - (0x1D5EC, 'M', 'y'), - (0x1D5ED, 'M', 'z'), - (0x1D5EE, 'M', 'a'), - (0x1D5EF, 'M', 'b'), - (0x1D5F0, 'M', 'c'), - (0x1D5F1, 'M', 'd'), - (0x1D5F2, 'M', 'e'), - (0x1D5F3, 'M', 'f'), - (0x1D5F4, 'M', 'g'), - (0x1D5F5, 'M', 'h'), - (0x1D5F6, 'M', 'i'), - (0x1D5F7, 'M', 'j'), - (0x1D5F8, 'M', 'k'), - (0x1D5F9, 'M', 'l'), - (0x1D5FA, 'M', 'm'), - (0x1D5FB, 'M', 'n'), - (0x1D5FC, 'M', 'o'), - (0x1D5FD, 'M', 'p'), - (0x1D5FE, 'M', 'q'), - (0x1D5FF, 'M', 'r'), - (0x1D600, 'M', 's'), - (0x1D601, 'M', 't'), - (0x1D602, 'M', 'u'), - (0x1D603, 'M', 'v'), - (0x1D604, 'M', 'w'), - (0x1D605, 'M', 'x'), - (0x1D606, 'M', 'y'), - (0x1D607, 'M', 'z'), - (0x1D608, 'M', 'a'), - (0x1D609, 'M', 'b'), - (0x1D60A, 'M', 'c'), - (0x1D60B, 'M', 'd'), - (0x1D60C, 'M', 'e'), - (0x1D60D, 'M', 'f'), - (0x1D60E, 'M', 'g'), - (0x1D60F, 'M', 'h'), - (0x1D610, 'M', 'i'), - (0x1D611, 'M', 'j'), - (0x1D612, 'M', 'k'), - (0x1D613, 'M', 'l'), - (0x1D614, 'M', 'm'), - (0x1D615, 'M', 'n'), - (0x1D616, 'M', 'o'), - (0x1D617, 'M', 'p'), - (0x1D618, 'M', 'q'), - (0x1D619, 'M', 'r'), - (0x1D61A, 'M', 's'), - ] - -def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D61B, 'M', 't'), - (0x1D61C, 'M', 'u'), - (0x1D61D, 'M', 'v'), - (0x1D61E, 'M', 'w'), - (0x1D61F, 'M', 'x'), - (0x1D620, 'M', 'y'), - (0x1D621, 'M', 'z'), - (0x1D622, 'M', 'a'), - (0x1D623, 'M', 'b'), - (0x1D624, 'M', 'c'), - (0x1D625, 'M', 'd'), - (0x1D626, 'M', 'e'), - (0x1D627, 'M', 'f'), - (0x1D628, 'M', 'g'), - (0x1D629, 'M', 'h'), - (0x1D62A, 'M', 'i'), - (0x1D62B, 'M', 'j'), - (0x1D62C, 'M', 'k'), - (0x1D62D, 'M', 'l'), - (0x1D62E, 'M', 'm'), - (0x1D62F, 'M', 'n'), - (0x1D630, 'M', 'o'), - (0x1D631, 'M', 'p'), - (0x1D632, 'M', 'q'), - (0x1D633, 'M', 'r'), - (0x1D634, 'M', 's'), - (0x1D635, 'M', 't'), - (0x1D636, 'M', 'u'), - (0x1D637, 'M', 'v'), - (0x1D638, 'M', 'w'), - (0x1D639, 'M', 'x'), - (0x1D63A, 'M', 'y'), - (0x1D63B, 'M', 'z'), - (0x1D63C, 'M', 'a'), - (0x1D63D, 'M', 'b'), - (0x1D63E, 'M', 'c'), - (0x1D63F, 'M', 'd'), - (0x1D640, 'M', 'e'), - (0x1D641, 'M', 'f'), - (0x1D642, 'M', 'g'), - (0x1D643, 'M', 'h'), - (0x1D644, 'M', 'i'), - (0x1D645, 'M', 'j'), - (0x1D646, 'M', 'k'), - (0x1D647, 'M', 'l'), - (0x1D648, 'M', 'm'), - (0x1D649, 'M', 'n'), - (0x1D64A, 'M', 'o'), - (0x1D64B, 'M', 'p'), - (0x1D64C, 'M', 'q'), - (0x1D64D, 'M', 'r'), - (0x1D64E, 'M', 's'), - (0x1D64F, 'M', 't'), - (0x1D650, 'M', 'u'), - (0x1D651, 'M', 'v'), - (0x1D652, 'M', 'w'), - (0x1D653, 'M', 'x'), - (0x1D654, 'M', 'y'), - (0x1D655, 'M', 'z'), - (0x1D656, 'M', 'a'), - (0x1D657, 'M', 'b'), - (0x1D658, 'M', 'c'), - (0x1D659, 'M', 'd'), - (0x1D65A, 'M', 'e'), - (0x1D65B, 'M', 'f'), - (0x1D65C, 'M', 'g'), - (0x1D65D, 'M', 'h'), - (0x1D65E, 'M', 'i'), - (0x1D65F, 'M', 'j'), - (0x1D660, 'M', 'k'), - (0x1D661, 'M', 'l'), - (0x1D662, 'M', 'm'), - (0x1D663, 'M', 'n'), - (0x1D664, 'M', 'o'), - (0x1D665, 'M', 'p'), - (0x1D666, 'M', 'q'), - (0x1D667, 'M', 'r'), - (0x1D668, 'M', 's'), - (0x1D669, 'M', 't'), - (0x1D66A, 'M', 'u'), - (0x1D66B, 'M', 'v'), - (0x1D66C, 'M', 'w'), - (0x1D66D, 'M', 'x'), - (0x1D66E, 'M', 'y'), - (0x1D66F, 'M', 'z'), - (0x1D670, 'M', 'a'), - (0x1D671, 'M', 'b'), - (0x1D672, 'M', 'c'), - (0x1D673, 'M', 'd'), - (0x1D674, 'M', 'e'), - (0x1D675, 'M', 'f'), - (0x1D676, 'M', 'g'), - (0x1D677, 'M', 'h'), - (0x1D678, 'M', 'i'), - (0x1D679, 'M', 'j'), - (0x1D67A, 'M', 'k'), - (0x1D67B, 'M', 'l'), - (0x1D67C, 'M', 'm'), - (0x1D67D, 'M', 'n'), - (0x1D67E, 'M', 'o'), - ] - -def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D67F, 'M', 'p'), - (0x1D680, 'M', 'q'), - (0x1D681, 'M', 'r'), - (0x1D682, 'M', 's'), - (0x1D683, 'M', 't'), - (0x1D684, 'M', 'u'), - (0x1D685, 'M', 'v'), - (0x1D686, 'M', 'w'), - (0x1D687, 'M', 'x'), - (0x1D688, 'M', 'y'), - (0x1D689, 'M', 'z'), - (0x1D68A, 'M', 'a'), - (0x1D68B, 'M', 'b'), - (0x1D68C, 'M', 'c'), - (0x1D68D, 'M', 'd'), - (0x1D68E, 'M', 'e'), - (0x1D68F, 'M', 'f'), - (0x1D690, 'M', 'g'), - (0x1D691, 'M', 'h'), - (0x1D692, 'M', 'i'), - (0x1D693, 'M', 'j'), - (0x1D694, 'M', 'k'), - (0x1D695, 'M', 'l'), - (0x1D696, 'M', 'm'), - (0x1D697, 'M', 'n'), - (0x1D698, 'M', 'o'), - (0x1D699, 'M', 'p'), - (0x1D69A, 'M', 'q'), - (0x1D69B, 'M', 'r'), - (0x1D69C, 'M', 's'), - (0x1D69D, 'M', 't'), - (0x1D69E, 'M', 'u'), - (0x1D69F, 'M', 'v'), - (0x1D6A0, 'M', 'w'), - (0x1D6A1, 'M', 'x'), - (0x1D6A2, 'M', 'y'), - (0x1D6A3, 'M', 'z'), - (0x1D6A4, 'M', 'ı'), - (0x1D6A5, 'M', 'ȷ'), - (0x1D6A6, 'X'), - (0x1D6A8, 'M', 'α'), - (0x1D6A9, 'M', 'β'), - (0x1D6AA, 'M', 'γ'), - (0x1D6AB, 'M', 'δ'), - (0x1D6AC, 'M', 'ε'), - (0x1D6AD, 'M', 'ζ'), - (0x1D6AE, 'M', 'η'), - (0x1D6AF, 'M', 'θ'), - (0x1D6B0, 'M', 'ι'), - (0x1D6B1, 'M', 'κ'), - (0x1D6B2, 'M', 'λ'), - (0x1D6B3, 'M', 'μ'), - (0x1D6B4, 'M', 'ν'), - (0x1D6B5, 'M', 'ξ'), - (0x1D6B6, 'M', 'ο'), - (0x1D6B7, 'M', 'π'), - (0x1D6B8, 'M', 'ρ'), - (0x1D6B9, 'M', 'θ'), - (0x1D6BA, 'M', 'σ'), - (0x1D6BB, 'M', 'τ'), - (0x1D6BC, 'M', 'υ'), - (0x1D6BD, 'M', 'φ'), - (0x1D6BE, 'M', 'χ'), - (0x1D6BF, 'M', 'ψ'), - (0x1D6C0, 'M', 'ω'), - (0x1D6C1, 'M', '∇'), - (0x1D6C2, 'M', 'α'), - (0x1D6C3, 'M', 'β'), - (0x1D6C4, 'M', 'γ'), - (0x1D6C5, 'M', 'δ'), - (0x1D6C6, 'M', 'ε'), - (0x1D6C7, 'M', 'ζ'), - (0x1D6C8, 'M', 'η'), - (0x1D6C9, 'M', 'θ'), - (0x1D6CA, 'M', 'ι'), - (0x1D6CB, 'M', 'κ'), - (0x1D6CC, 'M', 'λ'), - (0x1D6CD, 'M', 'μ'), - (0x1D6CE, 'M', 'ν'), - (0x1D6CF, 'M', 'ξ'), - (0x1D6D0, 'M', 'ο'), - (0x1D6D1, 'M', 'π'), - (0x1D6D2, 'M', 'ρ'), - (0x1D6D3, 'M', 'σ'), - (0x1D6D5, 'M', 'τ'), - (0x1D6D6, 'M', 'υ'), - (0x1D6D7, 'M', 'φ'), - (0x1D6D8, 'M', 'χ'), - (0x1D6D9, 'M', 'ψ'), - (0x1D6DA, 'M', 'ω'), - (0x1D6DB, 'M', '∂'), - (0x1D6DC, 'M', 'ε'), - (0x1D6DD, 'M', 'θ'), - (0x1D6DE, 'M', 'κ'), - (0x1D6DF, 'M', 'φ'), - (0x1D6E0, 'M', 'ρ'), - (0x1D6E1, 'M', 'π'), - (0x1D6E2, 'M', 'α'), - (0x1D6E3, 'M', 'β'), - (0x1D6E4, 'M', 'γ'), - ] - -def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D6E5, 'M', 'δ'), - (0x1D6E6, 'M', 'ε'), - (0x1D6E7, 'M', 'ζ'), - (0x1D6E8, 'M', 'η'), - (0x1D6E9, 'M', 'θ'), - (0x1D6EA, 'M', 'ι'), - (0x1D6EB, 'M', 'κ'), - (0x1D6EC, 'M', 'λ'), - (0x1D6ED, 'M', 'μ'), - (0x1D6EE, 'M', 'ν'), - (0x1D6EF, 'M', 'ξ'), - (0x1D6F0, 'M', 'ο'), - (0x1D6F1, 'M', 'π'), - (0x1D6F2, 'M', 'ρ'), - (0x1D6F3, 'M', 'θ'), - (0x1D6F4, 'M', 'σ'), - (0x1D6F5, 'M', 'τ'), - (0x1D6F6, 'M', 'υ'), - (0x1D6F7, 'M', 'φ'), - (0x1D6F8, 'M', 'χ'), - (0x1D6F9, 'M', 'ψ'), - (0x1D6FA, 'M', 'ω'), - (0x1D6FB, 'M', '∇'), - (0x1D6FC, 'M', 'α'), - (0x1D6FD, 'M', 'β'), - (0x1D6FE, 'M', 'γ'), - (0x1D6FF, 'M', 'δ'), - (0x1D700, 'M', 'ε'), - (0x1D701, 'M', 'ζ'), - (0x1D702, 'M', 'η'), - (0x1D703, 'M', 'θ'), - (0x1D704, 'M', 'ι'), - (0x1D705, 'M', 'κ'), - (0x1D706, 'M', 'λ'), - (0x1D707, 'M', 'μ'), - (0x1D708, 'M', 'ν'), - (0x1D709, 'M', 'ξ'), - (0x1D70A, 'M', 'ο'), - (0x1D70B, 'M', 'π'), - (0x1D70C, 'M', 'ρ'), - (0x1D70D, 'M', 'σ'), - (0x1D70F, 'M', 'τ'), - (0x1D710, 'M', 'υ'), - (0x1D711, 'M', 'φ'), - (0x1D712, 'M', 'χ'), - (0x1D713, 'M', 'ψ'), - (0x1D714, 'M', 'ω'), - (0x1D715, 'M', '∂'), - (0x1D716, 'M', 'ε'), - (0x1D717, 'M', 'θ'), - (0x1D718, 'M', 'κ'), - (0x1D719, 'M', 'φ'), - (0x1D71A, 'M', 'ρ'), - (0x1D71B, 'M', 'π'), - (0x1D71C, 'M', 'α'), - (0x1D71D, 'M', 'β'), - (0x1D71E, 'M', 'γ'), - (0x1D71F, 'M', 'δ'), - (0x1D720, 'M', 'ε'), - (0x1D721, 'M', 'ζ'), - (0x1D722, 'M', 'η'), - (0x1D723, 'M', 'θ'), - (0x1D724, 'M', 'ι'), - (0x1D725, 'M', 'κ'), - (0x1D726, 'M', 'λ'), - (0x1D727, 'M', 'μ'), - (0x1D728, 'M', 'ν'), - (0x1D729, 'M', 'ξ'), - (0x1D72A, 'M', 'ο'), - (0x1D72B, 'M', 'π'), - (0x1D72C, 'M', 'ρ'), - (0x1D72D, 'M', 'θ'), - (0x1D72E, 'M', 'σ'), - (0x1D72F, 'M', 'τ'), - (0x1D730, 'M', 'υ'), - (0x1D731, 'M', 'φ'), - (0x1D732, 'M', 'χ'), - (0x1D733, 'M', 'ψ'), - (0x1D734, 'M', 'ω'), - (0x1D735, 'M', '∇'), - (0x1D736, 'M', 'α'), - (0x1D737, 'M', 'β'), - (0x1D738, 'M', 'γ'), - (0x1D739, 'M', 'δ'), - (0x1D73A, 'M', 'ε'), - (0x1D73B, 'M', 'ζ'), - (0x1D73C, 'M', 'η'), - (0x1D73D, 'M', 'θ'), - (0x1D73E, 'M', 'ι'), - (0x1D73F, 'M', 'κ'), - (0x1D740, 'M', 'λ'), - (0x1D741, 'M', 'μ'), - (0x1D742, 'M', 'ν'), - (0x1D743, 'M', 'ξ'), - (0x1D744, 'M', 'ο'), - (0x1D745, 'M', 'π'), - (0x1D746, 'M', 'ρ'), - (0x1D747, 'M', 'σ'), - (0x1D749, 'M', 'τ'), - (0x1D74A, 'M', 'υ'), - ] - -def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D74B, 'M', 'φ'), - (0x1D74C, 'M', 'χ'), - (0x1D74D, 'M', 'ψ'), - (0x1D74E, 'M', 'ω'), - (0x1D74F, 'M', '∂'), - (0x1D750, 'M', 'ε'), - (0x1D751, 'M', 'θ'), - (0x1D752, 'M', 'κ'), - (0x1D753, 'M', 'φ'), - (0x1D754, 'M', 'ρ'), - (0x1D755, 'M', 'π'), - (0x1D756, 'M', 'α'), - (0x1D757, 'M', 'β'), - (0x1D758, 'M', 'γ'), - (0x1D759, 'M', 'δ'), - (0x1D75A, 'M', 'ε'), - (0x1D75B, 'M', 'ζ'), - (0x1D75C, 'M', 'η'), - (0x1D75D, 'M', 'θ'), - (0x1D75E, 'M', 'ι'), - (0x1D75F, 'M', 'κ'), - (0x1D760, 'M', 'λ'), - (0x1D761, 'M', 'μ'), - (0x1D762, 'M', 'ν'), - (0x1D763, 'M', 'ξ'), - (0x1D764, 'M', 'ο'), - (0x1D765, 'M', 'π'), - (0x1D766, 'M', 'ρ'), - (0x1D767, 'M', 'θ'), - (0x1D768, 'M', 'σ'), - (0x1D769, 'M', 'τ'), - (0x1D76A, 'M', 'υ'), - (0x1D76B, 'M', 'φ'), - (0x1D76C, 'M', 'χ'), - (0x1D76D, 'M', 'ψ'), - (0x1D76E, 'M', 'ω'), - (0x1D76F, 'M', '∇'), - (0x1D770, 'M', 'α'), - (0x1D771, 'M', 'β'), - (0x1D772, 'M', 'γ'), - (0x1D773, 'M', 'δ'), - (0x1D774, 'M', 'ε'), - (0x1D775, 'M', 'ζ'), - (0x1D776, 'M', 'η'), - (0x1D777, 'M', 'θ'), - (0x1D778, 'M', 'ι'), - (0x1D779, 'M', 'κ'), - (0x1D77A, 'M', 'λ'), - (0x1D77B, 'M', 'μ'), - (0x1D77C, 'M', 'ν'), - (0x1D77D, 'M', 'ξ'), - (0x1D77E, 'M', 'ο'), - (0x1D77F, 'M', 'π'), - (0x1D780, 'M', 'ρ'), - (0x1D781, 'M', 'σ'), - (0x1D783, 'M', 'τ'), - (0x1D784, 'M', 'υ'), - (0x1D785, 'M', 'φ'), - (0x1D786, 'M', 'χ'), - (0x1D787, 'M', 'ψ'), - (0x1D788, 'M', 'ω'), - (0x1D789, 'M', '∂'), - (0x1D78A, 'M', 'ε'), - (0x1D78B, 'M', 'θ'), - (0x1D78C, 'M', 'κ'), - (0x1D78D, 'M', 'φ'), - (0x1D78E, 'M', 'ρ'), - (0x1D78F, 'M', 'π'), - (0x1D790, 'M', 'α'), - (0x1D791, 'M', 'β'), - (0x1D792, 'M', 'γ'), - (0x1D793, 'M', 'δ'), - (0x1D794, 'M', 'ε'), - (0x1D795, 'M', 'ζ'), - (0x1D796, 'M', 'η'), - (0x1D797, 'M', 'θ'), - (0x1D798, 'M', 'ι'), - (0x1D799, 'M', 'κ'), - (0x1D79A, 'M', 'λ'), - (0x1D79B, 'M', 'μ'), - (0x1D79C, 'M', 'ν'), - (0x1D79D, 'M', 'ξ'), - (0x1D79E, 'M', 'ο'), - (0x1D79F, 'M', 'π'), - (0x1D7A0, 'M', 'ρ'), - (0x1D7A1, 'M', 'θ'), - (0x1D7A2, 'M', 'σ'), - (0x1D7A3, 'M', 'τ'), - (0x1D7A4, 'M', 'υ'), - (0x1D7A5, 'M', 'φ'), - (0x1D7A6, 'M', 'χ'), - (0x1D7A7, 'M', 'ψ'), - (0x1D7A8, 'M', 'ω'), - (0x1D7A9, 'M', '∇'), - (0x1D7AA, 'M', 'α'), - (0x1D7AB, 'M', 'β'), - (0x1D7AC, 'M', 'γ'), - (0x1D7AD, 'M', 'δ'), - (0x1D7AE, 'M', 'ε'), - (0x1D7AF, 'M', 'ζ'), - ] - -def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D7B0, 'M', 'η'), - (0x1D7B1, 'M', 'θ'), - (0x1D7B2, 'M', 'ι'), - (0x1D7B3, 'M', 'κ'), - (0x1D7B4, 'M', 'λ'), - (0x1D7B5, 'M', 'μ'), - (0x1D7B6, 'M', 'ν'), - (0x1D7B7, 'M', 'ξ'), - (0x1D7B8, 'M', 'ο'), - (0x1D7B9, 'M', 'π'), - (0x1D7BA, 'M', 'ρ'), - (0x1D7BB, 'M', 'σ'), - (0x1D7BD, 'M', 'τ'), - (0x1D7BE, 'M', 'υ'), - (0x1D7BF, 'M', 'φ'), - (0x1D7C0, 'M', 'χ'), - (0x1D7C1, 'M', 'ψ'), - (0x1D7C2, 'M', 'ω'), - (0x1D7C3, 'M', '∂'), - (0x1D7C4, 'M', 'ε'), - (0x1D7C5, 'M', 'θ'), - (0x1D7C6, 'M', 'κ'), - (0x1D7C7, 'M', 'φ'), - (0x1D7C8, 'M', 'ρ'), - (0x1D7C9, 'M', 'π'), - (0x1D7CA, 'M', 'ϝ'), - (0x1D7CC, 'X'), - (0x1D7CE, 'M', '0'), - (0x1D7CF, 'M', '1'), - (0x1D7D0, 'M', '2'), - (0x1D7D1, 'M', '3'), - (0x1D7D2, 'M', '4'), - (0x1D7D3, 'M', '5'), - (0x1D7D4, 'M', '6'), - (0x1D7D5, 'M', '7'), - (0x1D7D6, 'M', '8'), - (0x1D7D7, 'M', '9'), - (0x1D7D8, 'M', '0'), - (0x1D7D9, 'M', '1'), - (0x1D7DA, 'M', '2'), - (0x1D7DB, 'M', '3'), - (0x1D7DC, 'M', '4'), - (0x1D7DD, 'M', '5'), - (0x1D7DE, 'M', '6'), - (0x1D7DF, 'M', '7'), - (0x1D7E0, 'M', '8'), - (0x1D7E1, 'M', '9'), - (0x1D7E2, 'M', '0'), - (0x1D7E3, 'M', '1'), - (0x1D7E4, 'M', '2'), - (0x1D7E5, 'M', '3'), - (0x1D7E6, 'M', '4'), - (0x1D7E7, 'M', '5'), - (0x1D7E8, 'M', '6'), - (0x1D7E9, 'M', '7'), - (0x1D7EA, 'M', '8'), - (0x1D7EB, 'M', '9'), - (0x1D7EC, 'M', '0'), - (0x1D7ED, 'M', '1'), - (0x1D7EE, 'M', '2'), - (0x1D7EF, 'M', '3'), - (0x1D7F0, 'M', '4'), - (0x1D7F1, 'M', '5'), - (0x1D7F2, 'M', '6'), - (0x1D7F3, 'M', '7'), - (0x1D7F4, 'M', '8'), - (0x1D7F5, 'M', '9'), - (0x1D7F6, 'M', '0'), - (0x1D7F7, 'M', '1'), - (0x1D7F8, 'M', '2'), - (0x1D7F9, 'M', '3'), - (0x1D7FA, 'M', '4'), - (0x1D7FB, 'M', '5'), - (0x1D7FC, 'M', '6'), - (0x1D7FD, 'M', '7'), - (0x1D7FE, 'M', '8'), - (0x1D7FF, 'M', '9'), - (0x1D800, 'V'), - (0x1DA8C, 'X'), - (0x1DA9B, 'V'), - (0x1DAA0, 'X'), - (0x1DAA1, 'V'), - (0x1DAB0, 'X'), - (0x1DF00, 'V'), - (0x1DF1F, 'X'), - (0x1DF25, 'V'), - (0x1DF2B, 'X'), - (0x1E000, 'V'), - (0x1E007, 'X'), - (0x1E008, 'V'), - (0x1E019, 'X'), - (0x1E01B, 'V'), - (0x1E022, 'X'), - (0x1E023, 'V'), - (0x1E025, 'X'), - (0x1E026, 'V'), - (0x1E02B, 'X'), - (0x1E030, 'M', 'а'), - (0x1E031, 'M', 'б'), - (0x1E032, 'M', 'в'), - ] - -def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E033, 'M', 'г'), - (0x1E034, 'M', 'д'), - (0x1E035, 'M', 'е'), - (0x1E036, 'M', 'ж'), - (0x1E037, 'M', 'з'), - (0x1E038, 'M', 'и'), - (0x1E039, 'M', 'к'), - (0x1E03A, 'M', 'л'), - (0x1E03B, 'M', 'м'), - (0x1E03C, 'M', 'о'), - (0x1E03D, 'M', 'п'), - (0x1E03E, 'M', 'р'), - (0x1E03F, 'M', 'с'), - (0x1E040, 'M', 'т'), - (0x1E041, 'M', 'у'), - (0x1E042, 'M', 'ф'), - (0x1E043, 'M', 'х'), - (0x1E044, 'M', 'ц'), - (0x1E045, 'M', 'ч'), - (0x1E046, 'M', 'ш'), - (0x1E047, 'M', 'ы'), - (0x1E048, 'M', 'э'), - (0x1E049, 'M', 'ю'), - (0x1E04A, 'M', 'ꚉ'), - (0x1E04B, 'M', 'ә'), - (0x1E04C, 'M', 'і'), - (0x1E04D, 'M', 'ј'), - (0x1E04E, 'M', 'ө'), - (0x1E04F, 'M', 'ү'), - (0x1E050, 'M', 'ӏ'), - (0x1E051, 'M', 'а'), - (0x1E052, 'M', 'б'), - (0x1E053, 'M', 'в'), - (0x1E054, 'M', 'г'), - (0x1E055, 'M', 'д'), - (0x1E056, 'M', 'е'), - (0x1E057, 'M', 'ж'), - (0x1E058, 'M', 'з'), - (0x1E059, 'M', 'и'), - (0x1E05A, 'M', 'к'), - (0x1E05B, 'M', 'л'), - (0x1E05C, 'M', 'о'), - (0x1E05D, 'M', 'п'), - (0x1E05E, 'M', 'с'), - (0x1E05F, 'M', 'у'), - (0x1E060, 'M', 'ф'), - (0x1E061, 'M', 'х'), - (0x1E062, 'M', 'ц'), - (0x1E063, 'M', 'ч'), - (0x1E064, 'M', 'ш'), - (0x1E065, 'M', 'ъ'), - (0x1E066, 'M', 'ы'), - (0x1E067, 'M', 'ґ'), - (0x1E068, 'M', 'і'), - (0x1E069, 'M', 'ѕ'), - (0x1E06A, 'M', 'џ'), - (0x1E06B, 'M', 'ҫ'), - (0x1E06C, 'M', 'ꙑ'), - (0x1E06D, 'M', 'ұ'), - (0x1E06E, 'X'), - (0x1E08F, 'V'), - (0x1E090, 'X'), - (0x1E100, 'V'), - (0x1E12D, 'X'), - (0x1E130, 'V'), - (0x1E13E, 'X'), - (0x1E140, 'V'), - (0x1E14A, 'X'), - (0x1E14E, 'V'), - (0x1E150, 'X'), - (0x1E290, 'V'), - (0x1E2AF, 'X'), - (0x1E2C0, 'V'), - (0x1E2FA, 'X'), - (0x1E2FF, 'V'), - (0x1E300, 'X'), - (0x1E4D0, 'V'), - (0x1E4FA, 'X'), - (0x1E7E0, 'V'), - (0x1E7E7, 'X'), - (0x1E7E8, 'V'), - (0x1E7EC, 'X'), - (0x1E7ED, 'V'), - (0x1E7EF, 'X'), - (0x1E7F0, 'V'), - (0x1E7FF, 'X'), - (0x1E800, 'V'), - (0x1E8C5, 'X'), - (0x1E8C7, 'V'), - (0x1E8D7, 'X'), - (0x1E900, 'M', '𞤢'), - (0x1E901, 'M', '𞤣'), - (0x1E902, 'M', '𞤤'), - (0x1E903, 'M', '𞤥'), - (0x1E904, 'M', '𞤦'), - (0x1E905, 'M', '𞤧'), - (0x1E906, 'M', '𞤨'), - (0x1E907, 'M', '𞤩'), - (0x1E908, 'M', '𞤪'), - (0x1E909, 'M', '𞤫'), - ] - -def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E90A, 'M', '𞤬'), - (0x1E90B, 'M', '𞤭'), - (0x1E90C, 'M', '𞤮'), - (0x1E90D, 'M', '𞤯'), - (0x1E90E, 'M', '𞤰'), - (0x1E90F, 'M', '𞤱'), - (0x1E910, 'M', '𞤲'), - (0x1E911, 'M', '𞤳'), - (0x1E912, 'M', '𞤴'), - (0x1E913, 'M', '𞤵'), - (0x1E914, 'M', '𞤶'), - (0x1E915, 'M', '𞤷'), - (0x1E916, 'M', '𞤸'), - (0x1E917, 'M', '𞤹'), - (0x1E918, 'M', '𞤺'), - (0x1E919, 'M', '𞤻'), - (0x1E91A, 'M', '𞤼'), - (0x1E91B, 'M', '𞤽'), - (0x1E91C, 'M', '𞤾'), - (0x1E91D, 'M', '𞤿'), - (0x1E91E, 'M', '𞥀'), - (0x1E91F, 'M', '𞥁'), - (0x1E920, 'M', '𞥂'), - (0x1E921, 'M', '𞥃'), - (0x1E922, 'V'), - (0x1E94C, 'X'), - (0x1E950, 'V'), - (0x1E95A, 'X'), - (0x1E95E, 'V'), - (0x1E960, 'X'), - (0x1EC71, 'V'), - (0x1ECB5, 'X'), - (0x1ED01, 'V'), - (0x1ED3E, 'X'), - (0x1EE00, 'M', 'ا'), - (0x1EE01, 'M', 'ب'), - (0x1EE02, 'M', 'ج'), - (0x1EE03, 'M', 'د'), - (0x1EE04, 'X'), - (0x1EE05, 'M', 'و'), - (0x1EE06, 'M', 'ز'), - (0x1EE07, 'M', 'ح'), - (0x1EE08, 'M', 'ط'), - (0x1EE09, 'M', 'ي'), - (0x1EE0A, 'M', 'ك'), - (0x1EE0B, 'M', 'ل'), - (0x1EE0C, 'M', 'م'), - (0x1EE0D, 'M', 'ن'), - (0x1EE0E, 'M', 'س'), - (0x1EE0F, 'M', 'ع'), - (0x1EE10, 'M', 'ف'), - (0x1EE11, 'M', 'ص'), - (0x1EE12, 'M', 'ق'), - (0x1EE13, 'M', 'ر'), - (0x1EE14, 'M', 'ش'), - (0x1EE15, 'M', 'ت'), - (0x1EE16, 'M', 'ث'), - (0x1EE17, 'M', 'خ'), - (0x1EE18, 'M', 'ذ'), - (0x1EE19, 'M', 'ض'), - (0x1EE1A, 'M', 'ظ'), - (0x1EE1B, 'M', 'غ'), - (0x1EE1C, 'M', 'ٮ'), - (0x1EE1D, 'M', 'ں'), - (0x1EE1E, 'M', 'ڡ'), - (0x1EE1F, 'M', 'ٯ'), - (0x1EE20, 'X'), - (0x1EE21, 'M', 'ب'), - (0x1EE22, 'M', 'ج'), - (0x1EE23, 'X'), - (0x1EE24, 'M', 'ه'), - (0x1EE25, 'X'), - (0x1EE27, 'M', 'ح'), - (0x1EE28, 'X'), - (0x1EE29, 'M', 'ي'), - (0x1EE2A, 'M', 'ك'), - (0x1EE2B, 'M', 'ل'), - (0x1EE2C, 'M', 'م'), - (0x1EE2D, 'M', 'ن'), - (0x1EE2E, 'M', 'س'), - (0x1EE2F, 'M', 'ع'), - (0x1EE30, 'M', 'ف'), - (0x1EE31, 'M', 'ص'), - (0x1EE32, 'M', 'ق'), - (0x1EE33, 'X'), - (0x1EE34, 'M', 'ش'), - (0x1EE35, 'M', 'ت'), - (0x1EE36, 'M', 'ث'), - (0x1EE37, 'M', 'خ'), - (0x1EE38, 'X'), - (0x1EE39, 'M', 'ض'), - (0x1EE3A, 'X'), - (0x1EE3B, 'M', 'غ'), - (0x1EE3C, 'X'), - (0x1EE42, 'M', 'ج'), - (0x1EE43, 'X'), - (0x1EE47, 'M', 'ح'), - (0x1EE48, 'X'), - (0x1EE49, 'M', 'ي'), - (0x1EE4A, 'X'), - ] - -def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EE4B, 'M', 'ل'), - (0x1EE4C, 'X'), - (0x1EE4D, 'M', 'ن'), - (0x1EE4E, 'M', 'س'), - (0x1EE4F, 'M', 'ع'), - (0x1EE50, 'X'), - (0x1EE51, 'M', 'ص'), - (0x1EE52, 'M', 'ق'), - (0x1EE53, 'X'), - (0x1EE54, 'M', 'ش'), - (0x1EE55, 'X'), - (0x1EE57, 'M', 'خ'), - (0x1EE58, 'X'), - (0x1EE59, 'M', 'ض'), - (0x1EE5A, 'X'), - (0x1EE5B, 'M', 'غ'), - (0x1EE5C, 'X'), - (0x1EE5D, 'M', 'ں'), - (0x1EE5E, 'X'), - (0x1EE5F, 'M', 'ٯ'), - (0x1EE60, 'X'), - (0x1EE61, 'M', 'ب'), - (0x1EE62, 'M', 'ج'), - (0x1EE63, 'X'), - (0x1EE64, 'M', 'ه'), - (0x1EE65, 'X'), - (0x1EE67, 'M', 'ح'), - (0x1EE68, 'M', 'ط'), - (0x1EE69, 'M', 'ي'), - (0x1EE6A, 'M', 'ك'), - (0x1EE6B, 'X'), - (0x1EE6C, 'M', 'م'), - (0x1EE6D, 'M', 'ن'), - (0x1EE6E, 'M', 'س'), - (0x1EE6F, 'M', 'ع'), - (0x1EE70, 'M', 'ف'), - (0x1EE71, 'M', 'ص'), - (0x1EE72, 'M', 'ق'), - (0x1EE73, 'X'), - (0x1EE74, 'M', 'ش'), - (0x1EE75, 'M', 'ت'), - (0x1EE76, 'M', 'ث'), - (0x1EE77, 'M', 'خ'), - (0x1EE78, 'X'), - (0x1EE79, 'M', 'ض'), - (0x1EE7A, 'M', 'ظ'), - (0x1EE7B, 'M', 'غ'), - (0x1EE7C, 'M', 'ٮ'), - (0x1EE7D, 'X'), - (0x1EE7E, 'M', 'ڡ'), - (0x1EE7F, 'X'), - (0x1EE80, 'M', 'ا'), - (0x1EE81, 'M', 'ب'), - (0x1EE82, 'M', 'ج'), - (0x1EE83, 'M', 'د'), - (0x1EE84, 'M', 'ه'), - (0x1EE85, 'M', 'و'), - (0x1EE86, 'M', 'ز'), - (0x1EE87, 'M', 'ح'), - (0x1EE88, 'M', 'ط'), - (0x1EE89, 'M', 'ي'), - (0x1EE8A, 'X'), - (0x1EE8B, 'M', 'ل'), - (0x1EE8C, 'M', 'م'), - (0x1EE8D, 'M', 'ن'), - (0x1EE8E, 'M', 'س'), - (0x1EE8F, 'M', 'ع'), - (0x1EE90, 'M', 'ف'), - (0x1EE91, 'M', 'ص'), - (0x1EE92, 'M', 'ق'), - (0x1EE93, 'M', 'ر'), - (0x1EE94, 'M', 'ش'), - (0x1EE95, 'M', 'ت'), - (0x1EE96, 'M', 'ث'), - (0x1EE97, 'M', 'خ'), - (0x1EE98, 'M', 'ذ'), - (0x1EE99, 'M', 'ض'), - (0x1EE9A, 'M', 'ظ'), - (0x1EE9B, 'M', 'غ'), - (0x1EE9C, 'X'), - (0x1EEA1, 'M', 'ب'), - (0x1EEA2, 'M', 'ج'), - (0x1EEA3, 'M', 'د'), - (0x1EEA4, 'X'), - (0x1EEA5, 'M', 'و'), - (0x1EEA6, 'M', 'ز'), - (0x1EEA7, 'M', 'ح'), - (0x1EEA8, 'M', 'ط'), - (0x1EEA9, 'M', 'ي'), - (0x1EEAA, 'X'), - (0x1EEAB, 'M', 'ل'), - (0x1EEAC, 'M', 'م'), - (0x1EEAD, 'M', 'ن'), - (0x1EEAE, 'M', 'س'), - (0x1EEAF, 'M', 'ع'), - (0x1EEB0, 'M', 'ف'), - (0x1EEB1, 'M', 'ص'), - (0x1EEB2, 'M', 'ق'), - (0x1EEB3, 'M', 'ر'), - (0x1EEB4, 'M', 'ش'), - ] - -def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EEB5, 'M', 'ت'), - (0x1EEB6, 'M', 'ث'), - (0x1EEB7, 'M', 'خ'), - (0x1EEB8, 'M', 'ذ'), - (0x1EEB9, 'M', 'ض'), - (0x1EEBA, 'M', 'ظ'), - (0x1EEBB, 'M', 'غ'), - (0x1EEBC, 'X'), - (0x1EEF0, 'V'), - (0x1EEF2, 'X'), - (0x1F000, 'V'), - (0x1F02C, 'X'), - (0x1F030, 'V'), - (0x1F094, 'X'), - (0x1F0A0, 'V'), - (0x1F0AF, 'X'), - (0x1F0B1, 'V'), - (0x1F0C0, 'X'), - (0x1F0C1, 'V'), - (0x1F0D0, 'X'), - (0x1F0D1, 'V'), - (0x1F0F6, 'X'), - (0x1F101, '3', '0,'), - (0x1F102, '3', '1,'), - (0x1F103, '3', '2,'), - (0x1F104, '3', '3,'), - (0x1F105, '3', '4,'), - (0x1F106, '3', '5,'), - (0x1F107, '3', '6,'), - (0x1F108, '3', '7,'), - (0x1F109, '3', '8,'), - (0x1F10A, '3', '9,'), - (0x1F10B, 'V'), - (0x1F110, '3', '(a)'), - (0x1F111, '3', '(b)'), - (0x1F112, '3', '(c)'), - (0x1F113, '3', '(d)'), - (0x1F114, '3', '(e)'), - (0x1F115, '3', '(f)'), - (0x1F116, '3', '(g)'), - (0x1F117, '3', '(h)'), - (0x1F118, '3', '(i)'), - (0x1F119, '3', '(j)'), - (0x1F11A, '3', '(k)'), - (0x1F11B, '3', '(l)'), - (0x1F11C, '3', '(m)'), - (0x1F11D, '3', '(n)'), - (0x1F11E, '3', '(o)'), - (0x1F11F, '3', '(p)'), - (0x1F120, '3', '(q)'), - (0x1F121, '3', '(r)'), - (0x1F122, '3', '(s)'), - (0x1F123, '3', '(t)'), - (0x1F124, '3', '(u)'), - (0x1F125, '3', '(v)'), - (0x1F126, '3', '(w)'), - (0x1F127, '3', '(x)'), - (0x1F128, '3', '(y)'), - (0x1F129, '3', '(z)'), - (0x1F12A, 'M', '〔s〕'), - (0x1F12B, 'M', 'c'), - (0x1F12C, 'M', 'r'), - (0x1F12D, 'M', 'cd'), - (0x1F12E, 'M', 'wz'), - (0x1F12F, 'V'), - (0x1F130, 'M', 'a'), - (0x1F131, 'M', 'b'), - (0x1F132, 'M', 'c'), - (0x1F133, 'M', 'd'), - (0x1F134, 'M', 'e'), - (0x1F135, 'M', 'f'), - (0x1F136, 'M', 'g'), - (0x1F137, 'M', 'h'), - (0x1F138, 'M', 'i'), - (0x1F139, 'M', 'j'), - (0x1F13A, 'M', 'k'), - (0x1F13B, 'M', 'l'), - (0x1F13C, 'M', 'm'), - (0x1F13D, 'M', 'n'), - (0x1F13E, 'M', 'o'), - (0x1F13F, 'M', 'p'), - (0x1F140, 'M', 'q'), - (0x1F141, 'M', 'r'), - (0x1F142, 'M', 's'), - (0x1F143, 'M', 't'), - (0x1F144, 'M', 'u'), - (0x1F145, 'M', 'v'), - (0x1F146, 'M', 'w'), - (0x1F147, 'M', 'x'), - (0x1F148, 'M', 'y'), - (0x1F149, 'M', 'z'), - (0x1F14A, 'M', 'hv'), - (0x1F14B, 'M', 'mv'), - (0x1F14C, 'M', 'sd'), - (0x1F14D, 'M', 'ss'), - (0x1F14E, 'M', 'ppv'), - (0x1F14F, 'M', 'wc'), - (0x1F150, 'V'), - (0x1F16A, 'M', 'mc'), - (0x1F16B, 'M', 'md'), - ] - -def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F16C, 'M', 'mr'), - (0x1F16D, 'V'), - (0x1F190, 'M', 'dj'), - (0x1F191, 'V'), - (0x1F1AE, 'X'), - (0x1F1E6, 'V'), - (0x1F200, 'M', 'ほか'), - (0x1F201, 'M', 'ココ'), - (0x1F202, 'M', 'サ'), - (0x1F203, 'X'), - (0x1F210, 'M', '手'), - (0x1F211, 'M', '字'), - (0x1F212, 'M', '双'), - (0x1F213, 'M', 'デ'), - (0x1F214, 'M', '二'), - (0x1F215, 'M', '多'), - (0x1F216, 'M', '解'), - (0x1F217, 'M', '天'), - (0x1F218, 'M', '交'), - (0x1F219, 'M', '映'), - (0x1F21A, 'M', '無'), - (0x1F21B, 'M', '料'), - (0x1F21C, 'M', '前'), - (0x1F21D, 'M', '後'), - (0x1F21E, 'M', '再'), - (0x1F21F, 'M', '新'), - (0x1F220, 'M', '初'), - (0x1F221, 'M', '終'), - (0x1F222, 'M', '生'), - (0x1F223, 'M', '販'), - (0x1F224, 'M', '声'), - (0x1F225, 'M', '吹'), - (0x1F226, 'M', '演'), - (0x1F227, 'M', '投'), - (0x1F228, 'M', '捕'), - (0x1F229, 'M', '一'), - (0x1F22A, 'M', '三'), - (0x1F22B, 'M', '遊'), - (0x1F22C, 'M', '左'), - (0x1F22D, 'M', '中'), - (0x1F22E, 'M', '右'), - (0x1F22F, 'M', '指'), - (0x1F230, 'M', '走'), - (0x1F231, 'M', '打'), - (0x1F232, 'M', '禁'), - (0x1F233, 'M', '空'), - (0x1F234, 'M', '合'), - (0x1F235, 'M', '満'), - (0x1F236, 'M', '有'), - (0x1F237, 'M', '月'), - (0x1F238, 'M', '申'), - (0x1F239, 'M', '割'), - (0x1F23A, 'M', '営'), - (0x1F23B, 'M', '配'), - (0x1F23C, 'X'), - (0x1F240, 'M', '〔本〕'), - (0x1F241, 'M', '〔三〕'), - (0x1F242, 'M', '〔二〕'), - (0x1F243, 'M', '〔安〕'), - (0x1F244, 'M', '〔点〕'), - (0x1F245, 'M', '〔打〕'), - (0x1F246, 'M', '〔盗〕'), - (0x1F247, 'M', '〔勝〕'), - (0x1F248, 'M', '〔敗〕'), - (0x1F249, 'X'), - (0x1F250, 'M', '得'), - (0x1F251, 'M', '可'), - (0x1F252, 'X'), - (0x1F260, 'V'), - (0x1F266, 'X'), - (0x1F300, 'V'), - (0x1F6D8, 'X'), - (0x1F6DC, 'V'), - (0x1F6ED, 'X'), - (0x1F6F0, 'V'), - (0x1F6FD, 'X'), - (0x1F700, 'V'), - (0x1F777, 'X'), - (0x1F77B, 'V'), - (0x1F7DA, 'X'), - (0x1F7E0, 'V'), - (0x1F7EC, 'X'), - (0x1F7F0, 'V'), - (0x1F7F1, 'X'), - (0x1F800, 'V'), - (0x1F80C, 'X'), - (0x1F810, 'V'), - (0x1F848, 'X'), - (0x1F850, 'V'), - (0x1F85A, 'X'), - (0x1F860, 'V'), - (0x1F888, 'X'), - (0x1F890, 'V'), - (0x1F8AE, 'X'), - (0x1F8B0, 'V'), - (0x1F8B2, 'X'), - (0x1F900, 'V'), - (0x1FA54, 'X'), - (0x1FA60, 'V'), - (0x1FA6E, 'X'), - ] - -def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1FA70, 'V'), - (0x1FA7D, 'X'), - (0x1FA80, 'V'), - (0x1FA89, 'X'), - (0x1FA90, 'V'), - (0x1FABE, 'X'), - (0x1FABF, 'V'), - (0x1FAC6, 'X'), - (0x1FACE, 'V'), - (0x1FADC, 'X'), - (0x1FAE0, 'V'), - (0x1FAE9, 'X'), - (0x1FAF0, 'V'), - (0x1FAF9, 'X'), - (0x1FB00, 'V'), - (0x1FB93, 'X'), - (0x1FB94, 'V'), - (0x1FBCB, 'X'), - (0x1FBF0, 'M', '0'), - (0x1FBF1, 'M', '1'), - (0x1FBF2, 'M', '2'), - (0x1FBF3, 'M', '3'), - (0x1FBF4, 'M', '4'), - (0x1FBF5, 'M', '5'), - (0x1FBF6, 'M', '6'), - (0x1FBF7, 'M', '7'), - (0x1FBF8, 'M', '8'), - (0x1FBF9, 'M', '9'), - (0x1FBFA, 'X'), - (0x20000, 'V'), - (0x2A6E0, 'X'), - (0x2A700, 'V'), - (0x2B73A, 'X'), - (0x2B740, 'V'), - (0x2B81E, 'X'), - (0x2B820, 'V'), - (0x2CEA2, 'X'), - (0x2CEB0, 'V'), - (0x2EBE1, 'X'), - (0x2F800, 'M', '丽'), - (0x2F801, 'M', '丸'), - (0x2F802, 'M', '乁'), - (0x2F803, 'M', '𠄢'), - (0x2F804, 'M', '你'), - (0x2F805, 'M', '侮'), - (0x2F806, 'M', '侻'), - (0x2F807, 'M', '倂'), - (0x2F808, 'M', '偺'), - (0x2F809, 'M', '備'), - (0x2F80A, 'M', '僧'), - (0x2F80B, 'M', '像'), - (0x2F80C, 'M', '㒞'), - (0x2F80D, 'M', '𠘺'), - (0x2F80E, 'M', '免'), - (0x2F80F, 'M', '兔'), - (0x2F810, 'M', '兤'), - (0x2F811, 'M', '具'), - (0x2F812, 'M', '𠔜'), - (0x2F813, 'M', '㒹'), - (0x2F814, 'M', '內'), - (0x2F815, 'M', '再'), - (0x2F816, 'M', '𠕋'), - (0x2F817, 'M', '冗'), - (0x2F818, 'M', '冤'), - (0x2F819, 'M', '仌'), - (0x2F81A, 'M', '冬'), - (0x2F81B, 'M', '况'), - (0x2F81C, 'M', '𩇟'), - (0x2F81D, 'M', '凵'), - (0x2F81E, 'M', '刃'), - (0x2F81F, 'M', '㓟'), - (0x2F820, 'M', '刻'), - (0x2F821, 'M', '剆'), - (0x2F822, 'M', '割'), - (0x2F823, 'M', '剷'), - (0x2F824, 'M', '㔕'), - (0x2F825, 'M', '勇'), - (0x2F826, 'M', '勉'), - (0x2F827, 'M', '勤'), - (0x2F828, 'M', '勺'), - (0x2F829, 'M', '包'), - (0x2F82A, 'M', '匆'), - (0x2F82B, 'M', '北'), - (0x2F82C, 'M', '卉'), - (0x2F82D, 'M', '卑'), - (0x2F82E, 'M', '博'), - (0x2F82F, 'M', '即'), - (0x2F830, 'M', '卽'), - (0x2F831, 'M', '卿'), - (0x2F834, 'M', '𠨬'), - (0x2F835, 'M', '灰'), - (0x2F836, 'M', '及'), - (0x2F837, 'M', '叟'), - (0x2F838, 'M', '𠭣'), - (0x2F839, 'M', '叫'), - (0x2F83A, 'M', '叱'), - (0x2F83B, 'M', '吆'), - (0x2F83C, 'M', '咞'), - (0x2F83D, 'M', '吸'), - (0x2F83E, 'M', '呈'), - ] - -def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F83F, 'M', '周'), - (0x2F840, 'M', '咢'), - (0x2F841, 'M', '哶'), - (0x2F842, 'M', '唐'), - (0x2F843, 'M', '啓'), - (0x2F844, 'M', '啣'), - (0x2F845, 'M', '善'), - (0x2F847, 'M', '喙'), - (0x2F848, 'M', '喫'), - (0x2F849, 'M', '喳'), - (0x2F84A, 'M', '嗂'), - (0x2F84B, 'M', '圖'), - (0x2F84C, 'M', '嘆'), - (0x2F84D, 'M', '圗'), - (0x2F84E, 'M', '噑'), - (0x2F84F, 'M', '噴'), - (0x2F850, 'M', '切'), - (0x2F851, 'M', '壮'), - (0x2F852, 'M', '城'), - (0x2F853, 'M', '埴'), - (0x2F854, 'M', '堍'), - (0x2F855, 'M', '型'), - (0x2F856, 'M', '堲'), - (0x2F857, 'M', '報'), - (0x2F858, 'M', '墬'), - (0x2F859, 'M', '𡓤'), - (0x2F85A, 'M', '売'), - (0x2F85B, 'M', '壷'), - (0x2F85C, 'M', '夆'), - (0x2F85D, 'M', '多'), - (0x2F85E, 'M', '夢'), - (0x2F85F, 'M', '奢'), - (0x2F860, 'M', '𡚨'), - (0x2F861, 'M', '𡛪'), - (0x2F862, 'M', '姬'), - (0x2F863, 'M', '娛'), - (0x2F864, 'M', '娧'), - (0x2F865, 'M', '姘'), - (0x2F866, 'M', '婦'), - (0x2F867, 'M', '㛮'), - (0x2F868, 'X'), - (0x2F869, 'M', '嬈'), - (0x2F86A, 'M', '嬾'), - (0x2F86C, 'M', '𡧈'), - (0x2F86D, 'M', '寃'), - (0x2F86E, 'M', '寘'), - (0x2F86F, 'M', '寧'), - (0x2F870, 'M', '寳'), - (0x2F871, 'M', '𡬘'), - (0x2F872, 'M', '寿'), - (0x2F873, 'M', '将'), - (0x2F874, 'X'), - (0x2F875, 'M', '尢'), - (0x2F876, 'M', '㞁'), - (0x2F877, 'M', '屠'), - (0x2F878, 'M', '屮'), - (0x2F879, 'M', '峀'), - (0x2F87A, 'M', '岍'), - (0x2F87B, 'M', '𡷤'), - (0x2F87C, 'M', '嵃'), - (0x2F87D, 'M', '𡷦'), - (0x2F87E, 'M', '嵮'), - (0x2F87F, 'M', '嵫'), - (0x2F880, 'M', '嵼'), - (0x2F881, 'M', '巡'), - (0x2F882, 'M', '巢'), - (0x2F883, 'M', '㠯'), - (0x2F884, 'M', '巽'), - (0x2F885, 'M', '帨'), - (0x2F886, 'M', '帽'), - (0x2F887, 'M', '幩'), - (0x2F888, 'M', '㡢'), - (0x2F889, 'M', '𢆃'), - (0x2F88A, 'M', '㡼'), - (0x2F88B, 'M', '庰'), - (0x2F88C, 'M', '庳'), - (0x2F88D, 'M', '庶'), - (0x2F88E, 'M', '廊'), - (0x2F88F, 'M', '𪎒'), - (0x2F890, 'M', '廾'), - (0x2F891, 'M', '𢌱'), - (0x2F893, 'M', '舁'), - (0x2F894, 'M', '弢'), - (0x2F896, 'M', '㣇'), - (0x2F897, 'M', '𣊸'), - (0x2F898, 'M', '𦇚'), - (0x2F899, 'M', '形'), - (0x2F89A, 'M', '彫'), - (0x2F89B, 'M', '㣣'), - (0x2F89C, 'M', '徚'), - (0x2F89D, 'M', '忍'), - (0x2F89E, 'M', '志'), - (0x2F89F, 'M', '忹'), - (0x2F8A0, 'M', '悁'), - (0x2F8A1, 'M', '㤺'), - (0x2F8A2, 'M', '㤜'), - (0x2F8A3, 'M', '悔'), - (0x2F8A4, 'M', '𢛔'), - (0x2F8A5, 'M', '惇'), - (0x2F8A6, 'M', '慈'), - ] - -def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F8A7, 'M', '慌'), - (0x2F8A8, 'M', '慎'), - (0x2F8A9, 'M', '慌'), - (0x2F8AA, 'M', '慺'), - (0x2F8AB, 'M', '憎'), - (0x2F8AC, 'M', '憲'), - (0x2F8AD, 'M', '憤'), - (0x2F8AE, 'M', '憯'), - (0x2F8AF, 'M', '懞'), - (0x2F8B0, 'M', '懲'), - (0x2F8B1, 'M', '懶'), - (0x2F8B2, 'M', '成'), - (0x2F8B3, 'M', '戛'), - (0x2F8B4, 'M', '扝'), - (0x2F8B5, 'M', '抱'), - (0x2F8B6, 'M', '拔'), - (0x2F8B7, 'M', '捐'), - (0x2F8B8, 'M', '𢬌'), - (0x2F8B9, 'M', '挽'), - (0x2F8BA, 'M', '拼'), - (0x2F8BB, 'M', '捨'), - (0x2F8BC, 'M', '掃'), - (0x2F8BD, 'M', '揤'), - (0x2F8BE, 'M', '𢯱'), - (0x2F8BF, 'M', '搢'), - (0x2F8C0, 'M', '揅'), - (0x2F8C1, 'M', '掩'), - (0x2F8C2, 'M', '㨮'), - (0x2F8C3, 'M', '摩'), - (0x2F8C4, 'M', '摾'), - (0x2F8C5, 'M', '撝'), - (0x2F8C6, 'M', '摷'), - (0x2F8C7, 'M', '㩬'), - (0x2F8C8, 'M', '敏'), - (0x2F8C9, 'M', '敬'), - (0x2F8CA, 'M', '𣀊'), - (0x2F8CB, 'M', '旣'), - (0x2F8CC, 'M', '書'), - (0x2F8CD, 'M', '晉'), - (0x2F8CE, 'M', '㬙'), - (0x2F8CF, 'M', '暑'), - (0x2F8D0, 'M', '㬈'), - (0x2F8D1, 'M', '㫤'), - (0x2F8D2, 'M', '冒'), - (0x2F8D3, 'M', '冕'), - (0x2F8D4, 'M', '最'), - (0x2F8D5, 'M', '暜'), - (0x2F8D6, 'M', '肭'), - (0x2F8D7, 'M', '䏙'), - (0x2F8D8, 'M', '朗'), - (0x2F8D9, 'M', '望'), - (0x2F8DA, 'M', '朡'), - (0x2F8DB, 'M', '杞'), - (0x2F8DC, 'M', '杓'), - (0x2F8DD, 'M', '𣏃'), - (0x2F8DE, 'M', '㭉'), - (0x2F8DF, 'M', '柺'), - (0x2F8E0, 'M', '枅'), - (0x2F8E1, 'M', '桒'), - (0x2F8E2, 'M', '梅'), - (0x2F8E3, 'M', '𣑭'), - (0x2F8E4, 'M', '梎'), - (0x2F8E5, 'M', '栟'), - (0x2F8E6, 'M', '椔'), - (0x2F8E7, 'M', '㮝'), - (0x2F8E8, 'M', '楂'), - (0x2F8E9, 'M', '榣'), - (0x2F8EA, 'M', '槪'), - (0x2F8EB, 'M', '檨'), - (0x2F8EC, 'M', '𣚣'), - (0x2F8ED, 'M', '櫛'), - (0x2F8EE, 'M', '㰘'), - (0x2F8EF, 'M', '次'), - (0x2F8F0, 'M', '𣢧'), - (0x2F8F1, 'M', '歔'), - (0x2F8F2, 'M', '㱎'), - (0x2F8F3, 'M', '歲'), - (0x2F8F4, 'M', '殟'), - (0x2F8F5, 'M', '殺'), - (0x2F8F6, 'M', '殻'), - (0x2F8F7, 'M', '𣪍'), - (0x2F8F8, 'M', '𡴋'), - (0x2F8F9, 'M', '𣫺'), - (0x2F8FA, 'M', '汎'), - (0x2F8FB, 'M', '𣲼'), - (0x2F8FC, 'M', '沿'), - (0x2F8FD, 'M', '泍'), - (0x2F8FE, 'M', '汧'), - (0x2F8FF, 'M', '洖'), - (0x2F900, 'M', '派'), - (0x2F901, 'M', '海'), - (0x2F902, 'M', '流'), - (0x2F903, 'M', '浩'), - (0x2F904, 'M', '浸'), - (0x2F905, 'M', '涅'), - (0x2F906, 'M', '𣴞'), - (0x2F907, 'M', '洴'), - (0x2F908, 'M', '港'), - (0x2F909, 'M', '湮'), - (0x2F90A, 'M', '㴳'), - ] - -def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F90B, 'M', '滋'), - (0x2F90C, 'M', '滇'), - (0x2F90D, 'M', '𣻑'), - (0x2F90E, 'M', '淹'), - (0x2F90F, 'M', '潮'), - (0x2F910, 'M', '𣽞'), - (0x2F911, 'M', '𣾎'), - (0x2F912, 'M', '濆'), - (0x2F913, 'M', '瀹'), - (0x2F914, 'M', '瀞'), - (0x2F915, 'M', '瀛'), - (0x2F916, 'M', '㶖'), - (0x2F917, 'M', '灊'), - (0x2F918, 'M', '災'), - (0x2F919, 'M', '灷'), - (0x2F91A, 'M', '炭'), - (0x2F91B, 'M', '𠔥'), - (0x2F91C, 'M', '煅'), - (0x2F91D, 'M', '𤉣'), - (0x2F91E, 'M', '熜'), - (0x2F91F, 'X'), - (0x2F920, 'M', '爨'), - (0x2F921, 'M', '爵'), - (0x2F922, 'M', '牐'), - (0x2F923, 'M', '𤘈'), - (0x2F924, 'M', '犀'), - (0x2F925, 'M', '犕'), - (0x2F926, 'M', '𤜵'), - (0x2F927, 'M', '𤠔'), - (0x2F928, 'M', '獺'), - (0x2F929, 'M', '王'), - (0x2F92A, 'M', '㺬'), - (0x2F92B, 'M', '玥'), - (0x2F92C, 'M', '㺸'), - (0x2F92E, 'M', '瑇'), - (0x2F92F, 'M', '瑜'), - (0x2F930, 'M', '瑱'), - (0x2F931, 'M', '璅'), - (0x2F932, 'M', '瓊'), - (0x2F933, 'M', '㼛'), - (0x2F934, 'M', '甤'), - (0x2F935, 'M', '𤰶'), - (0x2F936, 'M', '甾'), - (0x2F937, 'M', '𤲒'), - (0x2F938, 'M', '異'), - (0x2F939, 'M', '𢆟'), - (0x2F93A, 'M', '瘐'), - (0x2F93B, 'M', '𤾡'), - (0x2F93C, 'M', '𤾸'), - (0x2F93D, 'M', '𥁄'), - (0x2F93E, 'M', '㿼'), - (0x2F93F, 'M', '䀈'), - (0x2F940, 'M', '直'), - (0x2F941, 'M', '𥃳'), - (0x2F942, 'M', '𥃲'), - (0x2F943, 'M', '𥄙'), - (0x2F944, 'M', '𥄳'), - (0x2F945, 'M', '眞'), - (0x2F946, 'M', '真'), - (0x2F948, 'M', '睊'), - (0x2F949, 'M', '䀹'), - (0x2F94A, 'M', '瞋'), - (0x2F94B, 'M', '䁆'), - (0x2F94C, 'M', '䂖'), - (0x2F94D, 'M', '𥐝'), - (0x2F94E, 'M', '硎'), - (0x2F94F, 'M', '碌'), - (0x2F950, 'M', '磌'), - (0x2F951, 'M', '䃣'), - (0x2F952, 'M', '𥘦'), - (0x2F953, 'M', '祖'), - (0x2F954, 'M', '𥚚'), - (0x2F955, 'M', '𥛅'), - (0x2F956, 'M', '福'), - (0x2F957, 'M', '秫'), - (0x2F958, 'M', '䄯'), - (0x2F959, 'M', '穀'), - (0x2F95A, 'M', '穊'), - (0x2F95B, 'M', '穏'), - (0x2F95C, 'M', '𥥼'), - (0x2F95D, 'M', '𥪧'), - (0x2F95F, 'X'), - (0x2F960, 'M', '䈂'), - (0x2F961, 'M', '𥮫'), - (0x2F962, 'M', '篆'), - (0x2F963, 'M', '築'), - (0x2F964, 'M', '䈧'), - (0x2F965, 'M', '𥲀'), - (0x2F966, 'M', '糒'), - (0x2F967, 'M', '䊠'), - (0x2F968, 'M', '糨'), - (0x2F969, 'M', '糣'), - (0x2F96A, 'M', '紀'), - (0x2F96B, 'M', '𥾆'), - (0x2F96C, 'M', '絣'), - (0x2F96D, 'M', '䌁'), - (0x2F96E, 'M', '緇'), - (0x2F96F, 'M', '縂'), - (0x2F970, 'M', '繅'), - (0x2F971, 'M', '䌴'), - ] - -def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F972, 'M', '𦈨'), - (0x2F973, 'M', '𦉇'), - (0x2F974, 'M', '䍙'), - (0x2F975, 'M', '𦋙'), - (0x2F976, 'M', '罺'), - (0x2F977, 'M', '𦌾'), - (0x2F978, 'M', '羕'), - (0x2F979, 'M', '翺'), - (0x2F97A, 'M', '者'), - (0x2F97B, 'M', '𦓚'), - (0x2F97C, 'M', '𦔣'), - (0x2F97D, 'M', '聠'), - (0x2F97E, 'M', '𦖨'), - (0x2F97F, 'M', '聰'), - (0x2F980, 'M', '𣍟'), - (0x2F981, 'M', '䏕'), - (0x2F982, 'M', '育'), - (0x2F983, 'M', '脃'), - (0x2F984, 'M', '䐋'), - (0x2F985, 'M', '脾'), - (0x2F986, 'M', '媵'), - (0x2F987, 'M', '𦞧'), - (0x2F988, 'M', '𦞵'), - (0x2F989, 'M', '𣎓'), - (0x2F98A, 'M', '𣎜'), - (0x2F98B, 'M', '舁'), - (0x2F98C, 'M', '舄'), - (0x2F98D, 'M', '辞'), - (0x2F98E, 'M', '䑫'), - (0x2F98F, 'M', '芑'), - (0x2F990, 'M', '芋'), - (0x2F991, 'M', '芝'), - (0x2F992, 'M', '劳'), - (0x2F993, 'M', '花'), - (0x2F994, 'M', '芳'), - (0x2F995, 'M', '芽'), - (0x2F996, 'M', '苦'), - (0x2F997, 'M', '𦬼'), - (0x2F998, 'M', '若'), - (0x2F999, 'M', '茝'), - (0x2F99A, 'M', '荣'), - (0x2F99B, 'M', '莭'), - (0x2F99C, 'M', '茣'), - (0x2F99D, 'M', '莽'), - (0x2F99E, 'M', '菧'), - (0x2F99F, 'M', '著'), - (0x2F9A0, 'M', '荓'), - (0x2F9A1, 'M', '菊'), - (0x2F9A2, 'M', '菌'), - (0x2F9A3, 'M', '菜'), - (0x2F9A4, 'M', '𦰶'), - (0x2F9A5, 'M', '𦵫'), - (0x2F9A6, 'M', '𦳕'), - (0x2F9A7, 'M', '䔫'), - (0x2F9A8, 'M', '蓱'), - (0x2F9A9, 'M', '蓳'), - (0x2F9AA, 'M', '蔖'), - (0x2F9AB, 'M', '𧏊'), - (0x2F9AC, 'M', '蕤'), - (0x2F9AD, 'M', '𦼬'), - (0x2F9AE, 'M', '䕝'), - (0x2F9AF, 'M', '䕡'), - (0x2F9B0, 'M', '𦾱'), - (0x2F9B1, 'M', '𧃒'), - (0x2F9B2, 'M', '䕫'), - (0x2F9B3, 'M', '虐'), - (0x2F9B4, 'M', '虜'), - (0x2F9B5, 'M', '虧'), - (0x2F9B6, 'M', '虩'), - (0x2F9B7, 'M', '蚩'), - (0x2F9B8, 'M', '蚈'), - (0x2F9B9, 'M', '蜎'), - (0x2F9BA, 'M', '蛢'), - (0x2F9BB, 'M', '蝹'), - (0x2F9BC, 'M', '蜨'), - (0x2F9BD, 'M', '蝫'), - (0x2F9BE, 'M', '螆'), - (0x2F9BF, 'X'), - (0x2F9C0, 'M', '蟡'), - (0x2F9C1, 'M', '蠁'), - (0x2F9C2, 'M', '䗹'), - (0x2F9C3, 'M', '衠'), - (0x2F9C4, 'M', '衣'), - (0x2F9C5, 'M', '𧙧'), - (0x2F9C6, 'M', '裗'), - (0x2F9C7, 'M', '裞'), - (0x2F9C8, 'M', '䘵'), - (0x2F9C9, 'M', '裺'), - (0x2F9CA, 'M', '㒻'), - (0x2F9CB, 'M', '𧢮'), - (0x2F9CC, 'M', '𧥦'), - (0x2F9CD, 'M', '䚾'), - (0x2F9CE, 'M', '䛇'), - (0x2F9CF, 'M', '誠'), - (0x2F9D0, 'M', '諭'), - (0x2F9D1, 'M', '變'), - (0x2F9D2, 'M', '豕'), - (0x2F9D3, 'M', '𧲨'), - (0x2F9D4, 'M', '貫'), - (0x2F9D5, 'M', '賁'), - ] - -def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F9D6, 'M', '贛'), - (0x2F9D7, 'M', '起'), - (0x2F9D8, 'M', '𧼯'), - (0x2F9D9, 'M', '𠠄'), - (0x2F9DA, 'M', '跋'), - (0x2F9DB, 'M', '趼'), - (0x2F9DC, 'M', '跰'), - (0x2F9DD, 'M', '𠣞'), - (0x2F9DE, 'M', '軔'), - (0x2F9DF, 'M', '輸'), - (0x2F9E0, 'M', '𨗒'), - (0x2F9E1, 'M', '𨗭'), - (0x2F9E2, 'M', '邔'), - (0x2F9E3, 'M', '郱'), - (0x2F9E4, 'M', '鄑'), - (0x2F9E5, 'M', '𨜮'), - (0x2F9E6, 'M', '鄛'), - (0x2F9E7, 'M', '鈸'), - (0x2F9E8, 'M', '鋗'), - (0x2F9E9, 'M', '鋘'), - (0x2F9EA, 'M', '鉼'), - (0x2F9EB, 'M', '鏹'), - (0x2F9EC, 'M', '鐕'), - (0x2F9ED, 'M', '𨯺'), - (0x2F9EE, 'M', '開'), - (0x2F9EF, 'M', '䦕'), - (0x2F9F0, 'M', '閷'), - (0x2F9F1, 'M', '𨵷'), - (0x2F9F2, 'M', '䧦'), - (0x2F9F3, 'M', '雃'), - (0x2F9F4, 'M', '嶲'), - (0x2F9F5, 'M', '霣'), - (0x2F9F6, 'M', '𩅅'), - (0x2F9F7, 'M', '𩈚'), - (0x2F9F8, 'M', '䩮'), - (0x2F9F9, 'M', '䩶'), - (0x2F9FA, 'M', '韠'), - (0x2F9FB, 'M', '𩐊'), - (0x2F9FC, 'M', '䪲'), - (0x2F9FD, 'M', '𩒖'), - (0x2F9FE, 'M', '頋'), - (0x2FA00, 'M', '頩'), - (0x2FA01, 'M', '𩖶'), - (0x2FA02, 'M', '飢'), - (0x2FA03, 'M', '䬳'), - (0x2FA04, 'M', '餩'), - (0x2FA05, 'M', '馧'), - (0x2FA06, 'M', '駂'), - (0x2FA07, 'M', '駾'), - (0x2FA08, 'M', '䯎'), - (0x2FA09, 'M', '𩬰'), - (0x2FA0A, 'M', '鬒'), - (0x2FA0B, 'M', '鱀'), - (0x2FA0C, 'M', '鳽'), - (0x2FA0D, 'M', '䳎'), - (0x2FA0E, 'M', '䳭'), - (0x2FA0F, 'M', '鵧'), - (0x2FA10, 'M', '𪃎'), - (0x2FA11, 'M', '䳸'), - (0x2FA12, 'M', '𪄅'), - (0x2FA13, 'M', '𪈎'), - (0x2FA14, 'M', '𪊑'), - (0x2FA15, 'M', '麻'), - (0x2FA16, 'M', '䵖'), - (0x2FA17, 'M', '黹'), - (0x2FA18, 'M', '黾'), - (0x2FA19, 'M', '鼅'), - (0x2FA1A, 'M', '鼏'), - (0x2FA1B, 'M', '鼖'), - (0x2FA1C, 'M', '鼻'), - (0x2FA1D, 'M', '𪘀'), - (0x2FA1E, 'X'), - (0x30000, 'V'), - (0x3134B, 'X'), - (0x31350, 'V'), - (0x323B0, 'X'), - (0xE0100, 'I'), - (0xE01F0, 'X'), - ] - -uts46data = tuple( - _seg_0() - + _seg_1() - + _seg_2() - + _seg_3() - + _seg_4() - + _seg_5() - + _seg_6() - + _seg_7() - + _seg_8() - + _seg_9() - + _seg_10() - + _seg_11() - + _seg_12() - + _seg_13() - + _seg_14() - + _seg_15() - + _seg_16() - + _seg_17() - + _seg_18() - + _seg_19() - + _seg_20() - + _seg_21() - + _seg_22() - + _seg_23() - + _seg_24() - + _seg_25() - + _seg_26() - + _seg_27() - + _seg_28() - + _seg_29() - + _seg_30() - + _seg_31() - + _seg_32() - + _seg_33() - + _seg_34() - + _seg_35() - + _seg_36() - + _seg_37() - + _seg_38() - + _seg_39() - + _seg_40() - + _seg_41() - + _seg_42() - + _seg_43() - + _seg_44() - + _seg_45() - + _seg_46() - + _seg_47() - + _seg_48() - + _seg_49() - + _seg_50() - + _seg_51() - + _seg_52() - + _seg_53() - + _seg_54() - + _seg_55() - + _seg_56() - + _seg_57() - + _seg_58() - + _seg_59() - + _seg_60() - + _seg_61() - + _seg_62() - + _seg_63() - + _seg_64() - + _seg_65() - + _seg_66() - + _seg_67() - + _seg_68() - + _seg_69() - + _seg_70() - + _seg_71() - + _seg_72() - + _seg_73() - + _seg_74() - + _seg_75() - + _seg_76() - + _seg_77() - + _seg_78() - + _seg_79() - + _seg_80() - + _seg_81() -) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/fpn_p5.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/fpn_p5.py deleted file mode 100644 index e991f9c7be095e2a40e12c849b35e246cd0344bd..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/fpn_p5.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import math -import fvcore.nn.weight_init as weight_init -import torch.nn.functional as F -from torch import nn - -from detectron2.layers import Conv2d, ShapeSpec, get_norm - -from detectron2.modeling.backbone import Backbone -from detectron2.modeling.backbone.fpn import FPN -from detectron2.modeling.backbone.build import BACKBONE_REGISTRY -from detectron2.modeling.backbone.resnet import build_resnet_backbone - - -class LastLevelP6P7_P5(nn.Module): - """ - This module is used in RetinaNet to generate extra layers, P6 and P7 from - C5 feature. - """ - - def __init__(self, in_channels, out_channels): - super().__init__() - self.num_levels = 2 - self.in_feature = "p5" - self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) - self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) - for module in [self.p6, self.p7]: - weight_init.c2_xavier_fill(module) - - def forward(self, c5): - p6 = self.p6(c5) - p7 = self.p7(F.relu(p6)) - return [p6, p7] - - -@BACKBONE_REGISTRY.register() -def build_p67_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. - """ - bottom_up = build_resnet_backbone(cfg, input_shape) - in_features = cfg.MODEL.FPN.IN_FEATURES - out_channels = cfg.MODEL.FPN.OUT_CHANNELS - backbone = FPN( - bottom_up=bottom_up, - in_features=in_features, - out_channels=out_channels, - norm=cfg.MODEL.FPN.NORM, - top_block=LastLevelP6P7_P5(out_channels, out_channels), - fuse_type=cfg.MODEL.FPN.FUSE_TYPE, - ) - return backbone - -@BACKBONE_REGISTRY.register() -def build_p35_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. - """ - bottom_up = build_resnet_backbone(cfg, input_shape) - in_features = cfg.MODEL.FPN.IN_FEATURES - out_channels = cfg.MODEL.FPN.OUT_CHANNELS - backbone = FPN( - bottom_up=bottom_up, - in_features=in_features, - out_channels=out_channels, - norm=cfg.MODEL.FPN.NORM, - top_block=None, - fuse_type=cfg.MODEL.FPN.FUSE_TYPE, - ) - return backbone \ No newline at end of file diff --git a/spaces/Vaibhavbrkn/Question-gen/app.py b/spaces/Vaibhavbrkn/Question-gen/app.py deleted file mode 100644 index 14fd77c5e33320016a7245590983c5ed3a5726ec..0000000000000000000000000000000000000000 --- a/spaces/Vaibhavbrkn/Question-gen/app.py +++ /dev/null @@ -1,111 +0,0 @@ -import gradio as gr -import numpy as np -from keybert import KeyBERT -import random -from transformers import ( - T5ForConditionalGeneration, - T5Tokenizer, -) -import re -import transformers -import torch - - -DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -MAX_LEN = 512 - -tokenizer = T5Tokenizer.from_pretrained('t5-base') -model = T5ForConditionalGeneration.from_pretrained( - 'Vaibhavbrkn/question-gen') -mod = KeyBERT('distilbert-base-nli-mean-tokens') -model.to(DEVICE) - -context = "The Transgender Persons Bill, 2016 was hurriedly passed in the Lok Sabha, amid much outcry from the very community it claims to protect." - - -def filter_keyword(data, ran=5): - ap = [] - real = [] - res = re.sub(r'-', ' ', data) - res = re.sub(r'[^\w\s\.\,]', '', res) - for i in range(1, 4): - ap.append(mod.extract_keywords( - res, keyphrase_ngram_range=(1, i), diversity=0.7, top_n=ran*2)) - for i in range(3): - for j in range(len(ap[i])): - if ap[i][j][0].lower() in res.lower(): - real.append(ap[i][j]) - - real = sorted(real, key=lambda x: x[1], reverse=True) - ap = [] - st = "" - for i in range(len(real)): - if real[i][0] in st: - continue - else: - ap.append(real[i]) - st += real[i][0] + " " - if len(ap) == ran: - break - - return ap - - -# FOR BAD label negative or bottom 3 - -def func(context, slide): - slide = int(slide) - randomness = 0.4 - orig = int(np.ceil(randomness * slide)) - temp = slide - orig - ap = filter_keyword(context, ran=slide*2) - outputs = [] - print(slide) - print(orig) - print(ap) - for i in range(orig): - - inputs = "context: " + context + " keyword: " + ap[i][0] - source_tokenizer = tokenizer.encode_plus( - inputs, max_length=512, pad_to_max_length=True, return_tensors="pt") - outs = model.generate(input_ids=source_tokenizer['input_ids'].to( - DEVICE), attention_mask=source_tokenizer['attention_mask'].to(DEVICE), max_length=50) - dec = [tokenizer.decode(ids) for ids in outs][0] - st = dec.replace(" ", "") - st = st.replace("", "") - if ap[i][1] > 0.0: - outputs.append((st, "Good")) - else: - outputs.append((st, "Bad")) - - del ap[: orig] - print("first",outputs) - print(temp) - - if temp > 0: - for i in range(temp): - keyword = random.choice(ap) - inputs = "context: " + context + " keyword: " + keyword[0] - source_tokenizer = tokenizer.encode_plus( - inputs, max_length=512, pad_to_max_length=True, return_tensors="pt") - outs = model.generate(input_ids=source_tokenizer['input_ids'].to( - DEVICE), attention_mask=source_tokenizer['attention_mask'].to(DEVICE), max_length=50) - dec = [tokenizer.decode(ids) for ids in outs][0] - st = dec.replace(" ", "") - st = st.replace("", "") - if keyword[1] > 0.0: - outputs.append((st, "Good")) - else: - outputs.append((st, "Bad")) - print("second",outputs) - - return outputs - - -gr.Interface(func, - [ - gr.inputs.Textbox(lines=10, label="context"), - gr.inputs.Slider(minimum=1, maximum=5, - default=1, label="No of Question"), - ], - gr.outputs.KeyValues()).launch() diff --git a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/helpers/you.py b/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/helpers/you.py deleted file mode 100644 index 02985ed14d4848c2de20a99b4771d208286a2558..0000000000000000000000000000000000000000 --- a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/helpers/you.py +++ /dev/null @@ -1,79 +0,0 @@ -import sys -import json -import urllib.parse - -from curl_cffi import requests - -config = json.loads(sys.argv[1]) -messages = config['messages'] -prompt = '' - - -def transform(messages: list) -> list: - result = [] - i = 0 - - while i < len(messages): - if messages[i]['role'] == 'user': - question = messages[i]['content'] - i += 1 - - if i < len(messages) and messages[i]['role'] == 'assistant': - answer = messages[i]['content'] - i += 1 - else: - answer = '' - - result.append({'question': question, 'answer': answer}) - - elif messages[i]['role'] == 'assistant': - result.append({'question': '', 'answer': messages[i]['content']}) - i += 1 - - elif messages[i]['role'] == 'system': - result.append({'question': messages[i]['content'], 'answer': ''}) - i += 1 - - return result - -headers = { - 'Content-Type': 'application/x-www-form-urlencoded', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', - 'Sec-Fetch-Site': 'same-origin', - 'Accept-Language': 'en-GB,en;q=0.9', - 'Sec-Fetch-Mode': 'navigate', - 'Host': 'you.com', - 'Origin': 'https://you.com', - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15', - 'Referer': 'https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA', - 'Connection': 'keep-alive', - 'Sec-Fetch-Dest': 'document', - 'Priority': 'u=0, i', -} - -if messages[-1]['role'] == 'user': - prompt = messages[-1]['content'] - messages = messages[:-1] - -params = urllib.parse.urlencode({ - 'q': prompt, - 'domain': 'youchat', - 'chat': transform(messages) -}) - -def output(chunk): - if b'"youChatToken"' in chunk: - chunk_json = json.loads(chunk.decode().split('data: ')[1]) - - print(chunk_json['youChatToken'], flush=True, end = '') - -while True: - try: - response = requests.get(f'https://you.com/api/streamingSearch?{params}', - headers=headers, content_callback=output, impersonate='safari15_5') - - exit(0) - - except Exception as e: - print('an error occured, retrying... |', e, flush=True) - continue \ No newline at end of file diff --git a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/cppipc/policy.h b/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/cppipc/policy.h deleted file mode 100644 index f88ab5d8cb343f97026966b402eaeed8831e356a..0000000000000000000000000000000000000000 --- a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/cppipc/policy.h +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include - -#include "libipc/def.h" -#include "libipc/prod_cons.h" - -#include "libipc/circ/elem_array.h" - -namespace ipc { -namespace policy { - -template