diff --git a/spaces/0xrk/gpt2/app.py b/spaces/0xrk/gpt2/app.py deleted file mode 100644 index 4205e03f91904065e1610f7e6c7b2f1de1771184..0000000000000000000000000000000000000000 --- a/spaces/0xrk/gpt2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/gpt2").launch() \ No newline at end of file diff --git a/spaces/101-5/gpt4free/g4f/Provider/Providers/ChatgptLogin.py b/spaces/101-5/gpt4free/g4f/Provider/Providers/ChatgptLogin.py deleted file mode 100644 index 9551d15dd5121c4b42f80d0ba547a10f0868563b..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/Provider/Providers/ChatgptLogin.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -from ...typing import sha256, Dict, get_type_hints -import requests -import re -import base64 - -url = 'https://chatgptlogin.ac' -model = ['gpt-3.5-turbo'] -supports_stream = False -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - def get_nonce(): - res = requests.get('https://chatgptlogin.ac/use-chatgpt-free/', headers={ - "Referer": "https://chatgptlogin.ac/use-chatgpt-free/", - "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36' - }) - - src = re.search(r'class="mwai-chat mwai-chatgpt">.*Send - - - -

- [gradio-lite demo page; HTML markup lost in extraction; recoverable text follows]
- Gradio-lite (Gradio running entirely in your browser!)
- Try it out! Once the Gradio app loads (it can take 10-15 seconds), disconnect your Wi-Fi and the machine learning model will still work!
- - - -transformers_js_py - - - -from transformers_js import import_transformers_js -import gradio as gr - -transformers = await import_transformers_js() -pipeline = transformers.pipeline -pipe = await pipeline('sentiment-analysis') - -async def classify(text): - return await pipe(text) - -demo = gr.Interface(classify, "textbox", "json", examples=["It's a happy day in the neighborhood", "I'm an evil penguin", "It wasn't a bad film."]) -demo.launch() - - - - - \ No newline at end of file diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/xaudio2/adaptation.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/xaudio2/adaptation.py deleted file mode 100644 index 0287edf478b6d825d7edba8b1e9532c25c294b8e..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/xaudio2/adaptation.py +++ /dev/null @@ -1,348 +0,0 @@ -import math - -import pyglet -from pyglet.media.drivers.base import AbstractAudioDriver, AbstractAudioPlayer, MediaEvent -from pyglet.media.drivers.listener import AbstractListener -from pyglet.util import debug_print -from . import interface - -_debug = debug_print('debug_media') - - -def _convert_coordinates(coordinates): - x, y, z = coordinates - return x, y, -z - - -class XAudio2AudioPlayer(AbstractAudioPlayer): - # Need to cache these because pyglet API allows update separately, but - # DSound requires both to be set at once. - _cone_inner_angle = 360 - _cone_outer_angle = 360 - - min_buffer_size = 9600 - - max_buffer_count = 3 # Max in queue at once, increasing may impact performance depending on buffer size. - - def __init__(self, driver, xa2_driver, source, player): - super(XAudio2AudioPlayer, self).__init__(source, player) - # We keep here a strong reference because the AudioDriver is anyway - # a singleton object which will only be deleted when the application - # shuts down. The AudioDriver does not keep a ref to the AudioPlayer. - self.driver = driver - self._xa2_driver = xa2_driver - - # If cleared, we need to check when it's done clearing. - self._flushing = False - - # If deleted, we need to make sure it's done deleting. - self._deleted = False - - # Desired play state (may be actually paused due to underrun -- not - # implemented yet). - self._playing = False - - # Theoretical write and play cursors for an infinite buffer. play - # cursor is always <= write cursor (when equal, underrun is - # happening). - self._write_cursor = 0 - self._play_cursor = 0 - - # List of (play_cursor, MediaEvent), in sort order - self._events = [] - - # List of (cursor, timestamp), in sort order (cursor gives expiry - # place of the timestamp) - self._timestamps = [] - - # This will be True if the last buffer has already been submitted. - self.buffer_end_submitted = False - - self._buffers = [] # Current buffers in queue waiting to be played. - - self._xa2_source_voice = self._xa2_driver.get_source_voice(source, self) - - self._buffer_size = int(source.audio_format.sample_rate * 2) - - def on_driver_destroy(self): - self.stop() - self._xa2_source_voice = None - - def on_driver_reset(self): - self._xa2_source_voice = self._xa2_driver.get_source_voice(self.source, self) - - # Queue up any buffers that are still in queue but weren't deleted. This does not pickup where the last sample - # played, only where the last buffer was submitted. 
As such it's possible for audio to be replayed if buffer is - # large enough. - for cx2_buffer in self._buffers: - self._xa2_source_voice.submit_buffer(cx2_buffer) - - def __del__(self): - if self._xa2_source_voice: - self._xa2_source_voice = None - - def delete(self): - """Called from Player. Docs says to cleanup resources, but other drivers wait for GC to do it?""" - if self._xa2_source_voice: - self._deleted = True - - if not self._buffers: - self._xa2_driver.return_voice(self._xa2_source_voice) - - - def play(self): - assert _debug('XAudio2 play') - - if not self._playing: - self._playing = True - if not self._flushing: - self._xa2_source_voice.play() - - assert _debug('return XAudio2 play') - - def stop(self): - assert _debug('XAudio2 stop') - - if self._playing: - self._playing = False - self.buffer_end_submitted = False - self._xa2_source_voice.stop() - - assert _debug('return XAudio2 stop') - - def clear(self): - assert _debug('XAudio2 clear') - super(XAudio2AudioPlayer, self).clear() - self._play_cursor = 0 - self._write_cursor = 0 - self.buffer_end_submitted = False - self._deleted = False - - if self._buffers: - self._flushing = True - - self._xa2_source_voice.flush() - self._buffers.clear() - del self._events[:] - del self._timestamps[:] - - def _restart(self, dt): - """Prefill audio and attempt to replay audio.""" - if self._playing and self._xa2_source_voice: - self.refill_source_player() - self._xa2_source_voice.play() - - def refill_source_player(self): - """Obtains audio data from the source, puts it into a buffer to submit to the voice. - Unlike the other drivers this does not carve pieces of audio from the buffer and slowly - consume it. This submits the buffer retrieved from the decoder in it's entirety. - """ - - buffers_queued = self._xa2_source_voice.buffers_queued - - # Free any buffers that have ended. - while len(self._buffers) > buffers_queued: - # Clean out any buffers that have played. - buffer = self._buffers.pop(0) - self._play_cursor += buffer.AudioBytes - del buffer # Does this remove AudioData within the buffer? Let GC remove or explicit remove? - - # We have to wait for all of the buffers we are flushing to end before we restart next buffer. - # When voice reaches 0 buffers, it is available for re-use. - if self._flushing: - if buffers_queued == 0: - self._flushing = False - - # This is required because the next call to play will come before all flushes are done. - # Restart at next available opportunity. - pyglet.clock.schedule_once(self._restart, 0) - return - - if self._deleted: - if buffers_queued == 0: - self._deleted = False - self._xa2_driver.return_voice(self._xa2_source_voice) - return - - # Wait for the playback to hit 0 buffers before we eos. - if self.buffer_end_submitted: - if buffers_queued == 0: - self._xa2_source_voice.stop() - MediaEvent("on_eos").sync_dispatch_to_player(self.player) - else: - current_buffers = [] - while buffers_queued < self.max_buffer_count: - audio_data = self.source.get_audio_data(self._buffer_size, 0.0) - if audio_data: - assert _debug( - 'Xaudio2: audio data - length: {}, duration: {}, buffer size: {}'.format(audio_data.length, - audio_data.duration, - self._buffer_size)) - - if audio_data.length == 0: # Sometimes audio data has 0 length at the front? - continue - - x2_buffer = self._xa2_driver.create_buffer(audio_data) - - current_buffers.append(x2_buffer) - - self._write_cursor += x2_buffer.AudioBytes # We've pushed this many bytes into the source player. 
- - self._add_audiodata_events(audio_data) - self._add_audiodata_timestamp(audio_data) - - buffers_queued += 1 - else: - # End of audio data, set last packet as end. - self.buffer_end_submitted = True - break - - # We submit the buffers here, just in-case the end of stream was found. - for cx2_buffer in current_buffers: - self._xa2_source_voice.submit_buffer(cx2_buffer) - - # Store buffers temporarily, otherwise they get GC'd. - self._buffers.extend(current_buffers) - - self._dispatch_pending_events() - - def _dispatch_new_event(self, event_name): - MediaEvent(event_name).sync_dispatch_to_player(self.player) - - def _add_audiodata_events(self, audio_data): - for event in audio_data.events: - event_cursor = self._write_cursor + event.timestamp * self.source.audio_format.bytes_per_second - assert _debug('Adding event', event, 'at', event_cursor) - self._events.append((event_cursor, event)) - - def _add_audiodata_timestamp(self, audio_data): - ts_cursor = self._write_cursor + audio_data.length - self._timestamps.append( - (ts_cursor, audio_data.timestamp + audio_data.duration)) - - def _dispatch_pending_events(self): - pending_events = [] - while self._events and self._events[0][0] <= self._play_cursor: - _, event = self._events.pop(0) - pending_events.append(event) - - assert _debug('Dispatching pending events: {}'.format(pending_events)) - assert _debug('Remaining events: {}'.format(self._events)) - - for event in pending_events: - event._sync_dispatch_to_player(self.player) - - def _cleanup_timestamps(self): - while self._timestamps and self._timestamps[0][0] < self._play_cursor: - del self._timestamps[0] - - def get_time(self): - self.update_play_cursor() - if self._timestamps: - cursor, ts = self._timestamps[0] - result = ts + (self._play_cursor - cursor) / float(self.source.audio_format.bytes_per_second) - else: - result = None - - return result - - def set_volume(self, volume): - self._xa2_source_voice.volume = volume - - def set_position(self, position): - if self._xa2_source_voice.is_emitter: - self._xa2_source_voice.position = _convert_coordinates(position) - - def set_min_distance(self, min_distance): - """Not a true min distance, but similar effect. Changes CurveDistanceScaler default is 1.""" - if self._xa2_source_voice.is_emitter: - self._xa2_source_voice.distance_scaler = min_distance - - def set_max_distance(self, max_distance): - """No such thing built into xaudio2""" - return - - def set_pitch(self, pitch): - self._xa2_source_voice.frequency = pitch - - def set_cone_orientation(self, cone_orientation): - if self._xa2_source_voice.is_emitter: - self._xa2_source_voice.cone_orientation = _convert_coordinates(cone_orientation) - - def set_cone_inner_angle(self, cone_inner_angle): - if self._xa2_source_voice.is_emitter: - self._cone_inner_angle = int(cone_inner_angle) - self._set_cone_angles() - - def set_cone_outer_angle(self, cone_outer_angle): - if self._xa2_source_voice.is_emitter: - self._cone_outer_angle = int(cone_outer_angle) - self._set_cone_angles() - - def _set_cone_angles(self): - inner = min(self._cone_inner_angle, self._cone_outer_angle) - outer = max(self._cone_inner_angle, self._cone_outer_angle) - self._xa2_source_voice.set_cone_angles(math.radians(inner), math.radians(outer)) - - def set_cone_outer_gain(self, cone_outer_gain): - if self._xa2_source_voice.is_emitter: - self._xa2_source_voice.cone_outside_volume = cone_outer_gain - - def prefill_audio(self): - # Cannot refill during a flush. Schedule will handle it. 
- if not self._flushing: - self.refill_source_player() - - -class XAudio2Driver(AbstractAudioDriver): - def __init__(self): - self._xa2_driver = interface.XAudio2Driver() - self._xa2_listener = self._xa2_driver.create_listener() - - assert self._xa2_driver is not None - assert self._xa2_listener is not None - - def __del__(self): - self.delete() - - def get_performance(self): - assert self._xa2_driver is not None - return self._xa2_driver.get_performance() - - def create_audio_player(self, source, player): - assert self._xa2_driver is not None - return XAudio2AudioPlayer(self, self._xa2_driver, source, player) - - def get_listener(self): - assert self._xa2_driver is not None - assert self._xa2_listener is not None - return XAudio2Listener(self._xa2_listener, self._xa2_driver) - - def delete(self): - self._xa2_listener = None - - -class XAudio2Listener(AbstractListener): - def __init__(self, xa2_listener, xa2_driver): - self._xa2_listener = xa2_listener - self._xa2_driver = xa2_driver - - def _set_volume(self, volume): - self._volume = volume - self._xa2_driver.volume = volume - - def _set_position(self, position): - self._position = position - self._xa2_listener.position = _convert_coordinates(position) - - def _set_forward_orientation(self, orientation): - self._forward_orientation = orientation - self._set_orientation() - - def _set_up_orientation(self, orientation): - self._up_orientation = orientation - self._set_orientation() - - def _set_orientation(self): - self._xa2_listener.orientation = _convert_coordinates(self._forward_orientation) + _convert_coordinates( - self._up_orientation) diff --git a/spaces/aditi2222/Title_generation/README.md b/spaces/aditi2222/Title_generation/README.md deleted file mode 100644 index 54164318e06f3941fb80f5e743ab3b3de0a50cf3..0000000000000000000000000000000000000000 --- a/spaces/aditi2222/Title_generation/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Title_generation -emoji: 🌍 -colorFrom: blue -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
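Context for the XAudio2 adaptation deleted above: it tracks theoretical write and play cursors over an infinite byte stream, records each MediaEvent at the byte offset of the write cursor when its audio is submitted, and dispatches an event once the play cursor passes that offset. A minimal standalone sketch of that cursor pattern follows; the class and method names are invented for illustration and none of pyglet's actual API is used.

# Hypothetical sketch of the write/play-cursor event pattern used by
# XAudio2AudioPlayer above; all names here are invented.
class CursorEvents:
    def __init__(self):
        self.write_cursor = 0  # total bytes submitted to the voice
        self.play_cursor = 0   # total bytes the voice has consumed
        self.events = []       # (byte_offset, payload), in submit order

    def submit(self, n_bytes, events=()):
        # Tag each event with the offset where its audio begins.
        for payload in events:
            self.events.append((self.write_cursor, payload))
        self.write_cursor += n_bytes

    def on_buffer_played(self, n_bytes):
        # Advance the play cursor and fire every event it has passed.
        self.play_cursor += n_bytes
        fired = []
        while self.events and self.events[0][0] <= self.play_cursor:
            fired.append(self.events.pop(0)[1])
        return fired

ce = CursorEvents()
ce.submit(4096, events=["on_segment_start"])
ce.submit(4096)
assert ce.on_buffer_played(4096) == ["on_segment_start"]
assert ce.on_buffer_played(4096) == []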
diff --git a/spaces/ai-create/re-generic/README.md b/spaces/ai-create/re-generic/README.md deleted file mode 100644 index 24f2bd0418cf962d6c2bfea9eb92e12c4b786cd3..0000000000000000000000000000000000000000 --- a/spaces/ai-create/re-generic/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Re Generic -emoji: 🦀 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/akhaliq/Real-Time-Voice-Cloning/synthesizer_preprocess_embeds.py b/spaces/akhaliq/Real-Time-Voice-Cloning/synthesizer_preprocess_embeds.py deleted file mode 100644 index 94f864d5d3c36c6177b211f5818e7c920a41cd8c..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Real-Time-Voice-Cloning/synthesizer_preprocess_embeds.py +++ /dev/null @@ -1,25 +0,0 @@ -from synthesizer.preprocess import create_embeddings -from utils.argutils import print_args -from pathlib import Path -import argparse - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Creates embeddings for the synthesizer from the LibriSpeech utterances.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("synthesizer_root", type=Path, help=\ - "Path to the synthesizer training data that contains the audios and the train.txt file. " - "If you let everything as default, it should be /SV2TTS/synthesizer/.") - parser.add_argument("-e", "--encoder_model_fpath", type=Path, - default="encoder/saved_models/pretrained.pt", help=\ - "Path your trained encoder model.") - parser.add_argument("-n", "--n_processes", type=int, default=4, help= \ - "Number of parallel processes. An encoder is created for each, so you may need to lower " - "this value on GPUs with low memory. Set it to 1 if CUDA is unhappy.") - args = parser.parse_args() - - # Preprocess the dataset - print_args(args, parser) - create_embeddings(**vars(args)) diff --git a/spaces/akhaliq/genji-python-6b/README.md b/spaces/akhaliq/genji-python-6b/README.md deleted file mode 100644 index 349ddae8404d566d206d760a18a8fff65c1adb9f..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/genji-python-6b/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Genji Python 6b -emoji: 📉 -colorFrom: purple -colorTo: indigo -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
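Context for synthesizer_preprocess_embeds.py deleted above: the script is a thin argparse wrapper around a single create_embeddings call. A hypothetical direct invocation with the script's documented defaults follows; the <datasets_root> prefix is an illustrative placeholder, not a path taken from the repo.

from pathlib import Path
from synthesizer.preprocess import create_embeddings

# Equivalent of: python synthesizer_preprocess_embeds.py <datasets_root>/SV2TTS/synthesizer
# Keyword defaults mirror the deleted script's argparse defaults.
create_embeddings(
    synthesizer_root=Path("<datasets_root>/SV2TTS/synthesizer"),  # illustrative placeholder
    encoder_model_fpath=Path("encoder/saved_models/pretrained.pt"),
    n_processes=4,
)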
diff --git a/spaces/algomuffin/jojo_fork/e4e/datasets/inference_dataset.py b/spaces/algomuffin/jojo_fork/e4e/datasets/inference_dataset.py deleted file mode 100644 index fb577d7b538d634f27013c2784d2ea32143154cb..0000000000000000000000000000000000000000 --- a/spaces/algomuffin/jojo_fork/e4e/datasets/inference_dataset.py +++ /dev/null @@ -1,25 +0,0 @@ -from torch.utils.data import Dataset -from PIL import Image -from utils import data_utils - - -class InferenceDataset(Dataset): - - def __init__(self, root, opts, transform=None, preprocess=None): - self.paths = sorted(data_utils.make_dataset(root)) - self.transform = transform - self.preprocess = preprocess - self.opts = opts - - def __len__(self): - return len(self.paths) - - def __getitem__(self, index): - from_path = self.paths[index] - if self.preprocess is not None: - from_im = self.preprocess(from_path) - else: - from_im = Image.open(from_path).convert('RGB') - if self.transform: - from_im = self.transform(from_im) - return from_im diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/pyrouge/__init__.py b/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/pyrouge/__init__.py deleted file mode 100644 index 33887bae42068a74630432946a2e16d765b6d3e1..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/pyrouge/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .Rouge155 import Rouge155 diff --git a/spaces/aliceoq/vozes-da-loirinha/lib/uvr5_pack/lib_v5/layers_new.py b/spaces/aliceoq/vozes-da-loirinha/lib/uvr5_pack/lib_v5/layers_new.py deleted file mode 100644 index 0c13e60b0dd136d9115a535101c6dbb2a25c6833..0000000000000000000000000000000000000000 --- a/spaces/aliceoq/vozes-da-loirinha/lib/uvr5_pack/lib_v5/layers_new.py +++ /dev/null @@ -1,125 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - - def __call__(self, x): - h = self.conv1(x) - h = self.conv2(h) - - return h - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - - h = self.conv1(x) - # h = self.conv2(h) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) - self.conv3 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - out = self.bottleneck(out) - - if self.dropout is not None: - out = self.dropout(out) - - return out - - -class LSTMModule(nn.Module): - def __init__(self, nin_conv, nin_lstm, nout_lstm): - super(LSTMModule, self).__init__() - self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) - self.lstm = nn.LSTM( - input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True - ) - self.dense = nn.Sequential( - nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU() - ) - - def forward(self, x): - N, _, nbins, nframes = x.size() - h = self.conv(x)[:, 0] # N, nbins, nframes - h = h.permute(2, 0, 1) # nframes, N, nbins - h, _ = self.lstm(h) - h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins - h = h.reshape(nframes, N, 1, nbins) - h = h.permute(1, 2, 3, 0) - - return h diff --git a/spaces/aliceoq/vozes-da-loirinha/lib/uvr5_pack/lib_v5/nets_537227KB.py b/spaces/aliceoq/vozes-da-loirinha/lib/uvr5_pack/lib_v5/nets_537227KB.py deleted file mode 100644 index 
a1bb530e006482704f234c2e739a695174142941..0000000000000000000000000000000000000000 --- a/spaces/aliceoq/vozes-da-loirinha/lib/uvr5_pack/lib_v5/nets_537227KB.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -import numpy as np -from torch import nn -import torch.nn.functional as F - -from . import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/anubhavmaity/minima/app.py b/spaces/anubhavmaity/minima/app.py deleted file mode 100644 index 3703e2db0009fea1686d779101b431c47248e5e9..0000000000000000000000000000000000000000 --- 
a/spaces/anubhavmaity/minima/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + "!!" - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() diff --git a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/rich.py b/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/rich.py deleted file mode 100644 index 745d8c8bc41116fe1ead73e18569c075a03450e1..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/rich.py +++ /dev/null @@ -1,2 +0,0 @@ -from rich.console import Console -console = Console() \ No newline at end of file diff --git a/spaces/aodianyun/stable-diffusion-webui/javascript/imageviewer.js b/spaces/aodianyun/stable-diffusion-webui/javascript/imageviewer.js deleted file mode 100644 index aac2ee82383881bd9d59a264d2cd2c823c2187c4..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/javascript/imageviewer.js +++ /dev/null @@ -1,285 +0,0 @@ -// A full size 'lightbox' preview modal shown when left clicking on gallery previews -function closeModal() { - gradioApp().getElementById("lightboxModal").style.display = "none"; -} - -function showModal(event) { - const source = event.target || event.srcElement; - const modalImage = gradioApp().getElementById("modalImage") - const lb = gradioApp().getElementById("lightboxModal") - modalImage.src = source.src - if (modalImage.style.display === 'none') { - lb.style.setProperty('background-image', 'url(' + source.src + ')'); - } - lb.style.display = "block"; - lb.focus() - - const tabTxt2Img = gradioApp().getElementById("tab_txt2img") - const tabImg2Img = gradioApp().getElementById("tab_img2img") - // show the save button in modal only on txt2img or img2img tabs - if (tabTxt2Img.style.display != "none" || tabImg2Img.style.display != "none") { - gradioApp().getElementById("modal_save").style.display = "inline" - } else { - gradioApp().getElementById("modal_save").style.display = "none" - } - event.stopPropagation() -} - -function negmod(n, m) { - return ((n % m) + m) % m; -} - -function updateOnBackgroundChange() { - const modalImage = gradioApp().getElementById("modalImage") - if (modalImage && modalImage.offsetParent) { - let allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2") - let currentButton = null - allcurrentButtons.forEach(function(elem) { - if (elem.parentElement.offsetParent) { - currentButton = elem; - } - }) - - if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) { - modalImage.src = currentButton.children[0].src; - if (modalImage.style.display === 'none') { - modal.style.setProperty('background-image', `url(${modalImage.src})`) - } - } - } -} - -function modalImageSwitch(offset) { - var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all") - var galleryButtons = [] - allgalleryButtons.forEach(function(elem) { - if (elem.parentElement.offsetParent) { - galleryButtons.push(elem); - } - }) - - if (galleryButtons.length > 1) { - var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2") - var currentButton = null - allcurrentButtons.forEach(function(elem) { - if (elem.parentElement.offsetParent) { - currentButton = elem; - } - }) - - var result = -1 - galleryButtons.forEach(function(v, i) { - if (v == currentButton) { - result = i - } - }) - - if (result 
!= -1) { - nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)] - nextButton.click() - const modalImage = gradioApp().getElementById("modalImage"); - const modal = gradioApp().getElementById("lightboxModal"); - modalImage.src = nextButton.children[0].src; - if (modalImage.style.display === 'none') { - modal.style.setProperty('background-image', `url(${modalImage.src})`) - } - setTimeout(function() { - modal.focus() - }, 10) - } - } -} - -function saveImage(){ - const tabTxt2Img = gradioApp().getElementById("tab_txt2img") - const tabImg2Img = gradioApp().getElementById("tab_img2img") - const saveTxt2Img = "save_txt2img" - const saveImg2Img = "save_img2img" - if (tabTxt2Img.style.display != "none") { - gradioApp().getElementById(saveTxt2Img).click() - } else if (tabImg2Img.style.display != "none") { - gradioApp().getElementById(saveImg2Img).click() - } else { - console.error("missing implementation for saving modal of this type") - } -} - -function modalSaveImage(event) { - saveImage() - event.stopPropagation() -} - -function modalNextImage(event) { - modalImageSwitch(1) - event.stopPropagation() -} - -function modalPrevImage(event) { - modalImageSwitch(-1) - event.stopPropagation() -} - -function modalKeyHandler(event) { - switch (event.key) { - case "s": - saveImage() - break; - case "ArrowLeft": - modalPrevImage(event) - break; - case "ArrowRight": - modalNextImage(event) - break; - case "Escape": - closeModal(); - break; - } -} - -function showGalleryImage() { - setTimeout(function() { - fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain') - - if (fullImg_preview != null) { - fullImg_preview.forEach(function function_name(e) { - if (e.dataset.modded) - return; - e.dataset.modded = true; - if(e && e.parentElement.tagName == 'DIV'){ - e.style.cursor='pointer' - e.style.userSelect='none' - - var isFirefox = isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1 - - // For Firefox, listening on click first switched to next image then shows the lightbox. - // If you know how to fix this without switching to mousedown event, please. - // For other browsers the event is click to make it possiblr to drag picture. - var event = isFirefox ? 
'mousedown' : 'click' - - e.addEventListener(event, function (evt) { - if(!opts.js_modal_lightbox || evt.button != 0) return; - modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed) - evt.preventDefault() - showModal(evt) - }, true); - } - }); - } - - }, 100); -} - -function modalZoomSet(modalImage, enable) { - if (enable) { - modalImage.classList.add('modalImageFullscreen'); - } else { - modalImage.classList.remove('modalImageFullscreen'); - } -} - -function modalZoomToggle(event) { - modalImage = gradioApp().getElementById("modalImage"); - modalZoomSet(modalImage, !modalImage.classList.contains('modalImageFullscreen')) - event.stopPropagation() -} - -function modalTileImageToggle(event) { - const modalImage = gradioApp().getElementById("modalImage"); - const modal = gradioApp().getElementById("lightboxModal"); - const isTiling = modalImage.style.display === 'none'; - if (isTiling) { - modalImage.style.display = 'block'; - modal.style.setProperty('background-image', 'none') - } else { - modalImage.style.display = 'none'; - modal.style.setProperty('background-image', `url(${modalImage.src})`) - } - - event.stopPropagation() -} - -function galleryImageHandler(e) { - if (e && e.parentElement.tagName == 'BUTTON') { - e.onclick = showGalleryImage; - } -} - -onUiUpdate(function() { - fullImg_preview = gradioApp().querySelectorAll('img.w-full') - if (fullImg_preview != null) { - fullImg_preview.forEach(galleryImageHandler); - } - updateOnBackgroundChange(); -}) - -document.addEventListener("DOMContentLoaded", function() { - const modalFragment = document.createDocumentFragment(); - const modal = document.createElement('div') - modal.onclick = closeModal; - modal.id = "lightboxModal"; - modal.tabIndex = 0 - modal.addEventListener('keydown', modalKeyHandler, true) - - const modalControls = document.createElement('div') - modalControls.className = 'modalControls gradio-container'; - modal.append(modalControls); - - const modalZoom = document.createElement('span') - modalZoom.className = 'modalZoom cursor'; - modalZoom.innerHTML = '⤡' - modalZoom.addEventListener('click', modalZoomToggle, true) - modalZoom.title = "Toggle zoomed view"; - modalControls.appendChild(modalZoom) - - const modalTileImage = document.createElement('span') - modalTileImage.className = 'modalTileImage cursor'; - modalTileImage.innerHTML = '⊞' - modalTileImage.addEventListener('click', modalTileImageToggle, true) - modalTileImage.title = "Preview tiling"; - modalControls.appendChild(modalTileImage) - - const modalSave = document.createElement("span") - modalSave.className = "modalSave cursor" - modalSave.id = "modal_save" - modalSave.innerHTML = "🖫" - modalSave.addEventListener("click", modalSaveImage, true) - modalSave.title = "Save Image(s)" - modalControls.appendChild(modalSave) - - const modalClose = document.createElement('span') - modalClose.className = 'modalClose cursor'; - modalClose.innerHTML = '×' - modalClose.onclick = closeModal; - modalClose.title = "Close image viewer"; - modalControls.appendChild(modalClose) - - const modalImage = document.createElement('img') - modalImage.id = 'modalImage'; - modalImage.onclick = closeModal; - modalImage.tabIndex = 0 - modalImage.addEventListener('keydown', modalKeyHandler, true) - modal.appendChild(modalImage) - - const modalPrev = document.createElement('a') - modalPrev.className = 'modalPrev'; - modalPrev.innerHTML = '❮' - modalPrev.tabIndex = 0 - modalPrev.addEventListener('click', modalPrevImage, true); - 
modalPrev.addEventListener('keydown', modalKeyHandler, true) - modal.appendChild(modalPrev) - - const modalNext = document.createElement('a') - modalNext.className = 'modalNext'; - modalNext.innerHTML = '❯' - modalNext.tabIndex = 0 - modalNext.addEventListener('click', modalNextImage, true); - modalNext.addEventListener('keydown', modalKeyHandler, true) - - modal.appendChild(modalNext) - - - gradioApp().getRootNode().appendChild(modal) - - document.body.appendChild(modalFragment); - -}); diff --git a/spaces/arch-123/bingo/src/components/chat-notification.tsx b/spaces/arch-123/bingo/src/components/chat-notification.tsx deleted file mode 100644 index 3474e522992c43a4d1d0eadcf205a9760d5b930b..0000000000000000000000000000000000000000 --- a/spaces/arch-123/bingo/src/components/chat-notification.tsx +++ /dev/null @@ -1,91 +0,0 @@ -import { useEffect } from 'react' -import Image from 'next/image' - -import IconWarning from '@/assets/images/warning.svg' -import { ChatError, ErrorCode, ChatMessageModel } from '@/lib/bots/bing/types' -import { ExternalLink } from './external-link' -import { useBing } from '@/lib/hooks/use-bing' - -export interface ChatNotificationProps extends Pick, 'bot'> { - message?: ChatMessageModel -} - -function getAction(error: ChatError, reset: () => void) { - if (error.code === ErrorCode.THROTTLE_LIMIT) { - reset() - return ( -
- You have reached the daily message limit; switch accounts or try again after a day -
- ) - } - if (error.code === ErrorCode.BING_IP_FORBIDDEN) { - return ( - - Your server or proxy has been banned; switch servers or retry through a proxy - - ) - } - if (error.code === ErrorCode.BING_TRY_LATER) { - return ( - - Failed to create the conversation; please try again later - - ) - } - if (error.code === ErrorCode.BING_FORBIDDEN) { - return ( - - Your account has been blacklisted; try switching accounts and requesting an unban - - ) - } - if (error.code === ErrorCode.CONVERSATION_LIMIT) { - return ( -
- The current topic has ended; click - Restart - to start a new conversation -
- ) - } - if (error.code === ErrorCode.BING_CAPTCHA) { - return ( - - Click here to pass the CAPTCHA check - - ) - } - if (error.code === ErrorCode.BING_UNAUTHORIZED) { - reset() - return ( - No identity information was found, or it has expired; click here to set it again - ) - } - return error.message -} - -export function ChatNotification({ message, bot }: ChatNotificationProps) { - useEffect(() => { - window.scrollBy(0, 2000) - }, [message]) - - if (!message?.error) return - - return ( -
- [stripped JSX wrapper markup lost in extraction]
- error
- {getAction(message.error, () => bot.resetConversation())}
- ) -} diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/aux_tests/test_numpy_transforms.py b/spaces/artificialguybr/video-dubbing/TTS/tests/aux_tests/test_numpy_transforms.py deleted file mode 100644 index 00597a0f88038e97ace965234703f43fad872d0f..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/tests/aux_tests/test_numpy_transforms.py +++ /dev/null @@ -1,106 +0,0 @@ -import math -import os -import unittest -from dataclasses import dataclass - -import librosa -import numpy as np -from coqpit import Coqpit - -from tests import get_tests_input_path, get_tests_output_path, get_tests_path -from TTS.utils.audio import numpy_transforms as np_transforms - -TESTS_PATH = get_tests_path() -OUT_PATH = os.path.join(get_tests_output_path(), "audio_tests") -WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav") - -os.makedirs(OUT_PATH, exist_ok=True) - - -# pylint: disable=no-self-use - - -class TestNumpyTransforms(unittest.TestCase): - def setUp(self) -> None: - @dataclass - class AudioConfig(Coqpit): - sample_rate: int = 22050 - fft_size: int = 1024 - num_mels: int = 256 - mel_fmax: int = 1800 - mel_fmin: int = 0 - hop_length: int = 256 - win_length: int = 1024 - pitch_fmax: int = 640 - pitch_fmin: int = 1 - trim_db: int = -1 - min_silence_sec: float = 0.01 - gain: float = 1.0 - base: float = 10.0 - - self.config = AudioConfig() - self.sample_wav, _ = librosa.load(WAV_FILE, sr=self.config.sample_rate) - - def test_build_mel_basis(self): - """Check if the mel basis is correctly built""" - print(" > Testing mel basis building.") - mel_basis = np_transforms.build_mel_basis(**self.config) - self.assertEqual(mel_basis.shape, (self.config.num_mels, self.config.fft_size // 2 + 1)) - - def test_millisec_to_length(self): - """Check if the conversion from milliseconds to length is correct""" - print(" > Testing millisec to length conversion.") - win_len, hop_len = np_transforms.millisec_to_length( - frame_length_ms=1000, frame_shift_ms=12.5, sample_rate=self.config.sample_rate - ) - self.assertEqual(hop_len, int(12.5 / 1000.0 * self.config.sample_rate)) - self.assertEqual(win_len, self.config.sample_rate) - - def test_amplitude_db_conversion(self): - di = np.random.rand(11) - o1 = np_transforms.amp_to_db(x=di, gain=1.0, base=10) - o2 = np_transforms.db_to_amp(x=o1, gain=1.0, base=10) - np.testing.assert_almost_equal(di, o2, decimal=5) - - def test_preemphasis_deemphasis(self): - di = np.random.rand(11) - o1 = np_transforms.preemphasis(x=di, coeff=0.95) - o2 = np_transforms.deemphasis(x=o1, coeff=0.95) - np.testing.assert_almost_equal(di, o2, decimal=5) - - def test_spec_to_mel(self): - mel_basis = np_transforms.build_mel_basis(**self.config) - spec = np.random.rand(self.config.fft_size // 2 + 1, 20) # [C, T] - mel = np_transforms.spec_to_mel(spec=spec, mel_basis=mel_basis) - self.assertEqual(mel.shape, (self.config.num_mels, 20)) - - def mel_to_spec(self): - mel_basis = np_transforms.build_mel_basis(**self.config) - mel = np.random.rand(self.config.num_mels, 20) # [C, T] - spec = np_transforms.mel_to_spec(mel=mel, mel_basis=mel_basis) - self.assertEqual(spec.shape, (self.config.fft_size // 2 + 1, 20)) - - def test_wav_to_spec(self): - spec = np_transforms.wav_to_spec(wav=self.sample_wav, **self.config) - self.assertEqual( - spec.shape, (self.config.fft_size // 2 + 1, math.ceil(self.sample_wav.shape[0] / self.config.hop_length)) - ) - - def test_wav_to_mel(self): - mel_basis = np_transforms.build_mel_basis(**self.config) - mel = 
np_transforms.wav_to_mel(wav=self.sample_wav, mel_basis=mel_basis, **self.config) - self.assertEqual( - mel.shape, (self.config.num_mels, math.ceil(self.sample_wav.shape[0] / self.config.hop_length)) - ) - - def test_compute_f0(self): - pitch = np_transforms.compute_f0(x=self.sample_wav, **self.config) - mel_basis = np_transforms.build_mel_basis(**self.config) - mel = np_transforms.wav_to_mel(wav=self.sample_wav, mel_basis=mel_basis, **self.config) - assert pitch.shape[0] == mel.shape[1] - - def test_load_wav(self): - wav = np_transforms.load_wav(filename=WAV_FILE, resample=False, sample_rate=22050) - wav_resample = np_transforms.load_wav(filename=WAV_FILE, resample=True, sample_rate=16000) - self.assertEqual(wav.shape, (self.sample_wav.shape[0],)) - self.assertNotEqual(wav_resample.shape, (self.sample_wav.shape[0],)) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/PublicKey/RSA.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/PublicKey/RSA.py deleted file mode 100644 index 0f5e58924acb3e3094e1cef154bdf1e57679eb0e..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/PublicKey/RSA.py +++ /dev/null @@ -1,802 +0,0 @@ -# -*- coding: utf-8 -*- -# =================================================================== -# -# Copyright (c) 2016, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -__all__ = ['generate', 'construct', 'import_key', - 'RsaKey', 'oid'] - -import binascii -import struct - -from Crypto import Random -from Crypto.Util.py3compat import tobytes, bord, tostr -from Crypto.Util.asn1 import DerSequence, DerNull - -from Crypto.Math.Numbers import Integer -from Crypto.Math.Primality import (test_probable_prime, - generate_probable_prime, COMPOSITE) - -from Crypto.PublicKey import (_expand_subject_public_key_info, - _create_subject_public_key_info, - _extract_subject_public_key_info) - - -class RsaKey(object): - r"""Class defining an actual RSA key. - Do not instantiate directly. - Use :func:`generate`, :func:`construct` or :func:`import_key` instead. 
- - :ivar n: RSA modulus - :vartype n: integer - - :ivar e: RSA public exponent - :vartype e: integer - - :ivar d: RSA private exponent - :vartype d: integer - - :ivar p: First factor of the RSA modulus - :vartype p: integer - - :ivar q: Second factor of the RSA modulus - :vartype q: integer - - :ivar u: Chinese remainder component (:math:`p^{-1} \text{mod } q`) - :vartype u: integer - - :undocumented: exportKey, publickey - """ - - def __init__(self, **kwargs): - """Build an RSA key. - - :Keywords: - n : integer - The modulus. - e : integer - The public exponent. - d : integer - The private exponent. Only required for private keys. - p : integer - The first factor of the modulus. Only required for private keys. - q : integer - The second factor of the modulus. Only required for private keys. - u : integer - The CRT coefficient (inverse of p modulo q). Only required for - private keys. - """ - - input_set = set(kwargs.keys()) - public_set = set(('n', 'e')) - private_set = public_set | set(('p', 'q', 'd', 'u')) - if input_set not in (private_set, public_set): - raise ValueError("Some RSA components are missing") - for component, value in kwargs.items(): - setattr(self, "_" + component, value) - if input_set == private_set: - self._dp = self._d % (self._p - 1) # = (e⁻¹) mod (p-1) - self._dq = self._d % (self._q - 1) # = (e⁻¹) mod (q-1) - - @property - def n(self): - return int(self._n) - - @property - def e(self): - return int(self._e) - - @property - def d(self): - if not self.has_private(): - raise AttributeError("No private exponent available for public keys") - return int(self._d) - - @property - def p(self): - if not self.has_private(): - raise AttributeError("No CRT component 'p' available for public keys") - return int(self._p) - - @property - def q(self): - if not self.has_private(): - raise AttributeError("No CRT component 'q' available for public keys") - return int(self._q) - - @property - def u(self): - if not self.has_private(): - raise AttributeError("No CRT component 'u' available for public keys") - return int(self._u) - - def size_in_bits(self): - """Size of the RSA modulus in bits""" - return self._n.size_in_bits() - - def size_in_bytes(self): - """The minimal amount of bytes that can hold the RSA modulus""" - return (self._n.size_in_bits() - 1) // 8 + 1 - - def _encrypt(self, plaintext): - if not 0 <= plaintext < self._n: - raise ValueError("Plaintext too large") - return int(pow(Integer(plaintext), self._e, self._n)) - - def _decrypt(self, ciphertext): - if not 0 <= ciphertext < self._n: - raise ValueError("Ciphertext too large") - if not self.has_private(): - raise TypeError("This is not a private key") - - # Blinded RSA decryption (to prevent timing attacks): - # Step 1: Generate random secret blinding factor r, - # such that 0 < r < n-1 - r = Integer.random_range(min_inclusive=1, max_exclusive=self._n) - # Step 2: Compute c' = c * r**e mod n - cp = Integer(ciphertext) * pow(r, self._e, self._n) % self._n - # Step 3: Compute m' = c'**d mod n (normal RSA decryption) - m1 = pow(cp, self._dp, self._p) - m2 = pow(cp, self._dq, self._q) - h = ((m2 - m1) * self._u) % self._q - mp = h * self._p + m1 - # Step 4: Compute m = m' * (r**(-1)) mod n - result = (r.inverse(self._n) * mp) % self._n - # Verify no faults occurred - if ciphertext != pow(result, self._e, self._n): - raise ValueError("Fault detected in RSA decryption") - return result - - def has_private(self): - """Whether this is an RSA private key""" - - return hasattr(self, "_d") - - def can_encrypt(self): # legacy - 
return True - - def can_sign(self): # legacy - return True - - def public_key(self): - """A matching RSA public key. - - Returns: - a new :class:`RsaKey` object - """ - return RsaKey(n=self._n, e=self._e) - - def __eq__(self, other): - if self.has_private() != other.has_private(): - return False - if self.n != other.n or self.e != other.e: - return False - if not self.has_private(): - return True - return (self.d == other.d) - - def __ne__(self, other): - return not (self == other) - - def __getstate__(self): - # RSA key is not pickable - from pickle import PicklingError - raise PicklingError - - def __repr__(self): - if self.has_private(): - extra = ", d=%d, p=%d, q=%d, u=%d" % (int(self._d), int(self._p), - int(self._q), int(self._u)) - else: - extra = "" - return "RsaKey(n=%d, e=%d%s)" % (int(self._n), int(self._e), extra) - - def __str__(self): - if self.has_private(): - key_type = "Private" - else: - key_type = "Public" - return "%s RSA key at 0x%X" % (key_type, id(self)) - - def export_key(self, format='PEM', passphrase=None, pkcs=1, - protection=None, randfunc=None): - """Export this RSA key. - - Args: - format (string): - The format to use for wrapping the key: - - - *'PEM'*. (*Default*) Text encoding, done according to `RFC1421`_/`RFC1423`_. - - *'DER'*. Binary encoding. - - *'OpenSSH'*. Textual encoding, done according to OpenSSH specification. - Only suitable for public keys (not private keys). - - passphrase (string): - (*For private keys only*) The pass phrase used for protecting the output. - - pkcs (integer): - (*For private keys only*) The ASN.1 structure to use for - serializing the key. Note that even in case of PEM - encoding, there is an inner ASN.1 DER structure. - - With ``pkcs=1`` (*default*), the private key is encoded in a - simple `PKCS#1`_ structure (``RSAPrivateKey``). - - With ``pkcs=8``, the private key is encoded in a `PKCS#8`_ structure - (``PrivateKeyInfo``). - - .. note:: - This parameter is ignored for a public key. - For DER and PEM, an ASN.1 DER ``SubjectPublicKeyInfo`` - structure is always used. - - protection (string): - (*For private keys only*) - The encryption scheme to use for protecting the private key. - - If ``None`` (default), the behavior depends on :attr:`format`: - - - For *'DER'*, the *PBKDF2WithHMAC-SHA1AndDES-EDE3-CBC* - scheme is used. The following operations are performed: - - 1. A 16 byte Triple DES key is derived from the passphrase - using :func:`Crypto.Protocol.KDF.PBKDF2` with 8 bytes salt, - and 1 000 iterations of :mod:`Crypto.Hash.HMAC`. - 2. The private key is encrypted using CBC. - 3. The encrypted key is encoded according to PKCS#8. - - - For *'PEM'*, the obsolete PEM encryption scheme is used. - It is based on MD5 for key derivation, and Triple DES for encryption. - - Specifying a value for :attr:`protection` is only meaningful for PKCS#8 - (that is, ``pkcs=8``) and only if a pass phrase is present too. - - The supported schemes for PKCS#8 are listed in the - :mod:`Crypto.IO.PKCS8` module (see :attr:`wrap_algo` parameter). - - randfunc (callable): - A function that provides random bytes. Only used for PEM encoding. - The default is :func:`Crypto.Random.get_random_bytes`. - - Returns: - byte string: the encoded key - - Raises: - ValueError:when the format is unknown or when you try to encrypt a private - key with *DER* format and PKCS#1. - - .. warning:: - If you don't provide a pass phrase, the private key will be - exported in the clear! - - .. _RFC1421: http://www.ietf.org/rfc/rfc1421.txt - .. 
_RFC1423: http://www.ietf.org/rfc/rfc1423.txt - .. _`PKCS#1`: http://www.ietf.org/rfc/rfc3447.txt - .. _`PKCS#8`: http://www.ietf.org/rfc/rfc5208.txt - """ - - if passphrase is not None: - passphrase = tobytes(passphrase) - - if randfunc is None: - randfunc = Random.get_random_bytes - - if format == 'OpenSSH': - e_bytes, n_bytes = [x.to_bytes() for x in (self._e, self._n)] - if bord(e_bytes[0]) & 0x80: - e_bytes = b'\x00' + e_bytes - if bord(n_bytes[0]) & 0x80: - n_bytes = b'\x00' + n_bytes - keyparts = [b'ssh-rsa', e_bytes, n_bytes] - keystring = b''.join([struct.pack(">I", len(kp)) + kp for kp in keyparts]) - return b'ssh-rsa ' + binascii.b2a_base64(keystring)[:-1] - - # DER format is always used, even in case of PEM, which simply - # encodes it into BASE64. - if self.has_private(): - binary_key = DerSequence([0, - self.n, - self.e, - self.d, - self.p, - self.q, - self.d % (self.p-1), - self.d % (self.q-1), - Integer(self.q).inverse(self.p) - ]).encode() - if pkcs == 1: - key_type = 'RSA PRIVATE KEY' - if format == 'DER' and passphrase: - raise ValueError("PKCS#1 private key cannot be encrypted") - else: # PKCS#8 - from Crypto.IO import PKCS8 - - if format == 'PEM' and protection is None: - key_type = 'PRIVATE KEY' - binary_key = PKCS8.wrap(binary_key, oid, None, - key_params=DerNull()) - else: - key_type = 'ENCRYPTED PRIVATE KEY' - if not protection: - protection = 'PBKDF2WithHMAC-SHA1AndDES-EDE3-CBC' - binary_key = PKCS8.wrap(binary_key, oid, - passphrase, protection, - key_params=DerNull()) - passphrase = None - else: - key_type = "PUBLIC KEY" - binary_key = _create_subject_public_key_info(oid, - DerSequence([self.n, - self.e]), - DerNull() - ) - - if format == 'DER': - return binary_key - if format == 'PEM': - from Crypto.IO import PEM - - pem_str = PEM.encode(binary_key, key_type, passphrase, randfunc) - return tobytes(pem_str) - - raise ValueError("Unknown key format '%s'. Cannot export the RSA key." % format) - - # Backward compatibility - exportKey = export_key - publickey = public_key - - # Methods defined in PyCrypto that we don't support anymore - def sign(self, M, K): - raise NotImplementedError("Use module Crypto.Signature.pkcs1_15 instead") - - def verify(self, M, signature): - raise NotImplementedError("Use module Crypto.Signature.pkcs1_15 instead") - - def encrypt(self, plaintext, K): - raise NotImplementedError("Use module Crypto.Cipher.PKCS1_OAEP instead") - - def decrypt(self, ciphertext): - raise NotImplementedError("Use module Crypto.Cipher.PKCS1_OAEP instead") - - def blind(self, M, B): - raise NotImplementedError - - def unblind(self, M, B): - raise NotImplementedError - - def size(self): - raise NotImplementedError - - -def generate(bits, randfunc=None, e=65537): - """Create a new RSA key pair. - - The algorithm closely follows NIST `FIPS 186-4`_ in its - sections B.3.1 and B.3.3. The modulus is the product of - two non-strong probable primes. - Each prime passes a suitable number of Miller-Rabin tests - with random bases and a single Lucas test. - - Args: - bits (integer): - Key length, or size (in bits) of the RSA modulus. - It must be at least 1024, but **2048 is recommended.** - The FIPS standard only defines 1024, 2048 and 3072. - randfunc (callable): - Function that returns random bytes. - The default is :func:`Crypto.Random.get_random_bytes`. - e (integer): - Public RSA exponent. It must be an odd positive integer. - It is typically a small number with very few ones in its - binary representation. 
- The FIPS standard requires the public exponent to be - at least 65537 (the default). - - Returns: an RSA key object (:class:`RsaKey`, with private key). - - .. _FIPS 186-4: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf - """ - - if bits < 1024: - raise ValueError("RSA modulus length must be >= 1024") - if e % 2 == 0 or e < 3: - raise ValueError("RSA public exponent must be a positive, odd integer larger than 2.") - - if randfunc is None: - randfunc = Random.get_random_bytes - - d = n = Integer(1) - e = Integer(e) - - while n.size_in_bits() != bits and d < (1 << (bits // 2)): - # Generate the prime factors of n: p and q. - # By construciton, their product is always - # 2^{bits-1} < p*q < 2^bits. - size_q = bits // 2 - size_p = bits - size_q - - min_p = min_q = (Integer(1) << (2 * size_q - 1)).sqrt() - if size_q != size_p: - min_p = (Integer(1) << (2 * size_p - 1)).sqrt() - - def filter_p(candidate): - return candidate > min_p and (candidate - 1).gcd(e) == 1 - - p = generate_probable_prime(exact_bits=size_p, - randfunc=randfunc, - prime_filter=filter_p) - - min_distance = Integer(1) << (bits // 2 - 100) - - def filter_q(candidate): - return (candidate > min_q and - (candidate - 1).gcd(e) == 1 and - abs(candidate - p) > min_distance) - - q = generate_probable_prime(exact_bits=size_q, - randfunc=randfunc, - prime_filter=filter_q) - - n = p * q - lcm = (p - 1).lcm(q - 1) - d = e.inverse(lcm) - - if p > q: - p, q = q, p - - u = p.inverse(q) - - return RsaKey(n=n, e=e, d=d, p=p, q=q, u=u) - - -def construct(rsa_components, consistency_check=True): - r"""Construct an RSA key from a tuple of valid RSA components. - - The modulus **n** must be the product of two primes. - The public exponent **e** must be odd and larger than 1. - - In case of a private key, the following equations must apply: - - .. math:: - - \begin{align} - p*q &= n \\ - e*d &\equiv 1 ( \text{mod lcm} [(p-1)(q-1)]) \\ - p*u &\equiv 1 ( \text{mod } q) - \end{align} - - Args: - rsa_components (tuple): - A tuple of integers, with at least 2 and no - more than 6 items. The items come in the following order: - - 1. RSA modulus *n*. - 2. Public exponent *e*. - 3. Private exponent *d*. - Only required if the key is private. - 4. First factor of *n* (*p*). - Optional, but the other factor *q* must also be present. - 5. Second factor of *n* (*q*). Optional. - 6. CRT coefficient *q*, that is :math:`p^{-1} \text{mod }q`. Optional. - - consistency_check (boolean): - If ``True``, the library will verify that the provided components - fulfil the main RSA properties. - - Raises: - ValueError: when the key being imported fails the most basic RSA validity checks. - - Returns: An RSA key object (:class:`RsaKey`). - """ - - class InputComps(object): - pass - - input_comps = InputComps() - for (comp, value) in zip(('n', 'e', 'd', 'p', 'q', 'u'), rsa_components): - setattr(input_comps, comp, Integer(value)) - - n = input_comps.n - e = input_comps.e - if not hasattr(input_comps, 'd'): - key = RsaKey(n=n, e=e) - else: - d = input_comps.d - if hasattr(input_comps, 'q'): - p = input_comps.p - q = input_comps.q - else: - # Compute factors p and q from the private exponent d. - # We assume that n has no more than two factors. - # See 8.2.2(i) in Handbook of Applied Cryptography. - ktot = d * e - 1 - # The quantity d*e-1 is a multiple of phi(n), even, - # and can be represented as t*2^s. - t = ktot - while t % 2 == 0: - t //= 2 - # Cycle through all multiplicative inverses in Zn. 
-            # The algorithm is non-deterministic, but there is a 50% chance
-            # that any given candidate a leads to successful factoring.
-            # See "Digitalized Signatures and Public Key Functions as Intractable
-            # as Factorization", M. Rabin, 1979
-            spotted = False
-            a = Integer(2)
-            while not spotted and a < 100:
-                k = Integer(t)
-                # Cycle through all values a^{t*2^i}=a^k
-                while k < ktot:
-                    cand = pow(a, k, n)
-                    # Check if a^k is a non-trivial root of unity (mod n)
-                    if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:
-                        # We have found a number such that (cand-1)(cand+1)=0 (mod n).
-                        # Either of the terms divides n.
-                        p = Integer(n).gcd(cand + 1)
-                        spotted = True
-                        break
-                    k *= 2
-                # This value was not any good... let's try another!
-                a += 2
-            if not spotted:
-                raise ValueError("Unable to compute factors p and q from exponent d.")
-            # Found!
-            assert ((n % p) == 0)
-            q = n // p
-
-        if hasattr(input_comps, 'u'):
-            u = input_comps.u
-        else:
-            u = p.inverse(q)
-
-        # Build key object
-        key = RsaKey(n=n, e=e, d=d, p=p, q=q, u=u)
-
-    # Verify consistency of the key
-    if consistency_check:
-
-        # Modulus and public exponent must be coprime
-        if e <= 1 or e >= n:
-            raise ValueError("Invalid RSA public exponent")
-        if Integer(n).gcd(e) != 1:
-            raise ValueError("RSA public exponent is not coprime to modulus")
-
-        # For RSA, modulus must be odd
-        if not n & 1:
-            raise ValueError("RSA modulus is not odd")
-
-        if key.has_private():
-            # Modulus and private exponent must be coprime
-            if d <= 1 or d >= n:
-                raise ValueError("Invalid RSA private exponent")
-            if Integer(n).gcd(d) != 1:
-                raise ValueError("RSA private exponent is not coprime to modulus")
-            # Modulus must be product of 2 primes
-            if p * q != n:
-                raise ValueError("RSA factors do not match modulus")
-            if test_probable_prime(p) == COMPOSITE:
-                raise ValueError("RSA factor p is composite")
-            if test_probable_prime(q) == COMPOSITE:
-                raise ValueError("RSA factor q is composite")
-            # See Carmichael's theorem
-            phi = (p - 1) * (q - 1)
-            lcm = phi // (p - 1).gcd(q - 1)
-            if (e * d % int(lcm)) != 1:
-                raise ValueError("Invalid RSA condition")
-            if hasattr(key, 'u'):
-                # CRT coefficient
-                if u <= 1 or u >= q:
-                    raise ValueError("Invalid RSA component u")
-                if (p * u % q) != 1:
-                    raise ValueError("Invalid RSA component u with p")
-
-    return key
-
-
-def _import_pkcs1_private(encoded, *kwargs):
-    # RSAPrivateKey ::= SEQUENCE {
-    #         version Version,
-    #         modulus INTEGER,            -- n
-    #         publicExponent INTEGER,     -- e
-    #         privateExponent INTEGER,    -- d
-    #         prime1 INTEGER,             -- p
-    #         prime2 INTEGER,             -- q
-    #         exponent1 INTEGER,          -- d mod (p-1)
-    #         exponent2 INTEGER,          -- d mod (q-1)
-    #         coefficient INTEGER         -- (inverse of q) mod p
-    # }
-    #
-    # Version ::= INTEGER
-    der = DerSequence().decode(encoded, nr_elements=9, only_ints_expected=True)
-    if der[0] != 0:
-        raise ValueError("No PKCS#1 encoding of an RSA private key")
-    return construct(der[1:6] + [Integer(der[4]).inverse(der[5])])
-
-
-def _import_pkcs1_public(encoded, *kwargs):
-    # RSAPublicKey ::= SEQUENCE {
-    #         modulus INTEGER,            -- n
-    #         publicExponent INTEGER      -- e
-    # }
-    der = DerSequence().decode(encoded, nr_elements=2, only_ints_expected=True)
-    return construct(der)
-
-
-def _import_subjectPublicKeyInfo(encoded, *kwargs):
-
-    algoid, encoded_key, params = _expand_subject_public_key_info(encoded)
-    if algoid != oid or params is not None:
-        raise ValueError("No RSA subjectPublicKeyInfo")
-    return _import_pkcs1_public(encoded_key)
-
-
-def _import_x509_cert(encoded, *kwargs):
-
-    sp_info = _extract_subject_public_key_info(encoded)
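-    # The certificate is only a wrapper here: the extracted
-    # subjectPublicKeyInfo blob is parsed by the same routine used for
-    # standalone public keys.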
-    return _import_subjectPublicKeyInfo(sp_info)
-
-
-def _import_pkcs8(encoded, passphrase):
-    from Crypto.IO import PKCS8
-
-    k = PKCS8.unwrap(encoded, passphrase)
-    if k[0] != oid:
-        raise ValueError("No PKCS#8 encoded RSA key")
-    return _import_keyDER(k[1], passphrase)
-
-
-def _import_keyDER(extern_key, passphrase):
-    """Import an RSA key (public or private half), encoded in DER form."""
-
-    decodings = (_import_pkcs1_private,
-                 _import_pkcs1_public,
-                 _import_subjectPublicKeyInfo,
-                 _import_x509_cert,
-                 _import_pkcs8)
-
-    for decoding in decodings:
-        try:
-            return decoding(extern_key, passphrase)
-        except ValueError:
-            pass
-
-    raise ValueError("RSA key format is not supported")
-
-
-def _import_openssh_private_rsa(data, password):
-
-    from ._openssh import (import_openssh_private_generic,
-                           read_bytes, read_string, check_padding)
-
-    ssh_name, decrypted = import_openssh_private_generic(data, password)
-
-    if ssh_name != "ssh-rsa":
-        raise ValueError("This SSH key is not RSA")
-
-    n, decrypted = read_bytes(decrypted)
-    e, decrypted = read_bytes(decrypted)
-    d, decrypted = read_bytes(decrypted)
-    iqmp, decrypted = read_bytes(decrypted)
-    p, decrypted = read_bytes(decrypted)
-    q, decrypted = read_bytes(decrypted)
-
-    _, padded = read_string(decrypted)  # Comment
-    check_padding(padded)
-
-    build = [Integer.from_bytes(x) for x in (n, e, d, q, p, iqmp)]
-    return construct(build)
-
-
-def import_key(extern_key, passphrase=None):
-    """Import an RSA key (public or private).
-
-    Args:
-      extern_key (string or byte string):
-        The RSA key to import.
-
-        The following formats are supported for an RSA **public key**:
-
-        - X.509 certificate (binary or PEM format)
-        - X.509 ``subjectPublicKeyInfo`` DER SEQUENCE (binary or PEM
-          encoding)
-        - `PKCS#1`_ ``RSAPublicKey`` DER SEQUENCE (binary or PEM encoding)
-        - An OpenSSH line (e.g. the content of ``~/.ssh/id_rsa.pub``, ASCII)
-
-        The following formats are supported for an RSA **private key**:
-
-        - PKCS#1 ``RSAPrivateKey`` DER SEQUENCE (binary or PEM encoding)
-        - `PKCS#8`_ ``PrivateKeyInfo`` or ``EncryptedPrivateKeyInfo``
-          DER SEQUENCE (binary or PEM encoding)
-        - OpenSSH (text format, introduced in `OpenSSH 6.5`_)
-
-        For details about the PEM encoding, see `RFC1421`_/`RFC1423`_.
-
-      passphrase (string or byte string):
-        For private keys only, the pass phrase that encrypts the key.
-
-    Returns: An RSA key object (:class:`RsaKey`).
-
-    Raises:
-      ValueError/IndexError/TypeError:
-        When the given key cannot be parsed (possibly because the pass
-        phrase is wrong).
-
-    .. _RFC1421: http://www.ietf.org/rfc/rfc1421.txt
-    .. _RFC1423: http://www.ietf.org/rfc/rfc1423.txt
-    .. _`PKCS#1`: http://www.ietf.org/rfc/rfc3447.txt
-    .. _`PKCS#8`: http://www.ietf.org/rfc/rfc5208.txt
-    .. _`OpenSSH 6.5`: https://flak.tedunangst.com/post/new-openssh-key-format-and-bcrypt-pbkdf
-    """
-
-    from Crypto.IO import PEM
-
-    extern_key = tobytes(extern_key)
-    if passphrase is not None:
-        passphrase = tobytes(passphrase)
-
-    if extern_key.startswith(b'-----BEGIN OPENSSH PRIVATE KEY'):
-        text_encoded = tostr(extern_key)
-        openssh_encoded, marker, enc_flag = PEM.decode(text_encoded, passphrase)
-        result = _import_openssh_private_rsa(openssh_encoded, passphrase)
-        return result
-
-    if extern_key.startswith(b'-----'):
-        # This is probably a PEM encoded key.
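-        # PEM.decode() strips the ASCII armour and, when the body is
-        # encrypted (enc_flag is set), already consumes the passphrase,
-        # which is why the passphrase must not be applied a second time below.
-        #
-        # A minimal usage sketch (the file name and passphrase are
-        # hypothetical, not part of this module):
-        #
-        #     from Crypto.PublicKey import RSA
-        #     with open("rsa_key.pem", "rb") as f:
-        #         key = RSA.import_key(f.read(), passphrase="secret")
-        #     assert key.has_private()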
-        (der, marker, enc_flag) = PEM.decode(tostr(extern_key), passphrase)
-        if enc_flag:
-            passphrase = None
-        return _import_keyDER(der, passphrase)
-
-    if extern_key.startswith(b'ssh-rsa '):
-        # This is probably an OpenSSH key
-        keystring = binascii.a2b_base64(extern_key.split(b' ')[1])
-        keyparts = []
-        while len(keystring) > 4:
-            length = struct.unpack(">I", keystring[:4])[0]
-            keyparts.append(keystring[4:4 + length])
-            keystring = keystring[4 + length:]
-        e = Integer.from_bytes(keyparts[1])
-        n = Integer.from_bytes(keyparts[2])
-        return construct([n, e])
-
-    if len(extern_key) > 0 and bord(extern_key[0]) == 0x30:
-        # This is probably a DER encoded key
-        return _import_keyDER(extern_key, passphrase)
-
-    raise ValueError("RSA key format is not supported")
-
-
-# Backward compatibility
-importKey = import_key
-
-#: `Object ID`_ for the RSA encryption algorithm. This OID often indicates
-#: a generic RSA key, even when such a key is actually used for digital
-#: signatures.
-#:
-#: .. _`Object ID`: http://www.alvestrand.no/objectid/1.2.840.113549.1.1.1.html
-oid = "1.2.840.113549.1.1.1"
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/Coroutine.c b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/Coroutine.c
deleted file mode 100644
index 1ad27df2642c23275873f9404532df163394c62b..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/Coroutine.c
+++ /dev/null
@@ -1,2391 +0,0 @@
-//////////////////// GeneratorYieldFrom.proto ////////////////////
-
-static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source);
-
-//////////////////// GeneratorYieldFrom ////////////////////
-//@requires: Generator
-
-static void __PyxPyIter_CheckErrorAndDecref(PyObject *source) {
-    PyErr_Format(PyExc_TypeError,
-                 "iter() returned non-iterator of type '%.100s'",
-                 Py_TYPE(source)->tp_name);
-    Py_DECREF(source);
-}
-
-static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) {
-    PyObject *source_gen, *retval;
-#ifdef __Pyx_Coroutine_USED
-    if (__Pyx_Coroutine_Check(source)) {
-        // TODO: this should only happen for types.coroutine()ed generators, but we can't determine that here
-        Py_INCREF(source);
-        source_gen = source;
-        retval = __Pyx_Generator_Next(source);
-    } else
-#endif
-    {
-#if CYTHON_USE_TYPE_SLOTS
-        if (likely(Py_TYPE(source)->tp_iter)) {
-            source_gen = Py_TYPE(source)->tp_iter(source);
-            if (unlikely(!source_gen))
-                return NULL;
-            if (unlikely(!PyIter_Check(source_gen))) {
-                __PyxPyIter_CheckErrorAndDecref(source_gen);
-                return NULL;
-            }
-        } else
-        // CPython also allows non-iterable sequences to be iterated over
-#endif
-        {
-            source_gen = PyObject_GetIter(source);
-            if (unlikely(!source_gen))
-                return NULL;
-        }
-        // source_gen is now the iterator, make the first next() call
-#if CYTHON_USE_TYPE_SLOTS
-        retval = Py_TYPE(source_gen)->tp_iternext(source_gen);
-#else
-        retval = PyIter_Next(source_gen);
-#endif
-    }
-    if (likely(retval)) {
-        gen->yieldfrom = source_gen;
-        return retval;
-    }
-    Py_DECREF(source_gen);
-    return NULL;
-}
-
-
-//////////////////// CoroutineYieldFrom.proto ////////////////////
-
-static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source);
-
-//////////////////// CoroutineYieldFrom ////////////////////
-//@requires: Coroutine
-//@requires: GetAwaitIter
-
-static PyObject*
__Pyx__Coroutine_Yield_From_Generic(__pyx_CoroutineObject *gen, PyObject *source) { - PyObject *retval; - PyObject *source_gen = __Pyx__Coroutine_GetAwaitableIter(source); - if (unlikely(!source_gen)) { - return NULL; - } - // source_gen is now the iterator, make the first next() call - if (__Pyx_Coroutine_Check(source_gen)) { - retval = __Pyx_Generator_Next(source_gen); - } else { -#if CYTHON_USE_TYPE_SLOTS - retval = Py_TYPE(source_gen)->tp_iternext(source_gen); -#else - retval = PyIter_Next(source_gen); -#endif - } - if (retval) { - gen->yieldfrom = source_gen; - return retval; - } - Py_DECREF(source_gen); - return NULL; -} - -static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) { - PyObject *retval; - if (__Pyx_Coroutine_Check(source)) { - if (unlikely(((__pyx_CoroutineObject*)source)->yieldfrom)) { - PyErr_SetString( - PyExc_RuntimeError, - "coroutine is being awaited already"); - return NULL; - } - retval = __Pyx_Generator_Next(source); -#ifdef __Pyx_AsyncGen_USED - // inlined "__pyx_PyAsyncGenASend" handling to avoid the series of generic calls - } else if (__pyx_PyAsyncGenASend_CheckExact(source)) { - retval = __Pyx_async_gen_asend_iternext(source); -#endif - } else { - return __Pyx__Coroutine_Yield_From_Generic(gen, source); - } - if (retval) { - Py_INCREF(source); - gen->yieldfrom = source; - } - return retval; -} - - -//////////////////// GetAwaitIter.proto //////////////////// - -static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ -static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ - -//////////////////// GetAwaitIter //////////////////// -//@requires: ObjectHandling.c::PyObjectGetMethod -//@requires: ObjectHandling.c::PyObjectCallNoArg -//@requires: ObjectHandling.c::PyObjectCallOneArg - -static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o) { -#ifdef __Pyx_Coroutine_USED - if (__Pyx_Coroutine_Check(o)) { - return __Pyx_NewRef(o); - } -#endif - return __Pyx__Coroutine_GetAwaitableIter(o); -} - - -static void __Pyx_Coroutine_AwaitableIterError(PyObject *source) { -#if PY_VERSION_HEX >= 0x030600B3 || defined(_PyErr_FormatFromCause) - _PyErr_FormatFromCause( - PyExc_TypeError, - "'async for' received an invalid object " - "from __anext__: %.100s", - Py_TYPE(source)->tp_name); -#elif PY_MAJOR_VERSION >= 3 - PyObject *exc, *val, *val2, *tb; - assert(PyErr_Occurred()); - PyErr_Fetch(&exc, &val, &tb); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - assert(!PyErr_Occurred()); - PyErr_Format( - PyExc_TypeError, - "'async for' received an invalid object " - "from __anext__: %.100s", - Py_TYPE(source)->tp_name); - - PyErr_Fetch(&exc, &val2, &tb); - PyErr_NormalizeException(&exc, &val2, &tb); - Py_INCREF(val); - PyException_SetCause(val2, val); - PyException_SetContext(val2, val); - PyErr_Restore(exc, val2, tb); -#else - // since Py2 does not have exception chaining, it's better to avoid shadowing exceptions there - source++; -#endif -} - -// adapted from genobject.c in Py3.5 -static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *obj) { - PyObject *res; -#if CYTHON_USE_ASYNC_SLOTS - __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); - if (likely(am && am->am_await)) { - res = (*am->am_await)(obj); - } else -#endif -#if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) - if (PyCoro_CheckExact(obj)) { - return __Pyx_NewRef(obj); - } else 
-#endif -#if CYTHON_COMPILING_IN_CPYTHON && defined(CO_ITERABLE_COROUTINE) - if (PyGen_CheckExact(obj) && ((PyGenObject*)obj)->gi_code && ((PyCodeObject *)((PyGenObject*)obj)->gi_code)->co_flags & CO_ITERABLE_COROUTINE) { - // Python generator marked with "@types.coroutine" decorator - return __Pyx_NewRef(obj); - } else -#endif - { - PyObject *method = NULL; - int is_method = __Pyx_PyObject_GetMethod(obj, PYIDENT("__await__"), &method); - if (likely(is_method)) { - res = __Pyx_PyObject_CallOneArg(method, obj); - } else if (likely(method)) { - res = __Pyx_PyObject_CallNoArg(method); - } else - goto slot_error; - Py_DECREF(method); - } - if (unlikely(!res)) { - // surprisingly, CPython replaces the exception here... - __Pyx_Coroutine_AwaitableIterError(obj); - goto bad; - } - if (unlikely(!PyIter_Check(res))) { - PyErr_Format(PyExc_TypeError, - "__await__() returned non-iterator of type '%.100s'", - Py_TYPE(res)->tp_name); - Py_CLEAR(res); - } else { - int is_coroutine = 0; - #ifdef __Pyx_Coroutine_USED - is_coroutine |= __Pyx_Coroutine_Check(res); - #endif - #if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) - is_coroutine |= PyCoro_CheckExact(res); - #endif - if (unlikely(is_coroutine)) { - /* __await__ must return an *iterator*, not - a coroutine or another awaitable (see PEP 492) */ - PyErr_SetString(PyExc_TypeError, - "__await__() returned a coroutine"); - Py_CLEAR(res); - } - } - return res; -slot_error: - PyErr_Format(PyExc_TypeError, - "object %.100s can't be used in 'await' expression", - Py_TYPE(obj)->tp_name); -bad: - return NULL; -} - - -//////////////////// AsyncIter.proto //////////////////// - -static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *o); /*proto*/ -static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *o); /*proto*/ - -//////////////////// AsyncIter //////////////////// -//@requires: GetAwaitIter -//@requires: ObjectHandling.c::PyObjectCallMethod0 - -static PyObject *__Pyx_Coroutine_GetAsyncIter_Generic(PyObject *obj) { -#if PY_VERSION_HEX < 0x030500B1 - { - PyObject *iter = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__aiter__")); - if (likely(iter)) - return iter; - // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__aiter__' - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) - return NULL; - } -#else - // avoid C warning about 'unused function' - if ((0)) (void) __Pyx_PyObject_CallMethod0(obj, PYIDENT("__aiter__")); -#endif - - PyErr_Format(PyExc_TypeError, "'async for' requires an object with __aiter__ method, got %.100s", - Py_TYPE(obj)->tp_name); - return NULL; -} - - -static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *obj) { -#ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(obj)) { - return __Pyx_NewRef(obj); - } -#endif -#if CYTHON_USE_ASYNC_SLOTS - { - __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); - if (likely(am && am->am_aiter)) { - return (*am->am_aiter)(obj); - } - } -#endif - return __Pyx_Coroutine_GetAsyncIter_Generic(obj); -} - - -static PyObject *__Pyx__Coroutine_AsyncIterNext(PyObject *obj) { -#if PY_VERSION_HEX < 0x030500B1 - { - PyObject *value = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__anext__")); - if (likely(value)) - return value; - } - // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__anext__' - if (PyErr_ExceptionMatches(PyExc_AttributeError)) -#endif - PyErr_Format(PyExc_TypeError, "'async for' requires an object with __anext__ method, got %.100s", - 
Py_TYPE(obj)->tp_name); - return NULL; -} - - -static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *obj) { -#ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(obj)) { - return __Pyx_async_gen_anext(obj); - } -#endif -#if CYTHON_USE_ASYNC_SLOTS - { - __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); - if (likely(am && am->am_anext)) { - return (*am->am_anext)(obj); - } - } -#endif - return __Pyx__Coroutine_AsyncIterNext(obj); -} - - -//////////////////// pep479.proto //////////////////// - -static void __Pyx_Generator_Replace_StopIteration(int in_async_gen); /*proto*/ - -//////////////////// pep479 //////////////////// -//@requires: Exceptions.c::GetException - -static void __Pyx_Generator_Replace_StopIteration(CYTHON_UNUSED int in_async_gen) { - PyObject *exc, *val, *tb, *cur_exc; - __Pyx_PyThreadState_declare - #ifdef __Pyx_StopAsyncIteration_USED - int is_async_stopiteration = 0; - #endif - - cur_exc = PyErr_Occurred(); - if (likely(!__Pyx_PyErr_GivenExceptionMatches(cur_exc, PyExc_StopIteration))) { - #ifdef __Pyx_StopAsyncIteration_USED - if (in_async_gen && unlikely(__Pyx_PyErr_GivenExceptionMatches(cur_exc, __Pyx_PyExc_StopAsyncIteration))) { - is_async_stopiteration = 1; - } else - #endif - return; - } - - __Pyx_PyThreadState_assign - // Chain exceptions by moving Stop(Async)Iteration to exc_info before creating the RuntimeError. - // In Py2.x, no chaining happens, but the exception still stays visible in exc_info. - __Pyx_GetException(&exc, &val, &tb); - Py_XDECREF(exc); - Py_XDECREF(val); - Py_XDECREF(tb); - PyErr_SetString(PyExc_RuntimeError, - #ifdef __Pyx_StopAsyncIteration_USED - is_async_stopiteration ? "async generator raised StopAsyncIteration" : - in_async_gen ? "async generator raised StopIteration" : - #endif - "generator raised StopIteration"); -} - - -//////////////////// CoroutineBase.proto //////////////////// -//@substitute: naming - -typedef PyObject *(*__pyx_coroutine_body_t)(PyObject *, PyThreadState *, PyObject *); - -#if CYTHON_USE_EXC_INFO_STACK -// See https://bugs.python.org/issue25612 -#define __Pyx_ExcInfoStruct _PyErr_StackItem -#else -// Minimal replacement struct for Py<3.7, without the Py3.7 exception state stack. 
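-// The three fields below mirror the (type, value, traceback) triple that
-// sys.exc_info() exposes, stored per generator/coroutine instance.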
-typedef struct { - PyObject *exc_type; - PyObject *exc_value; - PyObject *exc_traceback; -} __Pyx_ExcInfoStruct; -#endif - -typedef struct { - PyObject_HEAD - __pyx_coroutine_body_t body; - PyObject *closure; - __Pyx_ExcInfoStruct gi_exc_state; - PyObject *gi_weakreflist; - PyObject *classobj; - PyObject *yieldfrom; - PyObject *gi_name; - PyObject *gi_qualname; - PyObject *gi_modulename; - PyObject *gi_code; - PyObject *gi_frame; - int resume_label; - // using T_BOOL for property below requires char value - char is_running; -} __pyx_CoroutineObject; - -static __pyx_CoroutineObject *__Pyx__Coroutine_New( - PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/ - -static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( - __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/ - -static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self); -static int __Pyx_Coroutine_clear(PyObject *self); /*proto*/ -static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value); /*proto*/ -static PyObject *__Pyx_Coroutine_Close(PyObject *self); /*proto*/ -static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args); /*proto*/ - -// macros for exception state swapping instead of inline functions to make use of the local thread state context -#if CYTHON_USE_EXC_INFO_STACK -#define __Pyx_Coroutine_SwapException(self) -#define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state) -#else -#define __Pyx_Coroutine_SwapException(self) { \ - __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback); \ - __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state); \ - } -#define __Pyx_Coroutine_ResetAndClearException(self) { \ - __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback); \ - (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL; \ - } -#endif - -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyGen_FetchStopIterationValue(pvalue) \ - __Pyx_PyGen__FetchStopIterationValue($local_tstate_cname, pvalue) -#else -#define __Pyx_PyGen_FetchStopIterationValue(pvalue) \ - __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue) -#endif -static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue); /*proto*/ -static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state); /*proto*/ - - -//////////////////// Coroutine.proto //////////////////// - -#define __Pyx_Coroutine_USED -static PyTypeObject *__pyx_CoroutineType = 0; -static PyTypeObject *__pyx_CoroutineAwaitType = 0; -#define __Pyx_Coroutine_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineType) -// __Pyx_Coroutine_Check(obj): see override for IterableCoroutine below -#define __Pyx_Coroutine_Check(obj) __Pyx_Coroutine_CheckExact(obj) -#define __Pyx_CoroutineAwait_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineAwaitType) - -#define __Pyx_Coroutine_New(body, code, closure, name, qualname, module_name) \ - __Pyx__Coroutine_New(__pyx_CoroutineType, body, code, closure, name, qualname, module_name) - -static int __pyx_Coroutine_init(void); /*proto*/ -static PyObject *__Pyx__Coroutine_await(PyObject *coroutine); /*proto*/ - 
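-// Small helper object returned by __await__(): it holds a strong reference
-// to the wrapped coroutine and forwards iteration, send(), throw() and
-// close() to it (see the __Pyx_CoroutineAwait_* functions further below).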
-typedef struct { - PyObject_HEAD - PyObject *coroutine; -} __pyx_CoroutineAwaitObject; - -static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, PyObject *arg); /*proto*/ -static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args); /*proto*/ - - -//////////////////// Generator.proto //////////////////// - -#define __Pyx_Generator_USED -static PyTypeObject *__pyx_GeneratorType = 0; -#define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType) - -#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name) \ - __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name) - -static PyObject *__Pyx_Generator_Next(PyObject *self); -static int __pyx_Generator_init(void); /*proto*/ - - -//////////////////// AsyncGen //////////////////// -//@requires: AsyncGen.c::AsyncGenerator -// -> empty, only delegates to separate file - - -//////////////////// CoroutineBase //////////////////// -//@substitute: naming -//@requires: Exceptions.c::PyErrFetchRestore -//@requires: Exceptions.c::PyThreadStateGet -//@requires: Exceptions.c::SwapException -//@requires: Exceptions.c::RaiseException -//@requires: Exceptions.c::SaveResetException -//@requires: ObjectHandling.c::PyObjectCallMethod1 -//@requires: ObjectHandling.c::PyObjectGetAttrStr -//@requires: CommonStructures.c::FetchCommonType - -#include -#include -#if PY_VERSION_HEX >= 0x030b00a6 - #ifndef Py_BUILD_CORE - #define Py_BUILD_CORE 1 - #endif - #include "internal/pycore_frame.h" -#endif - -#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom) - -// If StopIteration exception is set, fetches its 'value' -// attribute if any, otherwise sets pvalue to None. -// -// Returns 0 if no exception or StopIteration is set. -// If any other exception is set, returns -1 and leaves -// pvalue unchanged. -static int __Pyx_PyGen__FetchStopIterationValue(CYTHON_UNUSED PyThreadState *$local_tstate_cname, PyObject **pvalue) { - PyObject *et, *ev, *tb; - PyObject *value = NULL; - - __Pyx_ErrFetch(&et, &ev, &tb); - - if (!et) { - Py_XDECREF(tb); - Py_XDECREF(ev); - Py_INCREF(Py_None); - *pvalue = Py_None; - return 0; - } - - // most common case: plain StopIteration without or with separate argument - if (likely(et == PyExc_StopIteration)) { - if (!ev) { - Py_INCREF(Py_None); - value = Py_None; - } -#if PY_VERSION_HEX >= 0x030300A0 - else if (Py_TYPE(ev) == (PyTypeObject*)PyExc_StopIteration) { - value = ((PyStopIterationObject *)ev)->value; - Py_INCREF(value); - Py_DECREF(ev); - } -#endif - // PyErr_SetObject() and friends put the value directly into ev - else if (unlikely(PyTuple_Check(ev))) { - // if it's a tuple, it is interpreted as separate constructor arguments (surprise!) 
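-        // e.g. PyErr_SetObject(PyExc_StopIteration, args_tuple) leaves the
-        // bare tuple in ev; its first item is taken as the return value.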
- if (PyTuple_GET_SIZE(ev) >= 1) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - value = PyTuple_GET_ITEM(ev, 0); - Py_INCREF(value); -#else - value = PySequence_ITEM(ev, 0); -#endif - } else { - Py_INCREF(Py_None); - value = Py_None; - } - Py_DECREF(ev); - } - else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) { - // 'steal' reference to ev - value = ev; - } - if (likely(value)) { - Py_XDECREF(tb); - Py_DECREF(et); - *pvalue = value; - return 0; - } - } else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) { - __Pyx_ErrRestore(et, ev, tb); - return -1; - } - - // otherwise: normalise and check what that gives us - PyErr_NormalizeException(&et, &ev, &tb); - if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) { - // looks like normalisation failed - raise the new exception - __Pyx_ErrRestore(et, ev, tb); - return -1; - } - Py_XDECREF(tb); - Py_DECREF(et); -#if PY_VERSION_HEX >= 0x030300A0 - value = ((PyStopIterationObject *)ev)->value; - Py_INCREF(value); - Py_DECREF(ev); -#else - { - PyObject* args = __Pyx_PyObject_GetAttrStr(ev, PYIDENT("args")); - Py_DECREF(ev); - if (likely(args)) { - value = PySequence_GetItem(args, 0); - Py_DECREF(args); - } - if (unlikely(!value)) { - __Pyx_ErrRestore(NULL, NULL, NULL); - Py_INCREF(Py_None); - value = Py_None; - } - } -#endif - *pvalue = value; - return 0; -} - -static CYTHON_INLINE -void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) { - PyObject *t, *v, *tb; - t = exc_state->exc_type; - v = exc_state->exc_value; - tb = exc_state->exc_traceback; - - exc_state->exc_type = NULL; - exc_state->exc_value = NULL; - exc_state->exc_traceback = NULL; - - Py_XDECREF(t); - Py_XDECREF(v); - Py_XDECREF(tb); -} - -#define __Pyx_Coroutine_AlreadyRunningError(gen) (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL) -static void __Pyx__Coroutine_AlreadyRunningError(CYTHON_UNUSED __pyx_CoroutineObject *gen) { - const char *msg; - if ((0)) { - #ifdef __Pyx_Coroutine_USED - } else if (__Pyx_Coroutine_Check((PyObject*)gen)) { - msg = "coroutine already executing"; - #endif - #ifdef __Pyx_AsyncGen_USED - } else if (__Pyx_AsyncGen_CheckExact((PyObject*)gen)) { - msg = "async generator already executing"; - #endif - } else { - msg = "generator already executing"; - } - PyErr_SetString(PyExc_ValueError, msg); -} - -#define __Pyx_Coroutine_NotStartedError(gen) (__Pyx__Coroutine_NotStartedError(gen), (PyObject*)NULL) -static void __Pyx__Coroutine_NotStartedError(CYTHON_UNUSED PyObject *gen) { - const char *msg; - if ((0)) { - #ifdef __Pyx_Coroutine_USED - } else if (__Pyx_Coroutine_Check(gen)) { - msg = "can't send non-None value to a just-started coroutine"; - #endif - #ifdef __Pyx_AsyncGen_USED - } else if (__Pyx_AsyncGen_CheckExact(gen)) { - msg = "can't send non-None value to a just-started async generator"; - #endif - } else { - msg = "can't send non-None value to a just-started generator"; - } - PyErr_SetString(PyExc_TypeError, msg); -} - -#define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing) (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL) -static void __Pyx__Coroutine_AlreadyTerminatedError(CYTHON_UNUSED PyObject *gen, PyObject *value, CYTHON_UNUSED int closing) { - #ifdef __Pyx_Coroutine_USED - if (!closing && __Pyx_Coroutine_Check(gen)) { - // `self` is an exhausted coroutine: raise an error, - // except when called from gen_close(), which should - // always be a silent method. 
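-        // (CPython raises the same RuntimeError when a coroutine is awaited twice.)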
- PyErr_SetString(PyExc_RuntimeError, "cannot reuse already awaited coroutine"); - } else - #endif - if (value) { - // `gen` is an exhausted generator: - // only set exception if called from send(). - #ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(gen)) - PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); - else - #endif - PyErr_SetNone(PyExc_StopIteration); - } -} - -static -PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, int closing) { - __Pyx_PyThreadState_declare - PyThreadState *tstate; - __Pyx_ExcInfoStruct *exc_state; - PyObject *retval; - - assert(!self->is_running); - - if (unlikely(self->resume_label == 0)) { - if (unlikely(value && value != Py_None)) { - return __Pyx_Coroutine_NotStartedError((PyObject*)self); - } - } - - if (unlikely(self->resume_label == -1)) { - return __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing); - } - -#if CYTHON_FAST_THREAD_STATE - __Pyx_PyThreadState_assign - tstate = $local_tstate_cname; -#else - tstate = __Pyx_PyThreadState_Current; -#endif - - // Traceback/Frame rules pre-Py3.7: - // - on entry, save external exception state in self->gi_exc_state, restore it on exit - // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else - // - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame - // - on exit, clear "f_back" of internal exception traceback - // - do not touch external frames and tracebacks - - // Traceback/Frame rules for Py3.7+ (CYTHON_USE_EXC_INFO_STACK): - // - on entry, push internal exception state in self->gi_exc_state on the exception stack - // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else - // - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame - // - on exit, clear "f_back" of internal exception traceback - // - do not touch external frames and tracebacks - - exc_state = &self->gi_exc_state; - if (exc_state->exc_type) { - #if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON - // FIXME: what to do in PyPy? - #else - // Generators always return to their most recent caller, not - // necessarily their creator. - if (exc_state->exc_traceback) { - PyTracebackObject *tb = (PyTracebackObject *) exc_state->exc_traceback; - PyFrameObject *f = tb->tb_frame; - - assert(f->f_back == NULL); - #if PY_VERSION_HEX >= 0x030B00A1 - // PyThreadState_GetFrame returns NULL if there isn't a current frame - // which is a valid state so no need to check - f->f_back = PyThreadState_GetFrame(tstate); - #else - Py_XINCREF(tstate->frame); - f->f_back = tstate->frame; - #endif - } - #endif - } - -#if CYTHON_USE_EXC_INFO_STACK - // See https://bugs.python.org/issue25612 - exc_state->previous_item = tstate->exc_info; - tstate->exc_info = exc_state; -#else - if (exc_state->exc_type) { - // We were in an except handler when we left, - // restore the exception state which was put aside. 
- __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); - // self->exc_* now holds the exception state of the caller - } else { - // save away the exception state of the caller - __Pyx_Coroutine_ExceptionClear(exc_state); - __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); - } -#endif - - self->is_running = 1; - retval = self->body((PyObject *) self, tstate, value); - self->is_running = 0; - -#if CYTHON_USE_EXC_INFO_STACK - // See https://bugs.python.org/issue25612 - exc_state = &self->gi_exc_state; - tstate->exc_info = exc_state->previous_item; - exc_state->previous_item = NULL; - // Cut off the exception frame chain so that we can reconnect it on re-entry above. - __Pyx_Coroutine_ResetFrameBackpointer(exc_state); -#endif - - return retval; -} - -static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) { - // Don't keep the reference to f_back any longer than necessary. It - // may keep a chain of frames alive or it could create a reference - // cycle. - PyObject *exc_tb = exc_state->exc_traceback; - - if (likely(exc_tb)) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON - // FIXME: what to do in PyPy? -#else - PyTracebackObject *tb = (PyTracebackObject *) exc_tb; - PyFrameObject *f = tb->tb_frame; - Py_CLEAR(f->f_back); -#endif - } -} - -static CYTHON_INLINE -PyObject *__Pyx_Coroutine_MethodReturn(CYTHON_UNUSED PyObject* gen, PyObject *retval) { - if (unlikely(!retval)) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (!__Pyx_PyErr_Occurred()) { - // method call must not terminate with NULL without setting an exception - PyObject *exc = PyExc_StopIteration; - #ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(gen)) - exc = __Pyx_PyExc_StopAsyncIteration; - #endif - __Pyx_PyErr_SetNone(exc); - } - } - return retval; -} - -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) -static CYTHON_INLINE -PyObject *__Pyx_PyGen_Send(PyGenObject *gen, PyObject *arg) { -#if PY_VERSION_HEX <= 0x030A00A1 - return _PyGen_Send(gen, arg); -#else - PyObject *result; - // PyIter_Send() asserts non-NULL arg - if (PyIter_Send((PyObject*)gen, arg ? arg : Py_None, &result) == PYGEN_RETURN) { - if (PyAsyncGen_CheckExact(gen)) { - assert(result == Py_None); - PyErr_SetNone(PyExc_StopAsyncIteration); - } - else if (result == Py_None) { - PyErr_SetNone(PyExc_StopIteration); - } - else { - _PyGen_SetStopIterationValue(result); - } - Py_CLEAR(result); - } - return result; -#endif -} -#endif - -static CYTHON_INLINE -PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) { - PyObject *ret; - PyObject *val = NULL; - __Pyx_Coroutine_Undelegate(gen); - __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val); - // val == NULL on failure => pass on exception - ret = __Pyx_Coroutine_SendEx(gen, val, 0); - Py_XDECREF(val); - return ret; -} - -static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) { - PyObject *retval; - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; - PyObject *yf = gen->yieldfrom; - if (unlikely(gen->is_running)) - return __Pyx_Coroutine_AlreadyRunningError(gen); - if (yf) { - PyObject *ret; - // FIXME: does this really need an INCREF() ? 
- //Py_INCREF(yf); - gen->is_running = 1; - #ifdef __Pyx_Generator_USED - if (__Pyx_Generator_CheckExact(yf)) { - ret = __Pyx_Coroutine_Send(yf, value); - } else - #endif - #ifdef __Pyx_Coroutine_USED - if (__Pyx_Coroutine_Check(yf)) { - ret = __Pyx_Coroutine_Send(yf, value); - } else - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_PyAsyncGenASend_CheckExact(yf)) { - ret = __Pyx_async_gen_asend_send(yf, value); - } else - #endif - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) - // _PyGen_Send() is not exported before Py3.6 - if (PyGen_CheckExact(yf)) { - ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); - } else - #endif - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03050000 && defined(PyCoro_CheckExact) && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) - // _PyGen_Send() is not exported before Py3.6 - if (PyCoro_CheckExact(yf)) { - ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); - } else - #endif - { - if (value == Py_None) - ret = Py_TYPE(yf)->tp_iternext(yf); - else - ret = __Pyx_PyObject_CallMethod1(yf, PYIDENT("send"), value); - } - gen->is_running = 0; - //Py_DECREF(yf); - if (likely(ret)) { - return ret; - } - retval = __Pyx_Coroutine_FinishDelegation(gen); - } else { - retval = __Pyx_Coroutine_SendEx(gen, value, 0); - } - return __Pyx_Coroutine_MethodReturn(self, retval); -} - -// This helper function is used by gen_close and gen_throw to -// close a subiterator being delegated to by yield-from. -static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) { - PyObject *retval = NULL; - int err = 0; - - #ifdef __Pyx_Generator_USED - if (__Pyx_Generator_CheckExact(yf)) { - retval = __Pyx_Coroutine_Close(yf); - if (!retval) - return -1; - } else - #endif - #ifdef __Pyx_Coroutine_USED - if (__Pyx_Coroutine_Check(yf)) { - retval = __Pyx_Coroutine_Close(yf); - if (!retval) - return -1; - } else - if (__Pyx_CoroutineAwait_CheckExact(yf)) { - retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL); - if (!retval) - return -1; - } else - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_PyAsyncGenASend_CheckExact(yf)) { - retval = __Pyx_async_gen_asend_close(yf, NULL); - // cannot fail - } else - if (__pyx_PyAsyncGenAThrow_CheckExact(yf)) { - retval = __Pyx_async_gen_athrow_close(yf, NULL); - // cannot fail - } else - #endif - { - PyObject *meth; - gen->is_running = 1; - meth = __Pyx_PyObject_GetAttrStr(yf, PYIDENT("close")); - if (unlikely(!meth)) { - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_WriteUnraisable(yf); - } - PyErr_Clear(); - } else { - retval = PyObject_CallFunction(meth, NULL); - Py_DECREF(meth); - if (!retval) - err = -1; - } - gen->is_running = 0; - } - Py_XDECREF(retval); - return err; -} - -static PyObject *__Pyx_Generator_Next(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; - PyObject *yf = gen->yieldfrom; - if (unlikely(gen->is_running)) - return __Pyx_Coroutine_AlreadyRunningError(gen); - if (yf) { - PyObject *ret; - // FIXME: does this really need an INCREF() ? 
- //Py_INCREF(yf); - // YieldFrom code ensures that yf is an iterator - gen->is_running = 1; - #ifdef __Pyx_Generator_USED - if (__Pyx_Generator_CheckExact(yf)) { - ret = __Pyx_Generator_Next(yf); - } else - #endif - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) - // _PyGen_Send() is not exported before Py3.6 - if (PyGen_CheckExact(yf)) { - ret = __Pyx_PyGen_Send((PyGenObject*)yf, NULL); - } else - #endif - #ifdef __Pyx_Coroutine_USED - if (__Pyx_Coroutine_Check(yf)) { - ret = __Pyx_Coroutine_Send(yf, Py_None); - } else - #endif - ret = Py_TYPE(yf)->tp_iternext(yf); - gen->is_running = 0; - //Py_DECREF(yf); - if (likely(ret)) { - return ret; - } - return __Pyx_Coroutine_FinishDelegation(gen); - } - return __Pyx_Coroutine_SendEx(gen, Py_None, 0); -} - -static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, CYTHON_UNUSED PyObject *arg) { - return __Pyx_Coroutine_Close(self); -} - -static PyObject *__Pyx_Coroutine_Close(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - PyObject *retval, *raised_exception; - PyObject *yf = gen->yieldfrom; - int err = 0; - - if (unlikely(gen->is_running)) - return __Pyx_Coroutine_AlreadyRunningError(gen); - - if (yf) { - Py_INCREF(yf); - err = __Pyx_Coroutine_CloseIter(gen, yf); - __Pyx_Coroutine_Undelegate(gen); - Py_DECREF(yf); - } - if (err == 0) - PyErr_SetNone(PyExc_GeneratorExit); - retval = __Pyx_Coroutine_SendEx(gen, NULL, 1); - if (unlikely(retval)) { - const char *msg; - Py_DECREF(retval); - if ((0)) { - #ifdef __Pyx_Coroutine_USED - } else if (__Pyx_Coroutine_Check(self)) { - msg = "coroutine ignored GeneratorExit"; - #endif - #ifdef __Pyx_AsyncGen_USED - } else if (__Pyx_AsyncGen_CheckExact(self)) { -#if PY_VERSION_HEX < 0x03060000 - msg = "async generator ignored GeneratorExit - might require Python 3.6+ finalisation (PEP 525)"; -#else - msg = "async generator ignored GeneratorExit"; -#endif - #endif - } else { - msg = "generator ignored GeneratorExit"; - } - PyErr_SetString(PyExc_RuntimeError, msg); - return NULL; - } - raised_exception = PyErr_Occurred(); - if (likely(!raised_exception || __Pyx_PyErr_GivenExceptionMatches2(raised_exception, PyExc_GeneratorExit, PyExc_StopIteration))) { - // ignore these errors - if (raised_exception) PyErr_Clear(); - Py_INCREF(Py_None); - return Py_None; - } - return NULL; -} - -static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb, - PyObject *args, int close_on_genexit) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - PyObject *yf = gen->yieldfrom; - - if (unlikely(gen->is_running)) - return __Pyx_Coroutine_AlreadyRunningError(gen); - - if (yf) { - PyObject *ret; - Py_INCREF(yf); - if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && close_on_genexit) { - // Asynchronous generators *should not* be closed right away. - // We have to allow some awaits to work it through, hence the - // `close_on_genexit` parameter here. 
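-            // With close_on_genexit set, GeneratorExit first closes the
-            // delegated subiterator and is then re-raised in this
-            // generator at throw_here below.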
- int err = __Pyx_Coroutine_CloseIter(gen, yf); - Py_DECREF(yf); - __Pyx_Coroutine_Undelegate(gen); - if (err < 0) - return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); - goto throw_here; - } - gen->is_running = 1; - if (0 - #ifdef __Pyx_Generator_USED - || __Pyx_Generator_CheckExact(yf) - #endif - #ifdef __Pyx_Coroutine_USED - || __Pyx_Coroutine_Check(yf) - #endif - ) { - ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit); - #ifdef __Pyx_Coroutine_USED - } else if (__Pyx_CoroutineAwait_CheckExact(yf)) { - ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit); - #endif - } else { - PyObject *meth = __Pyx_PyObject_GetAttrStr(yf, PYIDENT("throw")); - if (unlikely(!meth)) { - Py_DECREF(yf); - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { - gen->is_running = 0; - return NULL; - } - PyErr_Clear(); - __Pyx_Coroutine_Undelegate(gen); - gen->is_running = 0; - goto throw_here; - } - if (likely(args)) { - ret = PyObject_CallObject(meth, args); - } else { - // "tb" or even "val" might be NULL, but that also correctly terminates the argument list - ret = PyObject_CallFunctionObjArgs(meth, typ, val, tb, NULL); - } - Py_DECREF(meth); - } - gen->is_running = 0; - Py_DECREF(yf); - if (!ret) { - ret = __Pyx_Coroutine_FinishDelegation(gen); - } - return __Pyx_Coroutine_MethodReturn(self, ret); - } -throw_here: - __Pyx_Raise(typ, val, tb, NULL); - return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); -} - -static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) { - PyObject *typ; - PyObject *val = NULL; - PyObject *tb = NULL; - - if (!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb)) - return NULL; - - return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1); -} - -static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) { - Py_VISIT(exc_state->exc_type); - Py_VISIT(exc_state->exc_value); - Py_VISIT(exc_state->exc_traceback); - return 0; -} - -static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) { - Py_VISIT(gen->closure); - Py_VISIT(gen->classobj); - Py_VISIT(gen->yieldfrom); - return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg); -} - -static int __Pyx_Coroutine_clear(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - - Py_CLEAR(gen->closure); - Py_CLEAR(gen->classobj); - Py_CLEAR(gen->yieldfrom); - __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state); -#ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(self)) { - Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer); - } -#endif - Py_CLEAR(gen->gi_code); - Py_CLEAR(gen->gi_frame); - Py_CLEAR(gen->gi_name); - Py_CLEAR(gen->gi_qualname); - Py_CLEAR(gen->gi_modulename); - return 0; -} - -static void __Pyx_Coroutine_dealloc(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - - PyObject_GC_UnTrack(gen); - if (gen->gi_weakreflist != NULL) - PyObject_ClearWeakRefs(self); - - if (gen->resume_label >= 0) { - // Generator is paused or unstarted, so we need to close - PyObject_GC_Track(self); -#if PY_VERSION_HEX >= 0x030400a1 && CYTHON_USE_TP_FINALIZE - if (PyObject_CallFinalizerFromDealloc(self)) -#else - Py_TYPE(gen)->tp_del(self); - if (Py_REFCNT(self) > 0) -#endif - { - // resurrected. 
:( - return; - } - PyObject_GC_UnTrack(self); - } - -#ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(self)) { - /* We have to handle this case for asynchronous generators - right here, because this code has to be between UNTRACK - and GC_Del. */ - Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer); - } -#endif - __Pyx_Coroutine_clear(self); - PyObject_GC_Del(gen); -} - -static void __Pyx_Coroutine_del(PyObject *self) { - PyObject *error_type, *error_value, *error_traceback; - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - __Pyx_PyThreadState_declare - - if (gen->resume_label < 0) { - // already terminated => nothing to clean up - return; - } - -#if !CYTHON_USE_TP_FINALIZE - // Temporarily resurrect the object. - assert(self->ob_refcnt == 0); - __Pyx_SET_REFCNT(self, 1); -#endif - - __Pyx_PyThreadState_assign - - // Save the current exception, if any. - __Pyx_ErrFetch(&error_type, &error_value, &error_traceback); - -#ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(self)) { - __pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self; - PyObject *finalizer = agen->ag_finalizer; - if (finalizer && !agen->ag_closed) { - PyObject *res = __Pyx_PyObject_CallOneArg(finalizer, self); - if (unlikely(!res)) { - PyErr_WriteUnraisable(self); - } else { - Py_DECREF(res); - } - // Restore the saved exception. - __Pyx_ErrRestore(error_type, error_value, error_traceback); - return; - } - } -#endif - - if (unlikely(gen->resume_label == 0 && !error_value)) { -#ifdef __Pyx_Coroutine_USED -#ifdef __Pyx_Generator_USED - // only warn about (async) coroutines - if (!__Pyx_Generator_CheckExact(self)) -#endif - { - // untrack dead object as we are executing Python code (which might trigger GC) - PyObject_GC_UnTrack(self); -#if PY_MAJOR_VERSION >= 3 /* PY_VERSION_HEX >= 0x03030000*/ || defined(PyErr_WarnFormat) - if (unlikely(PyErr_WarnFormat(PyExc_RuntimeWarning, 1, "coroutine '%.50S' was never awaited", gen->gi_qualname) < 0)) - PyErr_WriteUnraisable(self); -#else - {PyObject *msg; - char *cmsg; - #if CYTHON_COMPILING_IN_PYPY - msg = NULL; - cmsg = (char*) "coroutine was never awaited"; - #else - char *cname; - PyObject *qualname; - qualname = gen->gi_qualname; - cname = PyString_AS_STRING(qualname); - msg = PyString_FromFormat("coroutine '%.50s' was never awaited", cname); - - if (unlikely(!msg)) { - PyErr_Clear(); - cmsg = (char*) "coroutine was never awaited"; - } else { - cmsg = PyString_AS_STRING(msg); - } - #endif - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, cmsg, 1) < 0)) - PyErr_WriteUnraisable(self); - Py_XDECREF(msg);} -#endif - PyObject_GC_Track(self); - } -#endif /*__Pyx_Coroutine_USED*/ - } else { - PyObject *res = __Pyx_Coroutine_Close(self); - if (unlikely(!res)) { - if (PyErr_Occurred()) - PyErr_WriteUnraisable(self); - } else { - Py_DECREF(res); - } - } - - // Restore the saved exception. - __Pyx_ErrRestore(error_type, error_value, error_traceback); - -#if !CYTHON_USE_TP_FINALIZE - // Undo the temporary resurrection; can't use DECREF here, it would - // cause a recursive call. - assert(Py_REFCNT(self) > 0); - if (--self->ob_refcnt == 0) { - // this is the normal path out - return; - } - - // close() resurrected it! Make it look like the original Py_DECREF - // never happened. 
- { - Py_ssize_t refcnt = Py_REFCNT(self); - _Py_NewReference(self); - __Pyx_SET_REFCNT(self, refcnt); - } -#if CYTHON_COMPILING_IN_CPYTHON - assert(PyType_IS_GC(Py_TYPE(self)) && - _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED); - - // If Py_REF_DEBUG, _Py_NewReference bumped _Py_RefTotal, so - // we need to undo that. - _Py_DEC_REFTOTAL; -#endif - // If Py_TRACE_REFS, _Py_NewReference re-added self to the object - // chain, so no more to do there. - // If COUNT_ALLOCS, the original decref bumped tp_frees, and - // _Py_NewReference bumped tp_allocs: both of those need to be - // undone. -#ifdef COUNT_ALLOCS - --Py_TYPE(self)->tp_frees; - --Py_TYPE(self)->tp_allocs; -#endif -#endif -} - -static PyObject * -__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) -{ - PyObject *name = self->gi_name; - // avoid NULL pointer dereference during garbage collection - if (unlikely(!name)) name = Py_None; - Py_INCREF(name); - return name; -} - -static int -__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context) -{ - PyObject *tmp; - -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__name__ must be set to a string object"); - return -1; - } - tmp = self->gi_name; - Py_INCREF(value); - self->gi_name = value; - Py_XDECREF(tmp); - return 0; -} - -static PyObject * -__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) -{ - PyObject *name = self->gi_qualname; - // avoid NULL pointer dereference during garbage collection - if (unlikely(!name)) name = Py_None; - Py_INCREF(name); - return name; -} - -static int -__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context) -{ - PyObject *tmp; - -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__qualname__ must be set to a string object"); - return -1; - } - tmp = self->gi_qualname; - Py_INCREF(value); - self->gi_qualname = value; - Py_XDECREF(tmp); - return 0; -} - - -static PyObject * -__Pyx_Coroutine_get_frame(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) -{ - PyObject *frame = self->gi_frame; - if (!frame) { - if (unlikely(!self->gi_code)) { - // Avoid doing something stupid, e.g. during garbage collection. 
- Py_RETURN_NONE; - } - frame = (PyObject *) PyFrame_New( - PyThreadState_Get(), /*PyThreadState *tstate,*/ - (PyCodeObject*) self->gi_code, /*PyCodeObject *code,*/ - $moddict_cname, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (unlikely(!frame)) - return NULL; - // keep the frame cached once it's created - self->gi_frame = frame; - } - Py_INCREF(frame); - return frame; -} - -static __pyx_CoroutineObject *__Pyx__Coroutine_New( - PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name) { - __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type); - if (unlikely(!gen)) - return NULL; - return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name); -} - -static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( - __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name) { - gen->body = body; - gen->closure = closure; - Py_XINCREF(closure); - gen->is_running = 0; - gen->resume_label = 0; - gen->classobj = NULL; - gen->yieldfrom = NULL; - gen->gi_exc_state.exc_type = NULL; - gen->gi_exc_state.exc_value = NULL; - gen->gi_exc_state.exc_traceback = NULL; -#if CYTHON_USE_EXC_INFO_STACK - gen->gi_exc_state.previous_item = NULL; -#endif - gen->gi_weakreflist = NULL; - Py_XINCREF(qualname); - gen->gi_qualname = qualname; - Py_XINCREF(name); - gen->gi_name = name; - Py_XINCREF(module_name); - gen->gi_modulename = module_name; - Py_XINCREF(code); - gen->gi_code = code; - gen->gi_frame = NULL; - - PyObject_GC_Track(gen); - return gen; -} - - -//////////////////// Coroutine //////////////////// -//@requires: CoroutineBase -//@requires: PatchGeneratorABC -//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict - -static void __Pyx_CoroutineAwait_dealloc(PyObject *self) { - PyObject_GC_UnTrack(self); - Py_CLEAR(((__pyx_CoroutineAwaitObject*)self)->coroutine); - PyObject_GC_Del(self); -} - -static int __Pyx_CoroutineAwait_traverse(__pyx_CoroutineAwaitObject *self, visitproc visit, void *arg) { - Py_VISIT(self->coroutine); - return 0; -} - -static int __Pyx_CoroutineAwait_clear(__pyx_CoroutineAwaitObject *self) { - Py_CLEAR(self->coroutine); - return 0; -} - -static PyObject *__Pyx_CoroutineAwait_Next(__pyx_CoroutineAwaitObject *self) { - return __Pyx_Generator_Next(self->coroutine); -} - -static PyObject *__Pyx_CoroutineAwait_Send(__pyx_CoroutineAwaitObject *self, PyObject *value) { - return __Pyx_Coroutine_Send(self->coroutine, value); -} - -static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args) { - return __Pyx_Coroutine_Throw(self->coroutine, args); -} - -static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, CYTHON_UNUSED PyObject *arg) { - return __Pyx_Coroutine_Close(self->coroutine); -} - -static PyObject *__Pyx_CoroutineAwait_self(PyObject *self) { - Py_INCREF(self); - return self; -} - -#if !CYTHON_COMPILING_IN_PYPY -static PyObject *__Pyx_CoroutineAwait_no_new(CYTHON_UNUSED PyTypeObject *type, CYTHON_UNUSED PyObject *args, CYTHON_UNUSED PyObject *kwargs) { - PyErr_SetString(PyExc_TypeError, "cannot instantiate type, use 'await coroutine' instead"); - return NULL; -} -#endif - -static PyMethodDef __pyx_CoroutineAwait_methods[] = { - {"send", (PyCFunction) __Pyx_CoroutineAwait_Send, METH_O, - (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next yielded value or raise 
StopIteration.")}, - {"throw", (PyCFunction) __Pyx_CoroutineAwait_Throw, METH_VARARGS, - (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next yielded value or raise StopIteration.")}, - {"close", (PyCFunction) __Pyx_CoroutineAwait_Close, METH_NOARGS, - (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, - {0, 0, 0, 0} -}; - -static PyTypeObject __pyx_CoroutineAwaitType_type = { - PyVarObject_HEAD_INIT(0, 0) - "coroutine_wrapper", /*tp_name*/ - sizeof(__pyx_CoroutineAwaitObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - (destructor) __Pyx_CoroutineAwait_dealloc,/*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_as_async resp. tp_compare*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - PyDoc_STR("A wrapper object implementing __await__ for coroutines."), /*tp_doc*/ - (traverseproc) __Pyx_CoroutineAwait_traverse, /*tp_traverse*/ - (inquiry) __Pyx_CoroutineAwait_clear, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - __Pyx_CoroutineAwait_self, /*tp_iter*/ - (iternextfunc) __Pyx_CoroutineAwait_Next, /*tp_iternext*/ - __pyx_CoroutineAwait_methods, /*tp_methods*/ - 0 , /*tp_members*/ - 0 , /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ -#if !CYTHON_COMPILING_IN_PYPY - __Pyx_CoroutineAwait_no_new, /*tp_new*/ -#else - 0, /*tp_new*/ -#endif - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ -#if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ -#endif -}; - -#if PY_VERSION_HEX < 0x030500B1 || defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS -static CYTHON_INLINE PyObject *__Pyx__Coroutine_await(PyObject *coroutine) { - __pyx_CoroutineAwaitObject *await = PyObject_GC_New(__pyx_CoroutineAwaitObject, __pyx_CoroutineAwaitType); - if (unlikely(!await)) return NULL; - Py_INCREF(coroutine); - await->coroutine = coroutine; - PyObject_GC_Track(await); - return (PyObject*)await; -} -#endif - -#if PY_VERSION_HEX < 0x030500B1 -static PyObject *__Pyx_Coroutine_await_method(PyObject *coroutine, CYTHON_UNUSED PyObject *arg) { - return __Pyx__Coroutine_await(coroutine); -} -#endif - -#if defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS -static PyObject *__Pyx_Coroutine_await(PyObject *coroutine) { - if (unlikely(!coroutine || !__Pyx_Coroutine_Check(coroutine))) { - PyErr_SetString(PyExc_TypeError, "invalid input, expected coroutine"); - return NULL; - } - return __Pyx__Coroutine_await(coroutine); -} -#endif - -#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 -static PyObject *__Pyx_Coroutine_compare(PyObject *obj, PyObject *other, int op) { - PyObject* result; - switch (op) { - case Py_EQ: result = (other == obj) ? Py_True : Py_False; break; - case Py_NE: result = (other != obj) ? 
Py_True : Py_False; break; - default: - result = Py_NotImplemented; - } - Py_INCREF(result); - return result; -} -#endif - -static PyMethodDef __pyx_Coroutine_methods[] = { - {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, - (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next iterated value or raise StopIteration.")}, - {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, - (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next iterated value or raise StopIteration.")}, - {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, - (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, -#if PY_VERSION_HEX < 0x030500B1 - {"__await__", (PyCFunction) __Pyx_Coroutine_await_method, METH_NOARGS, - (char*) PyDoc_STR("__await__() -> return an iterator to be used in await expression.")}, -#endif - {0, 0, 0, 0} -}; - -static PyMemberDef __pyx_Coroutine_memberlist[] = { - {(char *) "cr_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, - {(char*) "cr_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, - (char*) PyDoc_STR("object being awaited, or None")}, - {(char*) "cr_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, - {(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), PY_WRITE_RESTRICTED, 0}, - {0, 0, 0, 0, 0} -}; - -static PyGetSetDef __pyx_Coroutine_getsets[] = { - {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, - (char*) PyDoc_STR("name of the coroutine"), 0}, - {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, - (char*) PyDoc_STR("qualified name of the coroutine"), 0}, - {(char *) "cr_frame", (getter)__Pyx_Coroutine_get_frame, NULL, - (char*) PyDoc_STR("Frame of the coroutine"), 0}, - {0, 0, 0, 0, 0} -}; - -#if CYTHON_USE_ASYNC_SLOTS -static __Pyx_PyAsyncMethodsStruct __pyx_Coroutine_as_async = { - __Pyx_Coroutine_await, /*am_await*/ - 0, /*am_aiter*/ - 0, /*am_anext*/ -#if PY_VERSION_HEX >= 0x030A00A3 - 0, /*am_send*/ -#endif -}; -#endif - -static PyTypeObject __pyx_CoroutineType_type = { - PyVarObject_HEAD_INIT(0, 0) - "coroutine", /*tp_name*/ - sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ -#if CYTHON_USE_ASYNC_SLOTS - &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! 
*/ -#else - 0, /*tp_reserved*/ -#endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ - 0, /*tp_doc*/ - (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ - 0, /*tp_clear*/ -#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 - // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare - __Pyx_Coroutine_compare, /*tp_richcompare*/ -#else - 0, /*tp_richcompare*/ -#endif - offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ - // no tp_iter() as iterator is only available through __await__() - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_Coroutine_methods, /*tp_methods*/ - __pyx_Coroutine_memberlist, /*tp_members*/ - __pyx_Coroutine_getsets, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - 0, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ -#if CYTHON_USE_TP_FINALIZE - 0, /*tp_del*/ -#else - __Pyx_Coroutine_del, /*tp_del*/ -#endif - 0, /*tp_version_tag*/ -#if CYTHON_USE_TP_FINALIZE - __Pyx_Coroutine_del, /*tp_finalize*/ -#elif PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ -#endif -}; - -static int __pyx_Coroutine_init(void) { - // on Windows, C-API functions can't be used in slots statically - __pyx_CoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - __pyx_CoroutineType = __Pyx_FetchCommonType(&__pyx_CoroutineType_type); - if (unlikely(!__pyx_CoroutineType)) - return -1; - -#ifdef __Pyx_IterableCoroutine_USED - if (unlikely(__pyx_IterableCoroutine_init() == -1)) - return -1; -#endif - - __pyx_CoroutineAwaitType = __Pyx_FetchCommonType(&__pyx_CoroutineAwaitType_type); - if (unlikely(!__pyx_CoroutineAwaitType)) - return -1; - return 0; -} - - -//////////////////// IterableCoroutine.proto //////////////////// - -#define __Pyx_IterableCoroutine_USED - -static PyTypeObject *__pyx_IterableCoroutineType = 0; - -#undef __Pyx_Coroutine_Check -#define __Pyx_Coroutine_Check(obj) (__Pyx_Coroutine_CheckExact(obj) || (Py_TYPE(obj) == __pyx_IterableCoroutineType)) - -#define __Pyx_IterableCoroutine_New(body, code, closure, name, qualname, module_name) \ - __Pyx__Coroutine_New(__pyx_IterableCoroutineType, body, code, closure, name, qualname, module_name) - -static int __pyx_IterableCoroutine_init(void);/*proto*/ - - -//////////////////// IterableCoroutine //////////////////// -//@requires: Coroutine -//@requires: CommonStructures.c::FetchCommonType - -static PyTypeObject __pyx_IterableCoroutineType_type = { - PyVarObject_HEAD_INIT(0, 0) - "iterable_coroutine", /*tp_name*/ - sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ -#if CYTHON_USE_ASYNC_SLOTS - &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 
only! */ -#else - 0, /*tp_reserved*/ -#endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ - 0, /*tp_doc*/ - (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ - 0, /*tp_clear*/ -#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 - // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare - __Pyx_Coroutine_compare, /*tp_richcompare*/ -#else - 0, /*tp_richcompare*/ -#endif - offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ - // enable iteration for legacy support of asyncio yield-from protocol - __Pyx_Coroutine_await, /*tp_iter*/ - (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/ - __pyx_Coroutine_methods, /*tp_methods*/ - __pyx_Coroutine_memberlist, /*tp_members*/ - __pyx_Coroutine_getsets, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - 0, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ -#if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_del*/ -#else - __Pyx_Coroutine_del, /*tp_del*/ -#endif - 0, /*tp_version_tag*/ -#if PY_VERSION_HEX >= 0x030400a1 - __Pyx_Coroutine_del, /*tp_finalize*/ -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ -#endif -}; - - -static int __pyx_IterableCoroutine_init(void) { - __pyx_IterableCoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - __pyx_IterableCoroutineType = __Pyx_FetchCommonType(&__pyx_IterableCoroutineType_type); - if (unlikely(!__pyx_IterableCoroutineType)) - return -1; - return 0; -} - - -//////////////////// Generator //////////////////// -//@requires: CoroutineBase -//@requires: PatchGeneratorABC -//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict - -static PyMethodDef __pyx_Generator_methods[] = { - {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, - (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")}, - {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, - (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")}, - {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, - (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")}, - {0, 0, 0, 0} -}; - -static PyMemberDef __pyx_Generator_memberlist[] = { - {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, - {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, - (char*) PyDoc_STR("object being iterated by 'yield from', or None")}, - {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, - {0, 0, 0, 0, 0} -}; - -static PyGetSetDef __pyx_Generator_getsets[] = { - {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, - (char*) PyDoc_STR("name of the generator"), 
0}, - {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, - (char*) PyDoc_STR("qualified name of the generator"), 0}, - {(char *) "gi_frame", (getter)__Pyx_Coroutine_get_frame, NULL, - (char*) PyDoc_STR("Frame of the generator"), 0}, - {0, 0, 0, 0, 0} -}; - -static PyTypeObject __pyx_GeneratorType_type = { - PyVarObject_HEAD_INIT(0, 0) - "generator", /*tp_name*/ - sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare / tp_as_async*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ - 0, /*tp_doc*/ - (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/ - __pyx_Generator_methods, /*tp_methods*/ - __pyx_Generator_memberlist, /*tp_members*/ - __pyx_Generator_getsets, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - 0, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ -#if CYTHON_USE_TP_FINALIZE - 0, /*tp_del*/ -#else - __Pyx_Coroutine_del, /*tp_del*/ -#endif - 0, /*tp_version_tag*/ -#if CYTHON_USE_TP_FINALIZE - __Pyx_Coroutine_del, /*tp_finalize*/ -#elif PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ -#endif -}; - -static int __pyx_Generator_init(void) { - // on Windows, C-API functions can't be used in slots statically - __pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - __pyx_GeneratorType_type.tp_iter = PyObject_SelfIter; - - __pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type); - if (unlikely(!__pyx_GeneratorType)) { - return -1; - } - return 0; -} - - -/////////////// ReturnWithStopIteration.proto /////////////// - -#define __Pyx_ReturnWithStopIteration(value) \ - if (value == Py_None) PyErr_SetNone(PyExc_StopIteration); else __Pyx__ReturnWithStopIteration(value) -static void __Pyx__ReturnWithStopIteration(PyObject* value); /*proto*/ - -/////////////// ReturnWithStopIteration /////////////// -//@requires: Exceptions.c::PyErrFetchRestore -//@requires: Exceptions.c::PyThreadStateGet -//@substitute: naming - -// 1) Instantiating an exception just to pass back a value is costly. -// 2) CPython 3.3 <= x < 3.5b1 crash in yield-from when the StopIteration is not instantiated. -// 3) Passing a tuple as value into PyErr_SetObject() passes its items on as arguments. -// 4) Passing an exception as value will interpret it as an exception on unpacking and raise it (or unpack its value). -// 5) If there is currently an exception being handled, we need to chain it. 
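-//
-// Illustration (a sketch for orientation, not part of the generated code):
-// in Python terms, the protocol optimised below is a generator's
-// "return <value>", which the caller observes as StopIteration carrying
-// the returned value:
-//
-//     def gen():
-//         yield 1
-//         return "done"            # caller sees StopIteration("done")
-//
-//     g = gen()
-//     next(g)                      # -> 1
-//     try:
-//         next(g)
-//     except StopIteration as exc:
-//         assert exc.value == "done"
-//
-// The fast path below stores `value` directly as the exception value and
-// only instantiates a real StopIteration object when points 2-4 above make
-// that shortcut unsafe.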
- -static void __Pyx__ReturnWithStopIteration(PyObject* value) { - PyObject *exc, *args; -#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_PYSTON - __Pyx_PyThreadState_declare - if ((PY_VERSION_HEX >= 0x03030000 && PY_VERSION_HEX < 0x030500B1) - || unlikely(PyTuple_Check(value) || PyExceptionInstance_Check(value))) { - args = PyTuple_New(1); - if (unlikely(!args)) return; - Py_INCREF(value); - PyTuple_SET_ITEM(args, 0, value); - exc = PyType_Type.tp_call(PyExc_StopIteration, args, NULL); - Py_DECREF(args); - if (!exc) return; - } else { - // it's safe to avoid instantiating the exception - Py_INCREF(value); - exc = value; - } - #if CYTHON_FAST_THREAD_STATE - __Pyx_PyThreadState_assign - #if CYTHON_USE_EXC_INFO_STACK - if (!$local_tstate_cname->exc_info->exc_type) - #else - if (!$local_tstate_cname->exc_type) - #endif - { - // no chaining needed => avoid the overhead in PyErr_SetObject() - Py_INCREF(PyExc_StopIteration); - __Pyx_ErrRestore(PyExc_StopIteration, exc, NULL); - return; - } - #endif -#else - args = PyTuple_Pack(1, value); - if (unlikely(!args)) return; - exc = PyObject_Call(PyExc_StopIteration, args, NULL); - Py_DECREF(args); - if (unlikely(!exc)) return; -#endif - PyErr_SetObject(PyExc_StopIteration, exc); - Py_DECREF(exc); -} - - -//////////////////// PatchModuleWithCoroutine.proto //////////////////// - -static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code); /*proto*/ - -//////////////////// PatchModuleWithCoroutine //////////////////// -//@substitute: naming - -static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) { -#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - int result; - PyObject *globals, *result_obj; - globals = PyDict_New(); if (unlikely(!globals)) goto ignore; - result = PyDict_SetItemString(globals, "_cython_coroutine_type", - #ifdef __Pyx_Coroutine_USED - (PyObject*)__pyx_CoroutineType); - #else - Py_None); - #endif - if (unlikely(result < 0)) goto ignore; - result = PyDict_SetItemString(globals, "_cython_generator_type", - #ifdef __Pyx_Generator_USED - (PyObject*)__pyx_GeneratorType); - #else - Py_None); - #endif - if (unlikely(result < 0)) goto ignore; - if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore; - if (unlikely(PyDict_SetItemString(globals, "__builtins__", $builtins_cname) < 0)) goto ignore; - result_obj = PyRun_String(py_code, Py_file_input, globals, globals); - if (unlikely(!result_obj)) goto ignore; - Py_DECREF(result_obj); - Py_DECREF(globals); - return module; - -ignore: - Py_XDECREF(globals); - PyErr_WriteUnraisable(module); - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) { - Py_DECREF(module); - module = NULL; - } -#else - // avoid "unused" warning - py_code++; -#endif - return module; -} - - -//////////////////// PatchGeneratorABC.proto //////////////////// - -// register with Generator/Coroutine ABCs in 'collections.abc' -// see https://bugs.python.org/issue24018 -static int __Pyx_patch_abc(void); /*proto*/ - -//////////////////// PatchGeneratorABC //////////////////// -//@requires: PatchModuleWithCoroutine - -#ifndef CYTHON_REGISTER_ABCS -#define CYTHON_REGISTER_ABCS 1 -#endif - -#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) -static PyObject* __Pyx_patch_abc_module(PyObject *module); /*proto*/ -static PyObject* __Pyx_patch_abc_module(PyObject *module) { - module = __Pyx_Coroutine_patch_module( - module, CSTRING("""\ -if 
_cython_generator_type is not None: - try: Generator = _module.Generator - except AttributeError: pass - else: Generator.register(_cython_generator_type) -if _cython_coroutine_type is not None: - try: Coroutine = _module.Coroutine - except AttributeError: pass - else: Coroutine.register(_cython_coroutine_type) -""") - ); - return module; -} -#endif - -static int __Pyx_patch_abc(void) { -#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - static int abc_patched = 0; - if (CYTHON_REGISTER_ABCS && !abc_patched) { - PyObject *module; - module = PyImport_ImportModule((PY_MAJOR_VERSION >= 3) ? "collections.abc" : "collections"); - if (!module) { - PyErr_WriteUnraisable(NULL); - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, - ((PY_MAJOR_VERSION >= 3) ? - "Cython module failed to register with collections.abc module" : - "Cython module failed to register with collections module"), 1) < 0)) { - return -1; - } - } else { - module = __Pyx_patch_abc_module(module); - abc_patched = 1; - if (unlikely(!module)) - return -1; - Py_DECREF(module); - } - // also register with "backports_abc" module if available, just in case - module = PyImport_ImportModule("backports_abc"); - if (module) { - module = __Pyx_patch_abc_module(module); - Py_XDECREF(module); - } - if (!module) { - PyErr_Clear(); - } - } -#else - // avoid "unused" warning for __Pyx_Coroutine_patch_module() - if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL); -#endif - return 0; -} - - -//////////////////// PatchAsyncIO.proto //////////////////// - -// run after importing "asyncio" to patch Cython generator support into it -static PyObject* __Pyx_patch_asyncio(PyObject* module); /*proto*/ - -//////////////////// PatchAsyncIO //////////////////// -//@requires: ImportExport.c::Import -//@requires: PatchModuleWithCoroutine -//@requires: PatchInspect - -static PyObject* __Pyx_patch_asyncio(PyObject* module) { -#if PY_VERSION_HEX < 0x030500B2 && \ - (defined(__Pyx_Coroutine_USED) || defined(__Pyx_Generator_USED)) && \ - (!defined(CYTHON_PATCH_ASYNCIO) || CYTHON_PATCH_ASYNCIO) - PyObject *patch_module = NULL; - static int asyncio_patched = 0; - if (unlikely((!asyncio_patched) && module)) { - PyObject *package; - package = __Pyx_Import(PYIDENT("asyncio.coroutines"), NULL, 0); - if (package) { - patch_module = __Pyx_Coroutine_patch_module( - PyObject_GetAttrString(package, "coroutines"), CSTRING("""\ -try: - coro_types = _module._COROUTINE_TYPES -except AttributeError: pass -else: - if _cython_coroutine_type is not None and _cython_coroutine_type not in coro_types: - coro_types = tuple(coro_types) + (_cython_coroutine_type,) - if _cython_generator_type is not None and _cython_generator_type not in coro_types: - coro_types = tuple(coro_types) + (_cython_generator_type,) -_module._COROUTINE_TYPES = coro_types -""") - ); - } else { - PyErr_Clear(); -// Always enable fallback: even if we compile against 3.4.2, we might be running on 3.4.1 at some point. 
-//#if PY_VERSION_HEX < 0x03040200 - // Py3.4.1 used to have asyncio.tasks instead of asyncio.coroutines - package = __Pyx_Import(PYIDENT("asyncio.tasks"), NULL, 0); - if (unlikely(!package)) goto asyncio_done; - patch_module = __Pyx_Coroutine_patch_module( - PyObject_GetAttrString(package, "tasks"), CSTRING("""\ -if hasattr(_module, 'iscoroutine'): - old_types = getattr(_module.iscoroutine, '_cython_coroutine_types', None) - if old_types is None or not isinstance(old_types, set): - old_types = set() - def cy_wrap(orig_func, type=type, cython_coroutine_types=old_types): - def cy_iscoroutine(obj): return type(obj) in cython_coroutine_types or orig_func(obj) - cy_iscoroutine._cython_coroutine_types = cython_coroutine_types - return cy_iscoroutine - _module.iscoroutine = cy_wrap(_module.iscoroutine) - if _cython_coroutine_type is not None: - old_types.add(_cython_coroutine_type) - if _cython_generator_type is not None: - old_types.add(_cython_generator_type) -""") - ); -//#endif -// Py < 0x03040200 - } - Py_DECREF(package); - if (unlikely(!patch_module)) goto ignore; -//#if PY_VERSION_HEX < 0x03040200 -asyncio_done: - PyErr_Clear(); -//#endif - asyncio_patched = 1; -#ifdef __Pyx_Generator_USED - // now patch inspect.isgenerator() by looking up the imported module in the patched asyncio module - { - PyObject *inspect_module; - if (patch_module) { - inspect_module = PyObject_GetAttr(patch_module, PYIDENT("inspect")); - Py_DECREF(patch_module); - } else { - inspect_module = __Pyx_Import(PYIDENT("inspect"), NULL, 0); - } - if (unlikely(!inspect_module)) goto ignore; - inspect_module = __Pyx_patch_inspect(inspect_module); - if (unlikely(!inspect_module)) { - Py_DECREF(module); - module = NULL; - } - Py_XDECREF(inspect_module); - } -#else - // avoid "unused" warning for __Pyx_patch_inspect() - if ((0)) return __Pyx_patch_inspect(module); -#endif - } - return module; -ignore: - PyErr_WriteUnraisable(module); - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch asyncio package with custom generator type", 1) < 0)) { - Py_DECREF(module); - module = NULL; - } -#else - // avoid "unused" warning for __Pyx_Coroutine_patch_module() - if ((0)) return __Pyx_patch_inspect(__Pyx_Coroutine_patch_module(module, NULL)); -#endif - return module; -} - - -//////////////////// PatchInspect.proto //////////////////// - -// run after importing "inspect" to patch Cython generator support into it -static PyObject* __Pyx_patch_inspect(PyObject* module); /*proto*/ - -//////////////////// PatchInspect //////////////////// -//@requires: PatchModuleWithCoroutine - -static PyObject* __Pyx_patch_inspect(PyObject* module) { -#if defined(__Pyx_Generator_USED) && (!defined(CYTHON_PATCH_INSPECT) || CYTHON_PATCH_INSPECT) - static int inspect_patched = 0; - if (unlikely((!inspect_patched) && module)) { - module = __Pyx_Coroutine_patch_module( - module, CSTRING("""\ -old_types = getattr(_module.isgenerator, '_cython_generator_types', None) -if old_types is None or not isinstance(old_types, set): - old_types = set() - def cy_wrap(orig_func, type=type, cython_generator_types=old_types): - def cy_isgenerator(obj): return type(obj) in cython_generator_types or orig_func(obj) - cy_isgenerator._cython_generator_types = cython_generator_types - return cy_isgenerator - _module.isgenerator = cy_wrap(_module.isgenerator) -old_types.add(_cython_generator_type) -""") - ); - inspect_patched = 1; - } -#else - // avoid "unused" warning for __Pyx_Coroutine_patch_module() - if ((0)) return 
__Pyx_Coroutine_patch_module(module, NULL); -#endif - return module; -} - - -//////////////////// StopAsyncIteration.proto //////////////////// - -#define __Pyx_StopAsyncIteration_USED -static PyObject *__Pyx_PyExc_StopAsyncIteration; -static int __pyx_StopAsyncIteration_init(void); /*proto*/ - -//////////////////// StopAsyncIteration //////////////////// - -#if PY_VERSION_HEX < 0x030500B1 -static PyTypeObject __Pyx__PyExc_StopAsyncIteration_type = { - PyVarObject_HEAD_INIT(0, 0) - "StopAsyncIteration", /*tp_name*/ - sizeof(PyBaseExceptionObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - 0, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare / reserved*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - PyDoc_STR("Signal the end from iterator.__anext__()."), /*tp_doc*/ - 0, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - 0, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ -#if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ -#endif -#if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM+0 >= 0x06000000 - 0, /*tp_pypy_flags*/ -#endif -}; -#endif - -static int __pyx_StopAsyncIteration_init(void) { -#if PY_VERSION_HEX >= 0x030500B1 - __Pyx_PyExc_StopAsyncIteration = PyExc_StopAsyncIteration; -#else - PyObject *builtins = PyEval_GetBuiltins(); - if (likely(builtins)) { - PyObject *exc = PyMapping_GetItemString(builtins, (char*) "StopAsyncIteration"); - if (exc) { - __Pyx_PyExc_StopAsyncIteration = exc; - return 0; - } - } - PyErr_Clear(); - - __Pyx__PyExc_StopAsyncIteration_type.tp_traverse = ((PyTypeObject*)PyExc_BaseException)->tp_traverse; - __Pyx__PyExc_StopAsyncIteration_type.tp_clear = ((PyTypeObject*)PyExc_BaseException)->tp_clear; - __Pyx__PyExc_StopAsyncIteration_type.tp_dictoffset = ((PyTypeObject*)PyExc_BaseException)->tp_dictoffset; - __Pyx__PyExc_StopAsyncIteration_type.tp_base = (PyTypeObject*)PyExc_Exception; - - __Pyx_PyExc_StopAsyncIteration = (PyObject*) __Pyx_FetchCommonType(&__Pyx__PyExc_StopAsyncIteration_type); - if (unlikely(!__Pyx_PyExc_StopAsyncIteration)) - return -1; - if (builtins && unlikely(PyMapping_SetItemString(builtins, (char*) "StopAsyncIteration", __Pyx_PyExc_StopAsyncIteration) < 0)) - return -1; -#endif - return 0; -} diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vega/v5/schema/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vega/v5/schema/__init__.py deleted file mode 100644 index 3764f925d985210ebb924abe8d1447489cff3fae..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vega/v5/schema/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# flake8: noqa -from .core import * - -SCHEMA_VERSION = 'v5.10.0' -SCHEMA_URL = 'https://vega.github.io/schema/vega/v5.10.0.json' diff --git a/spaces/arxnov/anotest/ONNXVITS_models.py b/spaces/arxnov/anotest/ONNXVITS_models.py deleted file mode 100644 
index acd00238895d57ba878fd0211d5654250fb10061..0000000000000000000000000000000000000000 --- a/spaces/arxnov/anotest/ONNXVITS_models.py +++ /dev/null @@ -1,509 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import ONNXVITS_modules as modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - self.w = None - self.reverse = None - self.noise_scale = None - def forward(self, x, x_mask, g=None): - w = self.w - reverse = self.reverse - noise_scale = self.noise_scale - - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - 
for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - self.reverse = None - def forward(self, x, x_mask, g=None): - reverse = self.reverse - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask # x_in : [b, c, t] -> [b, h, t] - x = self.enc(x, x_mask, g=g) # x_in : [b, h, t], g : [b, h, 1], x = x_in + g - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask # z, m, logs : [b, h, t] - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, 
upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, 
padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - - if n_speakers > 0: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, sid=None, noise_scale=.667, length_scale=1, noise_scale_w=.8, max_len=None): - torch.onnx.export( - self.enc_p, - (x, x_lengths), - "ONNX_net/enc_p.onnx", - input_names=["x", "x_lengths"], - output_names=["xout", "m_p", "logs_p", "x_mask"], - dynamic_axes={ - "x" : [1], - "xout" : [2], - "m_p" : [2], - "logs_p" : [2], - "x_mask" : [2] - }, - verbose=True, - ) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - self.dp.reverse = True - self.dp.noise_scale = noise_scale_w - 
torch.onnx.export( - self.dp, - (x, x_mask, g), - "ONNX_net/dp.onnx", - input_names=["x", "x_mask", "g"], - output_names=["logw"], - dynamic_axes={ - "x" : [2], - "x_mask" : [2], - "logw" : [2] - }, - verbose=True, - ) - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - self.flow.reverse = True - torch.onnx.export( - self.flow, - (z_p, y_mask, g), - "ONNX_net/flow.onnx", - input_names=["z_p", "y_mask", "g"], - output_names=["z"], - dynamic_axes={ - "z_p" : [2], - "y_mask" : [2], - "z" : [2] - }, - verbose=True, - ) - z = self.flow(z_p, y_mask, g=g) - z_in = (z * y_mask)[:,:,:max_len] - - torch.onnx.export( - self.dec, - (z_in, g), - "ONNX_net/dec.onnx", - input_names=["z_in", "g"], - output_names=["o"], - dynamic_axes={ - "z_in" : [2], - "o" : [2] - }, - verbose=True, - ) - o = self.dec(z_in, g=g) - return o diff --git a/spaces/ashrestha/auto-multi-class/README.md b/spaces/ashrestha/auto-multi-class/README.md deleted file mode 100644 index 47fd39c8837c8d7fdf2d748f8e6bf7822b111c7c..0000000000000000000000000000000000000000 --- a/spaces/ashrestha/auto-multi-class/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Auto Multi Class -emoji: 🔥 -colorFrom: red -colorTo: gray -sdk: streamlit -sdk_version: 1.9.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/AI-MovieMaker-Comedy/app.py b/spaces/awacke1/AI-MovieMaker-Comedy/app.py deleted file mode 100644 index fa7cf551def59c200994d0ee697cc4c408aee0c0..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AI-MovieMaker-Comedy/app.py +++ /dev/null @@ -1,74 +0,0 @@ -import gradio as gr -import moviepy.video.io.ImageSequenceClip -from PIL import Image -from pydub import AudioSegment -from moviepy.editor import * -import numpy as np -import mutagen -from mutagen.mp3 import MP3 -import cv2 - -HF_TOKEN = os.environ.get("HF_TOKEN") - - -def resize(img_list): - resize_img_list = [] - for item in img_list: - im = Image.open(item) - imResize = im.resize((256,256), Image.ANTIALIAS) - resize_img_list.append(np.array(imResize)) - return resize_img_list - - -def merge_audio_video(entities_num, resize_img_list, text_input): - speech = text2speech(text_input) - wav_audio = AudioSegment.from_file(speech, "flac") #("/content/gdrive/My Drive/AI/audio1.flac", "flac") - wav_audio.export("audio.mp3", format="mp3") #("/content/gdrive/My Drive/AI/audio1.mp3", format="mp3") - audio_length = int(MP3("audio.mp3").info.length) - fps= entities_num / audio_length #length of audio file - fps = float(format(fps, '.5f')) - clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(resize_img_list, fps=fps) - clip.write_videofile('my_vid_tmp.mp4') - videoclip = VideoFileClip('my_vid_tmp.mp4') #("/content/gdrive/My Drive/AI/my_video1.mp4") - audioclip = AudioFileClip('audio.mp3') #.subclip(0, 15) - mergedclip = 
videoclip.set_audio(audioclip)
-    duration = mergedclip.duration
-    frame_count = mergedclip.fps
-    return mergedclip
-
-fastspeech = gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech", api_key=HF_TOKEN)
-
-def text2speech(text):
-    speech = fastspeech(text)
-    return speech
-
-def engine(text_input):
-    ner = gr.Interface.load("huggingface/flair/ner-english-ontonotes-large", api_key=HF_TOKEN)
-    entities = ner(text_input)
-    entities = [tupl for tupl in entities if None not in tupl]
-    entities_num = len(entities)
-    img_list = []
-    for ent in entities:
-        img = gr.Interface.load("spaces/multimodalart/latentdiffusion", api_key=HF_TOKEN)(ent[0],'50','256','256','1',10)[0]
-        img_list.append(img)
-    resize_img_list = resize(img_list)
-    mergedclip = merge_audio_video(entities_num, resize_img_list, text_input)
-    mergedclip.to_videofile('mergedvideo.mp4')
-    return 'mergedvideo.mp4'
-
-app = gr.Interface(engine,
-    gr.inputs.Textbox(lines=5, label="Input Text"),
-    gr.outputs.Video(type=None, label='Final Merged video'),
-    description="🎭🎞️🍿 AI Movie Maker - Comedy 🎬 🧠 🎨",
-    examples=
-[
-["Two space marines take up arms to save the planet from an alien invasion. These two dashing strong men play a comedic role in the science fiction movie of the future where even barnaby bunny is willing to join their wacky gang of space marines to save the planet with good looks and comedy."],
-["Two space marines take up arms to save the planet from an alien invasion. In each episode they play different roles with the main mission to save Wills girlfriend from alien attack. These two dashing strong men play a comedic role in the science fiction movie of the future, and make their second feature film appearance together in a comedy. In the short-lived TV series The Twilight Space Marines, Two space marines take up arms to save the planet from an alien invasion."],
-["These two dashing strong men play a comedic role in the science fiction movie of the future but also serve as a bit of sci-fi history lesson the first time we saw them one of them held a gun to the head of another in anger due to a wrestling disagreement about a famous clearwater bar karaoke and wrestling champion. Barnaby bunny is willing to join their wacky gang of space marines for their adventure in the universe. But, its not just any adventure. They have got to survive a dangerous world, find their destiny, and defeat a cosmic villain before These two dashing strong men play a comedic role in the science fiction movie of the future."],
-["To save the planet with good looks and comedy is not easy and they are both pro expert mobsters and wrestling federation superstars. It is a wonder I have been able to continue working as I have when so many others have given up And here he is, a mere nine years after the film release, interviewed. Two space marines who take up arms and wrestling moves and jokes to save the planet from invading bad guys. it is hilarious. That is why they are so perfect as a comedic double act, so we asked if they will be up for a sequel to charm us once again. Dudes we are ready!"]
-],
-    title="AI Pipeline Multi Model 🎭🎞️🍿 Movie Maker 🎬 🧠 🎨",
-    article="
" - - #).launch(enable_queue=True, debug=True) - ).launch(debug=True) \ No newline at end of file diff --git a/spaces/awacke1/AIDocumentUnderstandingOCR/app.py b/spaces/awacke1/AIDocumentUnderstandingOCR/app.py deleted file mode 100644 index 8bbf87abb0c0dd3619d417982762138967d83042..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AIDocumentUnderstandingOCR/app.py +++ /dev/null @@ -1,100 +0,0 @@ -import os -os.system('pip install pyyaml==5.1') -# workaround: install old version of pytorch since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158) -os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html') - -# install detectron2 that matches pytorch 1.8 -# See https://detectron2.readthedocs.io/tutorials/install.html for instructions -os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html') - -## install PyTesseract -os.system('pip install -q pytesseract') - -import gradio as gr -import numpy as np -from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification -from datasets import load_dataset -from PIL import Image, ImageDraw, ImageFont - -processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased") -model = LayoutLMv2ForTokenClassification.from_pretrained("nielsr/layoutlmv2-finetuned-funsd") - -# load image example -dataset = load_dataset("nielsr/funsd", split="test") -image = Image.open(dataset[0]["image_path"]).convert("RGB") -image = Image.open("./invoice.png") -image.save("document.png") - -# define id2label, label2color -labels = dataset.features['ner_tags'].feature.names -id2label = {v: k for v, k in enumerate(labels)} -label2color = {'question':'blue', 'answer':'green', 'header':'orange', 'other':'violet'} - -def unnormalize_box(bbox, width, height): - return [ - width * (bbox[0] / 1000), - height * (bbox[1] / 1000), - width * (bbox[2] / 1000), - height * (bbox[3] / 1000), - ] - -def iob_to_label(label): - label = label[2:] - if not label: - return 'other' - return label - -def process_image(image): - width, height = image.size - - # encode - encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt") - offset_mapping = encoding.pop('offset_mapping') - - # forward pass - outputs = model(**encoding) - - # get predictions - predictions = outputs.logits.argmax(-1).squeeze().tolist() - token_boxes = encoding.bbox.squeeze().tolist() - - # only keep non-subword predictions - is_subword = np.array(offset_mapping.squeeze().tolist())[:,0] != 0 - true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]] - true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]] - - # draw predictions over the image - draw = ImageDraw.Draw(image) - font = ImageFont.load_default() - for prediction, box in zip(true_predictions, true_boxes): - predicted_label = iob_to_label(prediction).lower() - draw.rectangle(box, outline=label2color[predicted_label]) - draw.text((box[0]+10, box[1]-10), text=predicted_label, fill=label2color[predicted_label], font=font) - - return image - - -title = "🧠📑 AI Document Understanding OCR Using LayoutLMv2, FUNSD, and UNILM 📑🧠" - -description = "LayoutLMv2: https://paperswithcode.com/paper/layoutlmv2-multi-modal-pre-training-for fine-tuned on FUNSD, a dataset of manually annotated forms annotates words appearing 
in the image as QUESTION/ANSWER/HEADER/OTHER."
-
-article = "LayoutLMv2 on PapersWithCode | UNILM Git
" - -#examples =[['document.png']] -#examples = [f"{i}.jpg" for i in range(1,5)] -examples = [f"{i}.png" for i in range(1,3)] - -#css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}" -css = ".image-preview {height: auto !important;}" - -iface = gr.Interface(fn=process_image, - inputs=gr.inputs.Image(type="pil"), - outputs=gr.outputs.Image(type="pil", label="annotated image"), - title=title, - description=description, - article=article, - examples=examples, - css=css, - enable_queue=True) - -iface.launch(debug=True) \ No newline at end of file diff --git a/spaces/awacke1/ClinicalTerminologyAISearch/README.md b/spaces/awacke1/ClinicalTerminologyAISearch/README.md deleted file mode 100644 index 3d344db381bf459b9368e1f6eacb0da349c8f0cc..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ClinicalTerminologyAISearch/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 🩺Clinical Terminology Search ICD10-SNOMED-CT-LOINC-Panels-OMS ⚕️ Gradio -emoji: 👩‍⚕️🩺⚕️ -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/SpaceBuggyPlaycanvasHTML5/style.css b/spaces/awacke1/SpaceBuggyPlaycanvasHTML5/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/SpaceBuggyPlaycanvasHTML5/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/awacke1/WebAssemblyStreamlitLite-stlite/style.css b/spaces/awacke1/WebAssemblyStreamlitLite-stlite/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/WebAssemblyStreamlitLite-stlite/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/vdecoder/hifigan/nvSTFT.py b/spaces/azusarang/so-vits-svc-models-ba_P/vdecoder/hifigan/nvSTFT.py deleted file mode 100644 index 88597d62a505715091f9ba62d38bf0a85a31b95a..0000000000000000000000000000000000000000 --- a/spaces/azusarang/so-vits-svc-models-ba_P/vdecoder/hifigan/nvSTFT.py +++ /dev/null @@ -1,111 +0,0 @@ -import math -import os -os.environ["LRU_CACHE_CAPACITY"] = "3" -import random -import torch -import torch.utils.data -import numpy as np -import librosa -from librosa.util import normalize -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read -import soundfile as sf - -def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False): - sampling_rate = None - try: - data, sampling_rate = sf.read(full_path, always_2d=True)# than 
soundfile. - except Exception as ex: - print(f"'{full_path}' failed to load.\nException:") - print(ex) - if return_empty_on_exception: - return [], sampling_rate or target_sr or 32000 - else: - raise Exception(ex) - - if len(data.shape) > 1: - data = data[:, 0] - assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension) - - if np.issubdtype(data.dtype, np.integer): # if audio data is type int - max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX - else: # if audio data is type fp32 - max_mag = max(np.amax(data), -np.amin(data)) - max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32 - - data = torch.FloatTensor(data.astype(np.float32))/max_mag - - if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except - return [], sampling_rate or target_sr or 32000 - if target_sr is not None and sampling_rate != target_sr: - data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr)) - sampling_rate = target_sr - - return data, sampling_rate - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - -class STFT(): - def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5): - self.target_sr = sr - - self.n_mels = n_mels - self.n_fft = n_fft - self.win_size = win_size - self.hop_length = hop_length - self.fmin = fmin - self.fmax = fmax - self.clip_val = clip_val - self.mel_basis = {} - self.hann_window = {} - - def get_mel(self, y, center=False): - sampling_rate = self.target_sr - n_mels = self.n_mels - n_fft = self.n_fft - win_size = self.win_size - hop_length = self.hop_length - fmin = self.fmin - fmax = self.fmax - clip_val = self.clip_val - - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - if fmax not in self.mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - # print(111,spec) - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - # print(222,spec) - spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec) - # print(333,spec) - spec = dynamic_range_compression_torch(spec, clip_val=clip_val) - # print(444,spec) - return spec - - def __call__(self, audiopath): - audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) - spect = self.get_mel(audio.unsqueeze(0)).squeeze(0) - return spect - -stft = STFT() diff --git 
a/spaces/badayvedat/LLaVA/llava/serve/controller.py b/spaces/badayvedat/LLaVA/llava/serve/controller.py deleted file mode 100644 index b61fca6ea9fe8aa37acd143784a3d76e90a58b9f..0000000000000000000000000000000000000000 --- a/spaces/badayvedat/LLaVA/llava/serve/controller.py +++ /dev/null @@ -1,298 +0,0 @@ -""" -A controller manages distributed workers. -It sends worker addresses to clients. -""" -import argparse -import asyncio -import dataclasses -from enum import Enum, auto -import json -import logging -import time -from typing import List, Union -import threading - -from fastapi import FastAPI, Request -from fastapi.responses import StreamingResponse -import numpy as np -import requests -import uvicorn - -from llava.constants import CONTROLLER_HEART_BEAT_EXPIRATION -from llava.utils import build_logger, server_error_msg - - -logger = build_logger("controller", "controller.log") - - -class DispatchMethod(Enum): - LOTTERY = auto() - SHORTEST_QUEUE = auto() - - @classmethod - def from_str(cls, name): - if name == "lottery": - return cls.LOTTERY - elif name == "shortest_queue": - return cls.SHORTEST_QUEUE - else: - raise ValueError(f"Invalid dispatch method") - - -@dataclasses.dataclass -class WorkerInfo: - model_names: List[str] - speed: int - queue_length: int - check_heart_beat: bool - last_heart_beat: str - - -def heart_beat_controller(controller): - while True: - time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION) - controller.remove_stable_workers_by_expiration() - - -class Controller: - def __init__(self, dispatch_method: str): - # Dict[str -> WorkerInfo] - self.worker_info = {} - self.dispatch_method = DispatchMethod.from_str(dispatch_method) - - self.heart_beat_thread = threading.Thread( - target=heart_beat_controller, args=(self,)) - self.heart_beat_thread.start() - - logger.info("Init controller") - - def register_worker(self, worker_name: str, check_heart_beat: bool, - worker_status: dict): - if worker_name not in self.worker_info: - logger.info(f"Register a new worker: {worker_name}") - else: - logger.info(f"Register an existing worker: {worker_name}") - - if not worker_status: - worker_status = self.get_worker_status(worker_name) - if not worker_status: - return False - - self.worker_info[worker_name] = WorkerInfo( - worker_status["model_names"], worker_status["speed"], worker_status["queue_length"], - check_heart_beat, time.time()) - - logger.info(f"Register done: {worker_name}, {worker_status}") - return True - - def get_worker_status(self, worker_name: str): - try: - r = requests.post(worker_name + "/worker_get_status", timeout=5) - except requests.exceptions.RequestException as e: - logger.error(f"Get status fails: {worker_name}, {e}") - return None - - if r.status_code != 200: - logger.error(f"Get status fails: {worker_name}, {r}") - return None - - return r.json() - - def remove_worker(self, worker_name: str): - del self.worker_info[worker_name] - - def refresh_all_workers(self): - old_info = dict(self.worker_info) - self.worker_info = {} - - for w_name, w_info in old_info.items(): - if not self.register_worker(w_name, w_info.check_heart_beat, None): - logger.info(f"Remove stale worker: {w_name}") - - def list_models(self): - model_names = set() - - for w_name, w_info in self.worker_info.items(): - model_names.update(w_info.model_names) - - return list(model_names) - - def get_worker_address(self, model_name: str): - if self.dispatch_method == DispatchMethod.LOTTERY: - worker_names = [] - worker_speeds = [] - for w_name, w_info in self.worker_info.items(): - if model_name in 
w_info.model_names: - worker_names.append(w_name) - worker_speeds.append(w_info.speed) - worker_speeds = np.array(worker_speeds, dtype=np.float32) - norm = np.sum(worker_speeds) - if norm < 1e-4: - return "" - worker_speeds = worker_speeds / norm - if True: # Directly return address - pt = np.random.choice(np.arange(len(worker_names)), - p=worker_speeds) - worker_name = worker_names[pt] - return worker_name - - # Check status before returning - while True: - pt = np.random.choice(np.arange(len(worker_names)), - p=worker_speeds) - worker_name = worker_names[pt] - - if self.get_worker_status(worker_name): - break - else: - self.remove_worker(worker_name) - worker_speeds[pt] = 0 - norm = np.sum(worker_speeds) - if norm < 1e-4: - return "" - worker_speeds = worker_speeds / norm - continue - return worker_name - elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE: - worker_names = [] - worker_qlen = [] - for w_name, w_info in self.worker_info.items(): - if model_name in w_info.model_names: - worker_names.append(w_name) - worker_qlen.append(w_info.queue_length / w_info.speed) - if len(worker_names) == 0: - return "" - min_index = np.argmin(worker_qlen) - w_name = worker_names[min_index] - self.worker_info[w_name].queue_length += 1 - logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}") - return w_name - else: - raise ValueError(f"Invalid dispatch method: {self.dispatch_method}") - - def receive_heart_beat(self, worker_name: str, queue_length: int): - if worker_name not in self.worker_info: - logger.info(f"Receive unknown heart beat. {worker_name}") - return False - - self.worker_info[worker_name].queue_length = queue_length - self.worker_info[worker_name].last_heart_beat = time.time() - logger.info(f"Receive heart beat. {worker_name}") - return True - - def remove_stable_workers_by_expiration(self): - expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION - to_delete = [] - for worker_name, w_info in self.worker_info.items(): - if w_info.check_heart_beat and w_info.last_heart_beat < expire: - to_delete.append(worker_name) - - for worker_name in to_delete: - self.remove_worker(worker_name) - - def worker_api_generate_stream(self, params): - worker_addr = self.get_worker_address(params["model"]) - if not worker_addr: - logger.info(f"no worker: {params['model']}") - ret = { - "text": server_error_msg, - "error_code": 2, - } - yield json.dumps(ret).encode() + b"\0" - - try: - response = requests.post(worker_addr + "/worker_generate_stream", - json=params, stream=True, timeout=5) - for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"): - if chunk: - yield chunk + b"\0" - except requests.exceptions.RequestException as e: - logger.info(f"worker timeout: {worker_addr}") - ret = { - "text": server_error_msg, - "error_code": 3, - } - yield json.dumps(ret).encode() + b"\0" - - - # Let the controller act as a worker to achieve hierarchical - # management. This can be used to connect isolated sub networks. 
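- # A minimal illustrative sketch (hypothetical, not part of the original
- # file): a sub-network controller could register itself with a parent
- # controller by reusing the /register_worker endpoint defined below,
- # reporting its aggregated status as if it were a single worker.
- #
- #   requests.post(parent_addr + "/register_worker", json={
- #       "worker_name": my_public_addr,  # assumed: this controller's own URL
- #       "check_heart_beat": False,
- #       "worker_status": controller.worker_api_get_status(),
- #   })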
- def worker_api_get_status(self): - model_names = set() - speed = 0 - queue_length = 0 - - for w_name in self.worker_info: - worker_status = self.get_worker_status(w_name) - if worker_status is not None: - model_names.update(worker_status["model_names"]) - speed += worker_status["speed"] - queue_length += worker_status["queue_length"] - - return { - "model_names": list(model_names), - "speed": speed, - "queue_length": queue_length, - } - - -app = FastAPI() - - -@app.post("/register_worker") -async def register_worker(request: Request): - data = await request.json() - controller.register_worker( - data["worker_name"], data["check_heart_beat"], - data.get("worker_status", None)) - - -@app.post("/refresh_all_workers") -async def refresh_all_workers(): - models = controller.refresh_all_workers() - - -@app.post("/list_models") -async def list_models(): - models = controller.list_models() - return {"models": models} - - -@app.post("/get_worker_address") -async def get_worker_address(request: Request): - data = await request.json() - addr = controller.get_worker_address(data["model"]) - return {"address": addr} - - -@app.post("/receive_heart_beat") -async def receive_heart_beat(request: Request): - data = await request.json() - exist = controller.receive_heart_beat( - data["worker_name"], data["queue_length"]) - return {"exist": exist} - - -@app.post("/worker_generate_stream") -async def worker_api_generate_stream(request: Request): - params = await request.json() - generator = controller.worker_api_generate_stream(params) - return StreamingResponse(generator) - - -@app.post("/worker_get_status") -async def worker_api_get_status(request: Request): - return controller.worker_api_get_status() - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--host", type=str, default="localhost") - parser.add_argument("--port", type=int, default=21001) - parser.add_argument("--dispatch-method", type=str, choices=[ - "lottery", "shortest_queue"], default="shortest_queue") - args = parser.parse_args() - logger.info(f"args: {args}") - - controller = Controller(args.dispatch_method) - uvicorn.run(app, host=args.host, port=args.port, log_level="info") diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/postprocessing/NodePass.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/postprocessing/NodePass.js deleted file mode 100644 index f92a2a817edce46c9170b864c38336e00d1780da..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/postprocessing/NodePass.js +++ /dev/null @@ -1,91 +0,0 @@ -/** - * @author sunag / http://www.sunag.com.br/ - */ - -import { NodeMaterial } from '../materials/NodeMaterial.js'; -import { ScreenNode } from '../inputs/ScreenNode.js'; - -function NodePass() { - - THREE.ShaderPass.call( this ); - - this.name = ""; - this.uuid = THREE.Math.generateUUID(); - - this.userData = {}; - - this.textureID = 'renderTexture'; - - this.input = new ScreenNode(); - - this.material = new NodeMaterial(); - - this.needsUpdate = true; - -} - -NodePass.prototype = Object.create( THREE.ShaderPass.prototype ); -NodePass.prototype.constructor = NodePass; - -NodePass.prototype.render = function () { - - if ( this.needsUpdate ) { - - this.material.dispose(); - - this.material.fragment.value = this.input; - - this.needsUpdate = false; - - } - - this.uniforms = this.material.uniforms; - - THREE.ShaderPass.prototype.render.apply( this, arguments ); - -}; - -NodePass.prototype.copy 
= function ( source ) { - - this.input = source.input; - -}; - -NodePass.prototype.toJSON = function ( meta ) { - - var isRootObject = ( meta === undefined || typeof meta === 'string' ); - - if ( isRootObject ) { - - meta = { - nodes: {} - }; - - } - - if ( meta && ! meta.passes ) meta.passes = {}; - - if ( ! meta.passes[ this.uuid ] ) { - - var data = {}; - - data.uuid = this.uuid; - data.type = "NodePass"; - - meta.passes[ this.uuid ] = data; - - if ( this.name !== "" ) data.name = this.name; - - if ( JSON.stringify( this.userData ) !== '{}' ) data.userData = this.userData; - - data.input = this.input.toJSON( meta ).uuid; - - } - - meta.pass = this.uuid; - - return meta; - -}; - -export { NodePass }; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/math/Vector2.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/math/Vector2.d.ts deleted file mode 100644 index 75327683df39b3b791f5ac16507d2dbbe362d96a..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/math/Vector2.d.ts +++ /dev/null @@ -1,449 +0,0 @@ -import { Vector4 } from './Vector4'; -import { Matrix3 } from './Matrix3'; -import { BufferAttribute } from './../core/BufferAttribute'; - -/** - * ( interface Vector<T> ) - * - * Abstract interface of Vector2, Vector3 and Vector4. - * Currently the members of Vector is NOT type safe because it accepts different typed vectors. - * Those definitions will be changed when TypeScript innovates Generics to be type safe. - * - * @example - * var v:THREE.Vector = new THREE.Vector3(); - * v.addVectors(new THREE.Vector2(0, 1), new THREE.Vector2(2, 3)); // invalid but compiled successfully - */ -export interface Vector { - setComponent(index: number, value: number): this; - - getComponent(index: number): number; - - set(...args: number[]): this; - - setScalar(scalar: number): this; - - /** - * copy(v:T):T; - */ - copy(v: Vector): this; - - /** - * NOTE: The second argument is deprecated. - * - * add(v:T):T; - */ - add(v: Vector, w?: Vector): this; - - /** - * addVectors(a:T, b:T):T; - */ - addVectors(a: Vector, b: Vector): this; - - addScaledVector(vector: Vector, scale: number): this; - - /** - * Adds the scalar value s to this vector's values. - */ - addScalar(scalar: number): this; - - /** - * sub(v:T):T; - */ - sub(v: Vector): this; - - /** - * subVectors(a:T, b:T):T; - */ - subVectors(a: Vector, b: Vector): this; - - /** - * multiplyScalar(s:number):T; - */ - multiplyScalar(s: number): this; - - /** - * divideScalar(s:number):T; - */ - divideScalar(s: number): this; - - /** - * negate():T; - */ - negate(): this; - - /** - * dot(v:T):T; - */ - dot(v: Vector): number; - - /** - * lengthSq():number; - */ - lengthSq(): number; - - /** - * length():number; - */ - length(): number; - - /** - * normalize():T; - */ - normalize(): this; - - /** - * NOTE: Vector4 doesn't have the property. - * - * distanceTo(v:T):number; - */ - distanceTo?(v: Vector): number; - - /** - * NOTE: Vector4 doesn't have the property. - * - * distanceToSquared(v:T):number; - */ - distanceToSquared?(v: Vector): number; - - /** - * setLength(l:number):T; - */ - setLength(l: number): this; - - /** - * lerp(v:T, alpha:number):T; - */ - lerp(v: Vector, alpha: number): this; - - /** - * equals(v:T):boolean; - */ - equals(v: Vector): boolean; - - /** - * clone():T; - */ - clone(): this; -} - -/** - * 2D vector. 
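- *
- * A short usage sketch (illustrative only):
- *
- * @example
- * var a = new THREE.Vector2( 0, 1 );
- * var b = new THREE.Vector2( 2, 3 );
- * a.add( b ); // a is now ( 2, 4 )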
- * - * ( class Vector2 implements Vector ) - */ -export class Vector2 implements Vector { - constructor(x?: number, y?: number); - - x: number; - y: number; - width: number; - height: number; - isVector2: true; - - /** - * Sets value of this vector. - */ - set(x: number, y: number): this; - - /** - * Sets the x and y values of this vector both equal to scalar. - */ - setScalar(scalar: number): this; - - /** - * Sets X component of this vector. - */ - setX(x: number): this; - - /** - * Sets Y component of this vector. - */ - setY(y: number): this; - - /** - * Sets a component of this vector. - */ - setComponent(index: number, value: number): this; - - /** - * Gets a component of this vector. - */ - getComponent(index: number): number; - - /** - * Returns a new Vector2 instance with the same `x` and `y` values. - */ - clone(): this; - - /** - * Copies value of v to this vector. - */ - copy(v: Vector2): this; - - /** - * Adds v to this vector. - */ - add(v: Vector2, w?: Vector2): this; - - /** - * Adds the scalar value s to this vector's x and y values. - */ - addScalar(s: number): this; - - /** - * Sets this vector to a + b. - */ - addVectors(a: Vector2, b: Vector2): this; - - /** - * Adds the multiple of v and s to this vector. - */ - addScaledVector(v: Vector2, s: number): this; - - /** - * Subtracts v from this vector. - */ - sub(v: Vector2): this; - - /** - * Subtracts s from this vector's x and y components. - */ - subScalar(s: number): this; - - /** - * Sets this vector to a - b. - */ - subVectors(a: Vector2, b: Vector2): this; - - /** - * Multiplies this vector by v. - */ - multiply(v: Vector2): this; - - /** - * Multiplies this vector by scalar s. - */ - multiplyScalar(scalar: number): this; - - /** - * Divides this vector by v. - */ - divide(v: Vector2): this; - - /** - * Divides this vector by scalar s. - * Set vector to ( 0, 0 ) if s == 0. - */ - divideScalar(s: number): this; - - /** - * Multiplies this vector (with an implicit 1 as the 3rd component) by m. - */ - applyMatrix3(m: Matrix3): this; - - /** - * If this vector's x or y value is greater than v's x or y value, replace that value with the corresponding min value. - */ - min(v: Vector2): this; - - /** - * If this vector's x or y value is less than v's x or y value, replace that value with the corresponding max value. - */ - max(v: Vector2): this; - - /** - * If this vector's x or y value is greater than the max vector's x or y value, it is replaced by the corresponding value. - * If this vector's x or y value is less than the min vector's x or y value, it is replaced by the corresponding value. - * @param min the minimum x and y values. - * @param max the maximum x and y values in the desired range. - */ - clamp(min: Vector2, max: Vector2): this; - - /** - * If this vector's x or y values are greater than the max value, they are replaced by the max value. - * If this vector's x or y values are less than the min value, they are replaced by the min value. - * @param min the minimum value the components will be clamped to. - * @param max the maximum value the components will be clamped to. - */ - clampScalar(min: number, max: number): this; - - /** - * If this vector's length is greater than the max value, it is replaced by the max value. - * If this vector's length is less than the min value, it is replaced by the min value. - * @param min the minimum value the length will be clamped to. - * @param max the maximum value the length will be clamped to. 
- */ - clampLength(min: number, max: number): this; - - /** - * The components of the vector are rounded down to the nearest integer value. - */ - floor(): this; - - /** - * The x and y components of the vector are rounded up to the nearest integer value. - */ - ceil(): this; - - /** - * The components of the vector are rounded to the nearest integer value. - */ - round(): this; - - /** - * The components of the vector are rounded towards zero (up if negative, down if positive) to an integer value. - */ - roundToZero(): this; - - /** - * Inverts this vector. - */ - negate(): this; - - /** - * Computes dot product of this vector and v. - */ - dot(v: Vector2): number; - - /** - * Computes squared length of this vector. - */ - lengthSq(): number; - - /** - * Computes length of this vector. - */ - length(): number; - - /** - * @deprecated Use {@link Vector2#manhattanLength .manhattanLength()} instead. - */ - lengthManhattan(): number; - - /** - * Computes the Manhattan length of this vector. - * - * @return {number} - * - * @see {@link http://en.wikipedia.org/wiki/Taxicab_geometry|Wikipedia: Taxicab Geometry} - */ - manhattanLength(): number; - - /** - * Normalizes this vector. - */ - normalize(): this; - - /** - * computes the angle in radians with respect to the positive x-axis - */ - angle(): number; - - /** - * Computes distance of this vector to v. - */ - distanceTo(v: Vector2): number; - - /** - * Computes squared distance of this vector to v. - */ - distanceToSquared(v: Vector2): number; - - /** - * @deprecated Use {@link Vector2#manhattanDistanceTo .manhattanDistanceTo()} instead. - */ - distanceToManhattan(v: Vector2): number; - - /** - * Computes the Manhattan length (distance) from this vector to the given vector v - * - * @param {Vector2} v - * - * @return {number} - * - * @see {@link http://en.wikipedia.org/wiki/Taxicab_geometry|Wikipedia: Taxicab Geometry} - */ - manhattanDistanceTo(v: Vector2): number; - - /** - * Normalizes this vector and multiplies it by l. - */ - setLength(length: number): this; - - /** - * Linearly interpolates between this vector and v, where alpha is the distance along the line - alpha = 0 will be this vector, and alpha = 1 will be v. - * @param v vector to interpolate towards. - * @param alpha interpolation factor in the closed interval [0, 1]. - */ - lerp(v: Vector2, alpha: number): this; - - /** - * Sets this vector to be the vector linearly interpolated between v1 and v2 where alpha is the distance along the line connecting the two vectors - alpha = 0 will be v1, and alpha = 1 will be v2. - * @param v1 the starting vector. - * @param v2 vector to interpolate towards. - * @param alpha interpolation factor in the closed interval [0, 1]. - */ - lerpVectors(v1: Vector2, v2: Vector2, alpha: number): this; - - /** - * Checks for strict equality of this vector and v. - */ - equals(v: Vector2): boolean; - - /** - * Sets this vector's x value to be array[offset] and y value to be array[offset + 1]. - * @param array the source array. - * @param offset (optional) offset into the array. Default is 0. - */ - fromArray(array: number[], offset?: number): this; - - /** - * Returns an array [x, y], or copies x and y into the provided array. - * @param array (optional) array to store the vector to. If this is not provided, a new array will be created. - * @param offset (optional) optional offset into the array. - * @return The created or provided array. - */ - toArray(array?: number[], offset?: number): number[]; - - /** - * Copies x and y into the provided array-like. 
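- * (For example, a Float32Array or a plain Array of numbers.)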
- * @param array array-like to store the vector to. - * @param offset (optional) optional offset into the array. - * @return The provided array-like. - */ - toArray(array: ArrayLike, offset?: number): ArrayLike; - - /** - * Sets this vector's x and y values from the attribute. - * @param attribute the source attribute. - * @param index index in the attribute. - */ - fromBufferAttribute(attribute: BufferAttribute, index: number): this; - - /** - * Rotates the vector around center by angle radians. - * @param center the point around which to rotate. - * @param angle the angle to rotate, in radians. - */ - rotateAround(center: Vector2, angle: number): this; - - /** - * Computes the Manhattan length of this vector. - * - * @return {number} - * - * @see {@link http://en.wikipedia.org/wiki/Taxicab_geometry|Wikipedia: Taxicab Geometry} - */ - manhattanLength(): number; - - /** - * Computes the Manhattan length (distance) from this vector to the given vector v - * - * @param {Vector2} v - * - * @return {number} - * - * @see {@link http://en.wikipedia.org/wiki/Taxicab_geometry|Wikipedia: Taxicab Geometry} - */ - manhattanDistanceTo(v: Vector2): number; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/bumpmap_pars_fragment.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/bumpmap_pars_fragment.glsl.js deleted file mode 100644 index a3d05938d3b4cd5c0cc7b1c90c08d476ab792f39..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/bumpmap_pars_fragment.glsl.js +++ /dev/null @@ -1,46 +0,0 @@ -export default /* glsl */` -#ifdef USE_BUMPMAP - - uniform sampler2D bumpMap; - uniform float bumpScale; - - // Bump Mapping Unparametrized Surfaces on the GPU by Morten S. Mikkelsen - // http://api.unrealengine.com/attachments/Engine/Rendering/LightingAndShadows/BumpMappingWithoutTangentSpace/mm_sfgrad_bump.pdf - - // Evaluate the derivative of the height w.r.t. screen-space using forward differencing (listing 2) - - vec2 dHdxy_fwd() { - - vec2 dSTdx = dFdx( vUv ); - vec2 dSTdy = dFdy( vUv ); - - float Hll = bumpScale * texture2D( bumpMap, vUv ).x; - float dBx = bumpScale * texture2D( bumpMap, vUv + dSTdx ).x - Hll; - float dBy = bumpScale * texture2D( bumpMap, vUv + dSTdy ).x - Hll; - - return vec2( dBx, dBy ); - - } - - vec3 perturbNormalArb( vec3 surf_pos, vec3 surf_norm, vec2 dHdxy ) { - - // Workaround for Adreno 3XX dFd*( vec3 ) bug. 
See #9988 - - vec3 vSigmaX = vec3( dFdx( surf_pos.x ), dFdx( surf_pos.y ), dFdx( surf_pos.z ) ); - vec3 vSigmaY = vec3( dFdy( surf_pos.x ), dFdy( surf_pos.y ), dFdy( surf_pos.z ) ); - vec3 vN = surf_norm; // normalized - - vec3 R1 = cross( vSigmaY, vN ); - vec3 R2 = cross( vN, vSigmaX ); - - float fDet = dot( vSigmaX, R1 ); - - fDet *= ( float( gl_FrontFacing ) * 2.0 - 1.0 ); - - vec3 vGrad = sign( fDet ) * ( dHdxy.x * R1 + dHdxy.y * R2 ); - return normalize( abs( fDet ) * surf_norm - vGrad ); - - } - -#endif -`; diff --git a/spaces/bayartsogt/whisper-demo-mongolian/app.py b/spaces/bayartsogt/whisper-demo-mongolian/app.py deleted file mode 100644 index 79609fb9cd8a8a7eeee9df957d91ebe1edcbe6c8..0000000000000000000000000000000000000000 --- a/spaces/bayartsogt/whisper-demo-mongolian/app.py +++ /dev/null @@ -1,116 +0,0 @@ -import gradio as gr -import numpy as np -import os, time, librosa, torch -from pyannote.audio import Pipeline -from transformers import pipeline -from utils import second_to_timecode, download_from_youtube - -MODEL_NAME = 'bayartsogt/whisper-large-v2-mn-13' -lang = 'mn' - -chunk_length_s = 9 -vad_activation_min_duration = 9 # sec -device = 0 if torch.cuda.is_available() else "cpu" -SAMPLE_RATE = 16_000 - -######## LOAD MODELS FROM HUB ######## -dia_model = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=os.environ['TOKEN']) -vad_model = Pipeline.from_pretrained("pyannote/voice-activity-detection", use_auth_token=os.environ['TOKEN']) -pipe = pipeline(task="automatic-speech-recognition", model=MODEL_NAME, chunk_length_s=chunk_length_s, device=device) -pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe") - -print("----------> Loaded models <-----------") - -def generator(youtube_link, microphone, file_upload, num_speakers, max_duration, history): - - if int(youtube_link != '') + int(microphone is not None) + int(file_upload is not None) != 1: - raise Exception(f"Only one of the source should be given youtube_link={youtube_link}, microphone={microphone}, file_upload={file_upload}") - - history = history or "" - - if microphone: - path = microphone - elif file_upload: - path = file_upload - elif youtube_link: - path = download_from_youtube(youtube_link) - - waveform, sampling_rate = librosa.load(path, sr=SAMPLE_RATE, mono=True, duration=max_duration) - - print(waveform.shape, sampling_rate) - waveform_tensor = torch.unsqueeze(torch.tensor(waveform), 0).to(device) - - dia_result = dia_model({ - "waveform": waveform_tensor, - "sample_rate": sampling_rate, - }, num_speakers=num_speakers) - - counter = 1 - - for speech_turn, track, speaker in dia_result.itertracks(yield_label=True): - print(f"{speech_turn.start:4.1f} {speech_turn.end:4.1f} {speaker}") - _start = int(sampling_rate * speech_turn.start) - _end = int(sampling_rate * speech_turn.end) - data = waveform[_start: _end] - - if speech_turn.end - speech_turn.start > vad_activation_min_duration: - print(f'audio duration {speech_turn.end - speech_turn.start} sec ----> activating VAD') - vad_output = vad_model({ - 'waveform': waveform_tensor[:, _start:_end], - 'sample_rate': sampling_rate}) - for vad_turn in vad_output.get_timeline().support(): - vad_start = _start + int(sampling_rate * vad_turn.start) - vad_end = _start + int(sampling_rate * vad_turn.end) - prediction = pipe(waveform[vad_start: vad_end])['text'] - history += f"{counter}\n" + \ - f"{second_to_timecode(speech_turn.start + vad_turn.start)} --> {second_to_timecode(speech_turn.start + 
vad_turn.end)}\n" + \ - f"{prediction}\n\n" - # f">> {speaker}: {prediction}\n\n" - yield history, history, None - counter += 1 - - else: - prediction = pipe(data)['text'] - history += f"{counter}\n" + \ - f"{second_to_timecode(speech_turn.start)} --> {second_to_timecode(speech_turn.end)}\n" + \ - f"{prediction}\n\n" - # f">> {speaker}: {prediction}\n\n" - counter += 1 - yield history, history, None - - # https://support.google.com/youtube/answer/2734698?hl=en#zippy=%2Cbasic-file-formats%2Csubrip-srt-example%2Csubviewer-sbv-example - file_name = 'transcript.srt' - with open(file_name, 'w') as fp: - fp.write(history) - - yield history, history, file_name - -demo = gr.Interface( - generator, - inputs=[ - gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL", optional=True), - gr.inputs.Audio(source="microphone", type="filepath", optional=True), - gr.inputs.Audio(source="upload", type="filepath", optional=True), - gr.Number(value=1, label="Number of Speakers"), - gr.Number(value=120, label="Maximum Duration (Seconds)"), - 'state', - ], - outputs=['text', 'state', 'file'], - layout="horizontal", - theme="huggingface", - title="Transcribe Mongolian Whisper 🇲🇳", - description=( - "Transcribe Youtube Video / Microphone / Uploaded File in Mongolian Whisper Model." + \ - " | You can upload SubRip file (`.srt`) [to your youtube video](https://support.google.com/youtube/answer/2734698?hl=en#zippy=%2Cbasic-file-formats)." + \ - " | Please REFRESH 🔄 the page after you transcribed!" + \ - " | 🐦 [@_tsogoo_](https://twitter.com/_tsogoo_)" + \ - " | 🤗 [@bayartsogt](https://huggingface.co/bayartsogt)" + \ - "" - ), - allow_flagging="never", -) - -# define queue - required for generators -demo.queue() - -demo.launch() \ No newline at end of file diff --git a/spaces/bhasker412/IDD-YOLO-Tracking/utils/google_utils.py b/spaces/bhasker412/IDD-YOLO-Tracking/utils/google_utils.py deleted file mode 100644 index f363408e63981702e63dcda189cbc2099d0a9499..0000000000000000000000000000000000000000 --- a/spaces/bhasker412/IDD-YOLO-Tracking/utils/google_utils.py +++ /dev/null @@ -1,123 +0,0 @@ -# Google utils: https://cloud.google.com/storage/docs/reference/libraries - -import os -import platform -import subprocess -import time -from pathlib import Path - -import requests -import torch - - -def gsutil_getsize(url=''): - # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes - - -def attempt_download(file, repo='WongKinYiu/yolov7'): - # Attempt file download if does not exist - file = Path(str(file).strip().replace("'", '').lower()) - - if not file.exists(): - try: - response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api - assets = [x['name'] for x in response['assets']] # release assets - tag = response['tag_name'] # i.e. 
'v1.0' - except: # fallback plan - assets = ['yolov7.pt', 'yolov7-tiny.pt', 'yolov7x.pt', 'yolov7-d6.pt', 'yolov7-e6.pt', - 'yolov7-e6e.pt', 'yolov7-w6.pt'] - tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] - - name = file.name - if name in assets: - msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' - redundant = False # second download option - try: # GitHub - url = f'https://github.com/{repo}/releases/download/{tag}/{name}' - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert file.exists() and file.stat().st_size > 1E6 # check - except Exception as e: # GCP - print(f'Download error: {e}') - assert redundant, 'No secondary mirror' - url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' - print(f'Downloading {url} to {file}...') - os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) - finally: - if not file.exists() or file.stat().st_size < 1E6: # check - file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {msg}') - print('') - return - - -def gdrive_download(id='', file='tmp.zip'): - # Downloads a file from Google Drive. from yolov7.utils.google_utils import *; gdrive_download() - t = time.time() - file = Path(file) - cookie = Path('cookie') # gdrive cookie - print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') - file.unlink(missing_ok=True) # remove existing file - cookie.unlink(missing_ok=True) # remove existing cookie - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') - if os.path.exists('cookie'): # large file - s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' - else: # small file - s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' - r = os.system(s) # execute, capture return - cookie.unlink(missing_ok=True) # remove existing cookie - - # Error check - if r != 0: - file.unlink(missing_ok=True) # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if file.suffix == '.zip': - print('unzipping... 
', end='') - os.system(f'unzip -q {file}') # unzip - file.unlink() # remove zip to free space - - print(f'Done ({time.time() - t:.1f}s)') - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/spaces/bigcode/bigcode-models-leaderboard/src/build.py b/spaces/bigcode/bigcode-models-leaderboard/src/build.py deleted file mode 100644 index ea66c4903229e51bf062b5cd883a3d6476efd853..0000000000000000000000000000000000000000 --- a/spaces/bigcode/bigcode-models-leaderboard/src/build.py +++ /dev/null @@ -1,99 +0,0 @@ -import pandas as pd - - -def add_model_readme(df): - # write model ids to README.md - with open("README.md", "r") as f: - lines = f.readlines() - - links = df["Links"].astype(str) - for link in links: - try: - model_id = link.split(".co/")[1] - # verify line doesn't exist - if f"- {model_id}\n" in lines: - continue - lines.insert(-1, f"- {model_id}\n") - except IndexError: - print(f"link {link} is not valid") - - with open("README.md", "w") as f: - f.writelines(lines) - -df = pd.read_csv("data/raw_scores.csv") -COLS = df.columns.to_list() -# add column models_query with same values a smodels at the end of columns -df.insert(len(COLS), "models_query", df["Models"]) -print(f"all cols {df.columns.to_list()}") -# average score -mean_columns = df.iloc[:,5:-3] -# print cols in mean_columns -print("cols", mean_columns.columns.to_list()) -df.insert(len(mean_columns.columns.to_list()), "Average score", mean_columns.mean(axis=1).round(2)) - -# add win rate columns for each language -old_size = len(df.columns) - -for col in df.columns[6:-2]: - df[col + " rank"] = df[col].rank(ascending=False) - df[col + " rank"] = len(df) - (df[col + " rank"] - 1) -df["Win Rate"] = df.iloc[:, old_size:].mean(axis=1).round(2) -df = df.drop(df.columns[old_size:-1], axis=1) -df = df[["Models", "Size (B)", "Win Rate"] + df.columns[2:-1].tolist()] - -# sort with regard to column win rate -df = df.sort_values(by=["Win Rate"], ascending=False) -# add column with model links as https://huggingface.co/WizardLM/WizardCoder-15B-V1.0, https://huggingface.co/bigcode/starcoder, https://huggingface.co/bigcode/starcoderbase, https://huggingface.co/bigcode/starcoderbase-7b, -# https://huggingface.co/bigcode/starcoderbase-3b, https://huggingface.co/bigcode/starcoderbase-1b, https://huggingface.co/bigcode/santacoder, https://huggingface.co/replit/replit-code-v1-3b, https://huggingface.co/THUDM/codegeex2-6b - -links = { - "WizardCoder-15B-V1.0": "https://huggingface.co/WizardLM/WizardCoder-15B-V1.0", - "WizardCoder-3B-V1.0": 
"https://huggingface.co/WizardLM/WizardCoder-3B-V1.0", - "WizardCoder-1B-V1.0": "https://huggingface.co/WizardLM/WizardCoder-1B-V1.0", - "WizardCoder-Python-34B-V1.0": "https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0", - "WizardCoder-Python-13B-V1.0": "https://huggingface.co/WizardLM/WizardCoder-Python-13B-V1.0", - "OctoCoder-15B": "https://huggingface.co/bigcode/octocoder", - "OctoGeeX-7B": "https://huggingface.co/bigcode/octogeex", - "StableCode-3B": "https://huggingface.co/stabilityai/stablecode-completion-alpha-3b", - "StarCoder-15B": "https://huggingface.co/bigcode/starcoder", - "StarCoderBase-15B": "https://huggingface.co/bigcode/starcoderbase", - "StarCoderBase-7B": "https://huggingface.co/bigcode/starcoderbase-7b", - "StarCoderBase-3B": "https://huggingface.co/bigcode/starcoderbase-3b", - "StarCoderBase-1.1B": "https://huggingface.co/bigcode/starcoderbase-1b", - "SantaCoder-1.1B": "https://huggingface.co/bigcode/santacoder", - "Replit-2.7B": "https://huggingface.co/replit/replit-code-v1-3b", - "CodeGeex2-6B": "https://huggingface.co/THUDM/codegeex2-6b", - "CodeGen25-7B-multi": "https://huggingface.co/Salesforce/codegen25-7b-multi", - "CodeGen25-7B-mono": "https://huggingface.co/Salesforce/codegen25-7b-mono", - "CodeGen-16B-Multi": "https://huggingface.co/Salesforce/codegen-16B-multi", - "DeciCoder-1B": "https://huggingface.co/Deci/DeciCoder-1b", - "Phind-CodeLlama-34B-v1": "https://huggingface.co/phind/Phind-CodeLlama-34B-v1", - "Phind-CodeLlama-34B-Python-v1": "https://huggingface.co/phind/Phind-CodeLlama-34B-Python-v1", - "Phind-CodeLlama-34B-v2": "https://huggingface.co/phind/Phind-CodeLlama-34B-v2", - "Falcon-180B": "https://huggingface.co/tiiuae/falcon-180B", - "Refact-1.6B": "https://huggingface.co/smallcloudai/Refact-1_6B-fim", - "Phi-1": "https://huggingface.co/microsoft/phi-1", - "CodeShell-7B": "https://huggingface.co/WisdomShell/CodeShell-7B", -} - -codellamas = ['CodeLlama-7b', 'CodeLlama-7b-Python', 'CodeLlama-7b-Instruct', 'CodeLlama-13b', 'CodeLlama-13b-Python', 'CodeLlama-13b-Instruct', 'CodeLlama-34b', 'CodeLlama-34b-Python', 'CodeLlama-34b-Instruct'] -for codellama in codellamas: - links[codellama] = f"https://huggingface.co/codellama/{codellama}-hf" - -df["Links"] = df["Models"].map(links) - -df.insert(0, "T", "🟢") -patterns = ["WizardCoder", "Octo", "Instruct", "Phind", "Refact"] -df.loc[df["Models"].str.contains('|'.join(patterns)), "T"] = "🔶" -df.loc[df["Models"].str.contains('|'.join(patterns)), "T"] = "🔶" -df.loc[df["Models"].str.contains('|'.join(["CodeShell"])), "T"] = "🔴" -# add clumn submission_pr with empty fiels except for CodeShell with link AA -df["Submission PR"] = "" -df.loc[df["Models"].str.contains('|'.join(["CodeShell"])), "Submission PR"] = "[PR/16](https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard/discussions/16)" -# print first 5 rows and 10 cols -print(df.iloc[:5, :-1]) -df.to_csv("data/code_eval_board.csv", index=False) - -# fill readme -add_model_readme(df) -print("Readme filled") \ No newline at end of file diff --git a/spaces/bigcode/near-deduplication/app.py b/spaces/bigcode/near-deduplication/app.py deleted file mode 100644 index 460507dd40cfce1a669f33049f9d06f9c6a5dfe5..0000000000000000000000000000000000000000 --- a/spaces/bigcode/near-deduplication/app.py +++ /dev/null @@ -1,126 +0,0 @@ -import streamlit as st -import plotly.graph_objects as go -import numpy as np -import scipy.integrate as integrate - -def _false_positive_probability(threshold, b, r): - def _probability(s): - return 1 - (1 - s ** float(r)) 
** float(b) - a, err = integrate.quad(_probability, 0.0, threshold) - return a - - -def _false_negative_probability(threshold, b, r): - def _probability(s): - return 1 - (1 - (1 - s ** float(r)) ** float(b)) - - a, err = integrate.quad(_probability, threshold, 1.0) - return a - - -def _optimal_param(threshold, num_perm, false_positive_weight, false_negative_weight): - """ - Compute the optimal `MinHashLSH` parameter that minimizes the weighted sum - of probabilities of false positive and false negative. - """ - min_error = float("inf") - opt = (0, 0) - for b in range(1, num_perm + 1): - max_r = int(num_perm / b) - for r in range(1, max_r + 1): - fp = _false_positive_probability(threshold, b, r) - fn = _false_negative_probability(threshold, b, r) - error = fp * false_positive_weight + fn * false_negative_weight - if error < min_error: - min_error = error - opt = (b, r) - return opt - - -col1, col2 = st.columns(2) -s = col1.slider("Select a Jaccard similarity", 0.0, 1.0, 0.1) -p = col2.slider("Select a number of permutations", 0, 1000, 10) -optimal_b, optimal_r = _optimal_param(s, p, 1, 1) - -b = col1.slider("Select a number of bands", 1, 100, 1) -r = col2.slider("Select a number of rows per band", 1, 100, 1) - -col1.metric(label="Optimal number of bands", value=optimal_b) -col2.metric(label="Optimal number of rows per band", value=optimal_r) - -st.markdown("---") - -st.markdown(f"Two documents that have a Jaccard similarity of $s={s}$ will have:") -st.markdown(f"1. ${s * 100:.2f}\%$ of their k-shingles will be the same") -st.markdown(f"2. ${s * 100:.2f}\%$ of their k-shingles' hashes will be the same") -st.markdown(f"3. ${s * 100:.2f}\%$ of the time, a particular hash will be the same for two documents") -st.markdown( - f"4. $s^r={100 * s ** r:.2f}\%$ of the time, they will have the same hashes for a particular band of $r={r}$ rows" -) -st.markdown( - f"5. $1 - s^r = {100 * (1 - s ** r):.2f}\%$ of the time, they will have at least one different hash for a particular band" -) -st.markdown( - f"6. $(1 - s^r)^b = {100 * (1 - s ** r)**b:.2f}\%$ of the time, they will have at least one different hash for all $b={b}$ bands" -) -st.markdown( - f"7. 
$1 - (1 - s^r)^b={100 * (1 - (1 - s ** r)**b):.2f}\%$ of the time, they will have at least one band with the same hashes" -) - -t = st.slider("Select a Jaccard similarity threshold", 0.0, 1.0, 0.1) - -x = np.linspace(0, 1, 1000) -y = 1 - (1 - x**r) ** b - -fig = go.Figure( - data=go.Scatter( - x=x, - y=y, - showlegend=False, - ) -) -fig = fig.add_shape( - type="line", - x0=t, - y0=0, - x1=t, - y1=1, - line=dict( - color="Red", - width=4, - ), -) -false_positive_x = [d for d in x if d <= t] + [t] -false_positive_y = [d for i, d in enumerate(y) if x[i] <= t] + [0] -fig.add_trace( - go.Scatter( - x=false_positive_x, - y=false_positive_y, - fill="tozeroy", - fillcolor="rgba(255, 0, 0, 0.2)", - line_color="rgba(255, 0, 0, 0)", - showlegend=False, - ) -) - -false_negative_x = [d for d in x if d > t] -false_negative_y = [d for i, d in enumerate(y) if x[i] > t] -fig.add_trace( - go.Scatter( - x=[t] + false_negative_x + [1], - y=[1] + false_negative_y + [1], - fill="toself", - fillcolor="rgba(0, 255, 0, 0.2)", - line_color="rgba(0, 255, 0, 0)", - showlegend=False, - ) -) - -st.plotly_chart(fig) - -false_positive = integrate.quad(lambda x: 1 - (1 - x**r) ** b, 0, t)[0] -false_negative = integrate.quad(lambda x: (1 - x**r) ** b, t, 1)[0] - -cols = st.columns(2) -cols[0].metric(label="False positive area", value=f"{false_positive:.2f}") -cols[1].metric(label="False negative area", value=f"{false_negative:.2f}") \ No newline at end of file diff --git a/spaces/bigjoker/stable-diffusion-webui/scripts/xyz_grid.py b/spaces/bigjoker/stable-diffusion-webui/scripts/xyz_grid.py deleted file mode 100644 index e457d53de2ade37a53cc200d6902323d3d6f25ce..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/scripts/xyz_grid.py +++ /dev/null @@ -1,620 +0,0 @@ -from collections import namedtuple -from copy import copy -from itertools import permutations, chain -import random -import csv -from io import StringIO -from PIL import Image -import numpy as np - -import modules.scripts as scripts -import gradio as gr - -from modules import images, paths, sd_samplers, processing, sd_models, sd_vae -from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img -from modules.shared import opts, cmd_opts, state -import modules.shared as shared -import modules.sd_samplers -import modules.sd_models -import modules.sd_vae -import glob -import os -import re - -from modules.ui_components import ToolButton - -fill_values_symbol = "\U0001f4d2" # 📒 - -AxisInfo = namedtuple('AxisInfo', ['axis', 'values']) - - -def apply_field(field): - def fun(p, x, xs): - setattr(p, field, x) - - return fun - - -def apply_prompt(p, x, xs): - if xs[0] not in p.prompt and xs[0] not in p.negative_prompt: - raise RuntimeError(f"Prompt S/R did not find {xs[0]} in prompt or negative prompt.") - - p.prompt = p.prompt.replace(xs[0], x) - p.negative_prompt = p.negative_prompt.replace(xs[0], x) - - -def apply_order(p, x, xs): - token_order = [] - - # Initally grab the tokens from the prompt, so they can be replaced in order of earliest seen - for token in x: - token_order.append((p.prompt.find(token), token)) - - token_order.sort(key=lambda t: t[0]) - - prompt_parts = [] - - # Split the prompt up, taking out the tokens - for _, token in token_order: - n = p.prompt.find(token) - prompt_parts.append(p.prompt[0:n]) - p.prompt = p.prompt[n + len(token):] - - # Rebuild the prompt with the tokens in the order we want - prompt_tmp = "" - for idx, part in enumerate(prompt_parts): - prompt_tmp += part - 
prompt_tmp += x[idx] - p.prompt = prompt_tmp + p.prompt - - -def apply_sampler(p, x, xs): - sampler_name = sd_samplers.samplers_map.get(x.lower(), None) - if sampler_name is None: - raise RuntimeError(f"Unknown sampler: {x}") - - p.sampler_name = sampler_name - - -def confirm_samplers(p, xs): - for x in xs: - if x.lower() not in sd_samplers.samplers_map: - raise RuntimeError(f"Unknown sampler: {x}") - - -def apply_checkpoint(p, x, xs): - info = modules.sd_models.get_closet_checkpoint_match(x) - if info is None: - raise RuntimeError(f"Unknown checkpoint: {x}") - modules.sd_models.reload_model_weights(shared.sd_model, info) - - -def confirm_checkpoints(p, xs): - for x in xs: - if modules.sd_models.get_closet_checkpoint_match(x) is None: - raise RuntimeError(f"Unknown checkpoint: {x}") - - -def apply_clip_skip(p, x, xs): - opts.data["CLIP_stop_at_last_layers"] = x - - -def apply_upscale_latent_space(p, x, xs): - if x.lower().strip() != '0': - opts.data["use_scale_latent_for_hires_fix"] = True - else: - opts.data["use_scale_latent_for_hires_fix"] = False - - -def find_vae(name: str): - if name.lower() in ['auto', 'automatic']: - return modules.sd_vae.unspecified - if name.lower() == 'none': - return None - else: - choices = [x for x in sorted(modules.sd_vae.vae_dict, key=lambda x: len(x)) if name.lower().strip() in x.lower()] - if len(choices) == 0: - print(f"No VAE found for {name}; using automatic") - return modules.sd_vae.unspecified - else: - return modules.sd_vae.vae_dict[choices[0]] - - -def apply_vae(p, x, xs): - modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=find_vae(x)) - - -def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _): - p.styles.extend(x.split(',')) - - -def format_value_add_label(p, opt, x): - if type(x) == float: - x = round(x, 8) - - return f"{opt.label}: {x}" - - -def format_value(p, opt, x): - if type(x) == float: - x = round(x, 8) - return x - - -def format_value_join_list(p, opt, x): - return ", ".join(x) - - -def do_nothing(p, x, xs): - pass - - -def format_nothing(p, opt, x): - return "" - - -def str_permutations(x): - """dummy function for specifying it in AxisOption's type when you want to get a list of permutations""" - return x - - -class AxisOption: - def __init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None): - self.label = label - self.type = type - self.apply = apply - self.format_value = format_value - self.confirm = confirm - self.cost = cost - self.choices = choices - - -class AxisOptionImg2Img(AxisOption): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.is_img2img = True - -class AxisOptionTxt2Img(AxisOption): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.is_img2img = False - - -axis_options = [ - AxisOption("Nothing", str, do_nothing, format_value=format_nothing), - AxisOption("Seed", int, apply_field("seed")), - AxisOption("Var. seed", int, apply_field("subseed")), - AxisOption("Var. 
strength", float, apply_field("subseed_strength")), - AxisOption("Steps", int, apply_field("steps")), - AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")), - AxisOption("CFG Scale", float, apply_field("cfg_scale")), - AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")), - AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), - AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), - AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), - AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), - AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: list(sd_models.checkpoints_list)), - AxisOption("Sigma Churn", float, apply_field("s_churn")), - AxisOption("Sigma min", float, apply_field("s_tmin")), - AxisOption("Sigma max", float, apply_field("s_tmax")), - AxisOption("Sigma noise", float, apply_field("s_noise")), - AxisOption("Eta", float, apply_field("eta")), - AxisOption("Clip skip", int, apply_clip_skip), - AxisOption("Denoising", float, apply_field("denoising_strength")), - AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), - AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")), - AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)), - AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)), -] - - -def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend, include_lone_images, include_sub_grids, first_axes_processed, second_axes_processed, margin_size): - hor_texts = [[images.GridAnnotation(x)] for x in x_labels] - ver_texts = [[images.GridAnnotation(y)] for y in y_labels] - title_texts = [[images.GridAnnotation(z)] for z in z_labels] - - # Temporary list of all the images that are generated to be populated into the grid. 
- # Will be filled with empty images for any individual step that fails to process properly - image_cache = [None] * (len(xs) * len(ys) * len(zs)) - - processed_result = None - cell_mode = "P" - cell_size = (1, 1) - - state.job_count = len(xs) * len(ys) * len(zs) * p.n_iter - - def process_cell(x, y, z, ix, iy, iz): - nonlocal image_cache, processed_result, cell_mode, cell_size - - def index(ix, iy, iz): - return ix + iy * len(xs) + iz * len(xs) * len(ys) - - state.job = f"{index(ix, iy, iz) + 1} out of {len(xs) * len(ys) * len(zs)}" - - processed: Processed = cell(x, y, z) - - try: - # this dereference will throw an exception if the image was not processed - # (this happens in cases such as if the user stops the process from the UI) - processed_image = processed.images[0] - - if processed_result is None: - # Use our first valid processed result as a template container to hold our full results - processed_result = copy(processed) - cell_mode = processed_image.mode - cell_size = processed_image.size - processed_result.images = [Image.new(cell_mode, cell_size)] - processed_result.all_prompts = [processed.prompt] - processed_result.all_seeds = [processed.seed] - processed_result.infotexts = [processed.infotexts[0]] - - image_cache[index(ix, iy, iz)] = processed_image - if include_lone_images: - processed_result.images.append(processed_image) - processed_result.all_prompts.append(processed.prompt) - processed_result.all_seeds.append(processed.seed) - processed_result.infotexts.append(processed.infotexts[0]) - except: - image_cache[index(ix, iy, iz)] = Image.new(cell_mode, cell_size) - - if first_axes_processed == 'x': - for ix, x in enumerate(xs): - if second_axes_processed == 'y': - for iy, y in enumerate(ys): - for iz, z in enumerate(zs): - process_cell(x, y, z, ix, iy, iz) - else: - for iz, z in enumerate(zs): - for iy, y in enumerate(ys): - process_cell(x, y, z, ix, iy, iz) - elif first_axes_processed == 'y': - for iy, y in enumerate(ys): - if second_axes_processed == 'x': - for ix, x in enumerate(xs): - for iz, z in enumerate(zs): - process_cell(x, y, z, ix, iy, iz) - else: - for iz, z in enumerate(zs): - for ix, x in enumerate(xs): - process_cell(x, y, z, ix, iy, iz) - elif first_axes_processed == 'z': - for iz, z in enumerate(zs): - if second_axes_processed == 'x': - for ix, x in enumerate(xs): - for iy, y in enumerate(ys): - process_cell(x, y, z, ix, iy, iz) - else: - for iy, y in enumerate(ys): - for ix, x in enumerate(xs): - process_cell(x, y, z, ix, iy, iz) - - if not processed_result: - print("Unexpected error: draw_xyz_grid failed to return even a single processed image") - return Processed(p, []) - - sub_grids = [None] * len(zs) - for i in range(len(zs)): - start_index = i * len(xs) * len(ys) - end_index = start_index + len(xs) * len(ys) - grid = images.image_grid(image_cache[start_index:end_index], rows=len(ys)) - if draw_legend: - grid = images.draw_grid_annotations(grid, cell_size[0], cell_size[1], hor_texts, ver_texts, margin_size) - sub_grids[i] = grid - if include_sub_grids and len(zs) > 1: - processed_result.images.insert(i+1, grid) - - sub_grid_size = sub_grids[0].size - z_grid = images.image_grid(sub_grids, rows=1) - if draw_legend: - z_grid = images.draw_grid_annotations(z_grid, sub_grid_size[0], sub_grid_size[1], title_texts, [[images.GridAnnotation()]]) - processed_result.images[0] = z_grid - - return processed_result, sub_grids - - -class SharedSettingsStackHelper(object): - def __enter__(self): - self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers - 
self.vae = opts.sd_vae - - def __exit__(self, exc_type, exc_value, tb): - opts.data["sd_vae"] = self.vae - modules.sd_models.reload_model_weights() - modules.sd_vae.reload_vae_weights() - - opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers - - -re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*") -re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*") - -re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*") -re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*") - - -class Script(scripts.Script): - def title(self): - return "X/Y/Z plot" - - def ui(self, is_img2img): - self.current_axis_options = [x for x in axis_options if type(x) == AxisOption or x.is_img2img == is_img2img] - - with gr.Row(): - with gr.Column(scale=19): - with gr.Row(): - x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type")) - x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values")) - fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False) - - with gr.Row(): - y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type")) - y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values")) - fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False) - - with gr.Row(): - z_type = gr.Dropdown(label="Z type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type")) - z_values = gr.Textbox(label="Z values", lines=1, elem_id=self.elem_id("z_values")) - fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False) - - with gr.Row(variant="compact", elem_id="axis_options"): - with gr.Column(): - draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend")) - no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=self.elem_id("no_fixed_seeds")) - with gr.Column(): - include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images")) - include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids")) - with gr.Column(): - margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size")) - - with gr.Row(variant="compact", elem_id="swap_axes"): - swap_xy_axes_button = gr.Button(value="Swap X/Y axes", elem_id="xy_grid_swap_axes_button") - swap_yz_axes_button = gr.Button(value="Swap Y/Z axes", elem_id="yz_grid_swap_axes_button") - swap_xz_axes_button = gr.Button(value="Swap X/Z axes", elem_id="xz_grid_swap_axes_button") - - def swap_axes(axis1_type, axis1_values, axis2_type, axis2_values): - return self.current_axis_options[axis2_type].label, axis2_values, self.current_axis_options[axis1_type].label, axis1_values - - xy_swap_args = [x_type, x_values, y_type, y_values] - swap_xy_axes_button.click(swap_axes, inputs=xy_swap_args, outputs=xy_swap_args) - yz_swap_args = [y_type, y_values, 
z_type, z_values] - swap_yz_axes_button.click(swap_axes, inputs=yz_swap_args, outputs=yz_swap_args) - xz_swap_args = [x_type, x_values, z_type, z_values] - swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args) - - def fill(x_type): - axis = self.current_axis_options[x_type] - return ", ".join(axis.choices()) if axis.choices else gr.update() - - fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values]) - fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values]) - fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values]) - - def select_axis(x_type): - return gr.Button.update(visible=self.current_axis_options[x_type].choices is not None) - - x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button]) - y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button]) - z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button]) - - self.infotext_fields = ( - (x_type, "X Type"), - (x_values, "X Values"), - (y_type, "Y Type"), - (y_values, "Y Values"), - (z_type, "Z Type"), - (z_values, "Z Values"), - ) - - return [x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] - - def run(self, p, x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size): - if not no_fixed_seeds: - modules.processing.fix_seed(p) - - if not opts.return_grid: - p.batch_size = 1 - - def process_axis(opt, vals): - if opt.label == 'Nothing': - return [0] - - valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))] - - if opt.type == int: - valslist_ext = [] - - for val in valslist: - m = re_range.fullmatch(val) - mc = re_range_count.fullmatch(val) - if m is not None: - start = int(m.group(1)) - end = int(m.group(2))+1 - step = int(m.group(3)) if m.group(3) is not None else 1 - - valslist_ext += list(range(start, end, step)) - elif mc is not None: - start = int(mc.group(1)) - end = int(mc.group(2)) - num = int(mc.group(3)) if mc.group(3) is not None else 1 - - valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()] - else: - valslist_ext.append(val) - - valslist = valslist_ext - elif opt.type == float: - valslist_ext = [] - - for val in valslist: - m = re_range_float.fullmatch(val) - mc = re_range_count_float.fullmatch(val) - if m is not None: - start = float(m.group(1)) - end = float(m.group(2)) - step = float(m.group(3)) if m.group(3) is not None else 1 - - valslist_ext += np.arange(start, end + step, step).tolist() - elif mc is not None: - start = float(mc.group(1)) - end = float(mc.group(2)) - num = int(mc.group(3)) if mc.group(3) is not None else 1 - - valslist_ext += np.linspace(start=start, stop=end, num=num).tolist() - else: - valslist_ext.append(val) - - valslist = valslist_ext - elif opt.type == str_permutations: - valslist = list(permutations(valslist)) - - valslist = [opt.type(x) for x in valslist] - - # Confirm options are valid before starting - if opt.confirm: - opt.confirm(p, valslist) - - return valslist - - x_opt = self.current_axis_options[x_type] - xs = process_axis(x_opt, x_values) - - y_opt = self.current_axis_options[y_type] - ys = process_axis(y_opt, y_values) - - z_opt = self.current_axis_options[z_type] - zs = process_axis(z_opt, z_values) - - def fix_axis_seeds(axis_opt, axis_list): - if axis_opt.label in ['Seed', 'Var. 
seed']: - return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list] - else: - return axis_list - - if not no_fixed_seeds: - xs = fix_axis_seeds(x_opt, xs) - ys = fix_axis_seeds(y_opt, ys) - zs = fix_axis_seeds(z_opt, zs) - - if x_opt.label == 'Steps': - total_steps = sum(xs) * len(ys) * len(zs) - elif y_opt.label == 'Steps': - total_steps = sum(ys) * len(xs) * len(zs) - elif z_opt.label == 'Steps': - total_steps = sum(zs) * len(xs) * len(ys) - else: - total_steps = p.steps * len(xs) * len(ys) * len(zs) - - if isinstance(p, StableDiffusionProcessingTxt2Img) and p.enable_hr: - if x_opt.label == "Hires steps": - total_steps += sum(xs) * len(ys) * len(zs) - elif y_opt.label == "Hires steps": - total_steps += sum(ys) * len(xs) * len(zs) - elif z_opt.label == "Hires steps": - total_steps += sum(zs) * len(xs) * len(ys) - elif p.hr_second_pass_steps: - total_steps += p.hr_second_pass_steps * len(xs) * len(ys) * len(zs) - else: - total_steps *= 2 - - total_steps *= p.n_iter - - image_cell_count = p.n_iter * p.batch_size - cell_console_text = f"; {image_cell_count} images per cell" if image_cell_count > 1 else "" - plural_s = 's' if len(zs) > 1 else '' - print(f"X/Y/Z plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on {len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})") - shared.total_tqdm.updateTotal(total_steps) - - grid_infotext = [None] - - state.xyz_plot_x = AxisInfo(x_opt, xs) - state.xyz_plot_y = AxisInfo(y_opt, ys) - state.xyz_plot_z = AxisInfo(z_opt, zs) - - # If one of the axes is very slow to change between (like SD model - # checkpoint), then make sure it is in the outer iteration of the nested - # `for` loop. - first_axes_processed = 'x' - second_axes_processed = 'y' - if x_opt.cost > y_opt.cost and x_opt.cost > z_opt.cost: - first_axes_processed = 'x' - if y_opt.cost > z_opt.cost: - second_axes_processed = 'y' - else: - second_axes_processed = 'z' - elif y_opt.cost > x_opt.cost and y_opt.cost > z_opt.cost: - first_axes_processed = 'y' - if x_opt.cost > z_opt.cost: - second_axes_processed = 'x' - else: - second_axes_processed = 'z' - elif z_opt.cost > x_opt.cost and z_opt.cost > y_opt.cost: - first_axes_processed = 'z' - if x_opt.cost > y_opt.cost: - second_axes_processed = 'x' - else: - second_axes_processed = 'y' - - def cell(x, y, z): - if shared.state.interrupted: - return Processed(p, [], p.seed, "") - - pc = copy(p) - pc.styles = pc.styles[:] - x_opt.apply(pc, x, xs) - y_opt.apply(pc, y, ys) - z_opt.apply(pc, z, zs) - - res = process_images(pc) - - if grid_infotext[0] is None: - pc.extra_generation_params = copy(pc.extra_generation_params) - pc.extra_generation_params['Script'] = self.title() - - if x_opt.label != 'Nothing': - pc.extra_generation_params["X Type"] = x_opt.label - pc.extra_generation_params["X Values"] = x_values - if x_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds: - pc.extra_generation_params["Fixed X Values"] = ", ".join([str(x) for x in xs]) - - if y_opt.label != 'Nothing': - pc.extra_generation_params["Y Type"] = y_opt.label - pc.extra_generation_params["Y Values"] = y_values - if y_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds: - pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys]) - - if z_opt.label != 'Nothing': - pc.extra_generation_params["Z Type"] = z_opt.label - pc.extra_generation_params["Z Values"] = z_values - if z_opt.label in ["Seed", "Var. 
seed"] and not no_fixed_seeds: - pc.extra_generation_params["Fixed Z Values"] = ", ".join([str(z) for z in zs]) - - grid_infotext[0] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds) - - return res - - with SharedSettingsStackHelper(): - processed, sub_grids = draw_xyz_grid( - p, - xs=xs, - ys=ys, - zs=zs, - x_labels=[x_opt.format_value(p, x_opt, x) for x in xs], - y_labels=[y_opt.format_value(p, y_opt, y) for y in ys], - z_labels=[z_opt.format_value(p, z_opt, z) for z in zs], - cell=cell, - draw_legend=draw_legend, - include_lone_images=include_lone_images, - include_sub_grids=include_sub_grids, - first_axes_processed=first_axes_processed, - second_axes_processed=second_axes_processed, - margin_size=margin_size - ) - - if opts.grid_save and len(sub_grids) > 1: - for sub_grid in sub_grids: - images.save_image(sub_grid, p.outpath_grids, "xyz_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p) - - if opts.grid_save: - images.save_image(processed.images[0], p.outpath_grids, "xyz_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p) - - return processed diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/conf.py b/spaces/brjathu/HMR2.0/vendor/detectron2/docs/conf.py deleted file mode 100644 index 1fb3e30f97dcc02b497e7c6de6bcc9e47ea94885..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/conf.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -# flake8: noqa - -# Configuration file for the Sphinx documentation builder. -# -# This file does only contain a selection of the most common options. For a -# full list see the documentation: -# http://www.sphinx-doc.org/en/master/config - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys -from unittest import mock -from sphinx.domains import Domain -from typing import Dict, List, Tuple - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -import sphinx_rtd_theme - - -class GithubURLDomain(Domain): - """ - Resolve certain links in markdown files to github source. - """ - - name = "githuburl" - ROOT = "https://github.com/facebookresearch/detectron2/blob/main/" - LINKED_DOC = ["tutorials/install", "tutorials/getting_started"] - - def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): - github_url = None - if not target.endswith("html") and target.startswith("../../"): - url = target.replace("../", "") - github_url = url - if fromdocname in self.LINKED_DOC: - # unresolved links in these docs are all github links - github_url = target - - if github_url is not None: - if github_url.endswith("MODEL_ZOO") or github_url.endswith("README"): - # bug of recommonmark. 
- # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155 - github_url += ".md" - print("Ref {} resolved to github:{}".format(target, github_url)) - contnode["refuri"] = self.ROOT + github_url - return [("githuburl:any", contnode)] - else: - return [] - - -# to support markdown -from recommonmark.parser import CommonMarkParser - -sys.path.insert(0, os.path.abspath("../")) -os.environ["_DOC_BUILDING"] = "True" -DEPLOY = os.environ.get("READTHEDOCS") == "True" - - -# -- Project information ----------------------------------------------------- - -# fmt: off -try: - import torch # noqa -except ImportError: - for m in [ - "torch", "torchvision", "torch.nn", "torch.nn.parallel", "torch.distributed", "torch.multiprocessing", "torch.autograd", - "torch.autograd.function", "torch.nn.modules", "torch.nn.modules.utils", "torch.utils", "torch.utils.data", "torch.onnx", - "torchvision", "torchvision.ops", - ]: - sys.modules[m] = mock.Mock(name=m) - sys.modules['torch'].__version__ = "1.7" # fake version - HAS_TORCH = False -else: - try: - torch.ops.detectron2 = mock.Mock(name="torch.ops.detectron2") - except: - pass - HAS_TORCH = True - -for m in [ - "cv2", "scipy", "portalocker", "detectron2._C", - "pycocotools", "pycocotools.mask", "pycocotools.coco", "pycocotools.cocoeval", - "google", "google.protobuf", "google.protobuf.internal", "onnx", - "caffe2", "caffe2.proto", "caffe2.python", "caffe2.python.utils", "caffe2.python.onnx", "caffe2.python.onnx.backend", -]: - sys.modules[m] = mock.Mock(name=m) -# fmt: on -sys.modules["cv2"].__version__ = "3.4" - -import detectron2 # isort: skip - -if HAS_TORCH: - from detectron2.utils.env import fixup_module_metadata - - fixup_module_metadata("torch.nn", torch.nn.__dict__) - fixup_module_metadata("torch.utils.data", torch.utils.data.__dict__) - - -project = "detectron2" -copyright = "2019-2020, detectron2 contributors" -author = "detectron2 contributors" - -# The short X.Y version -version = detectron2.__version__ -# The full version, including alpha/beta/rc tags -release = version - - -# -- General configuration --------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# -needs_sphinx = "3.0" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "recommonmark", - "sphinx.ext.autodoc", - "sphinx.ext.napoleon", - "sphinx.ext.intersphinx", - "sphinx.ext.todo", - "sphinx.ext.coverage", - "sphinx.ext.mathjax", - "sphinx.ext.viewcode", - "sphinx.ext.githubpages", -] - -# -- Configurations for plugins ------------ -napoleon_google_docstring = True -napoleon_include_init_with_doc = True -napoleon_include_special_with_doc = True -napoleon_numpy_docstring = False -napoleon_use_rtype = False -autodoc_inherit_docstrings = False -autodoc_member_order = "bysource" - -if DEPLOY: - intersphinx_timeout = 10 -else: - # skip this when building locally - intersphinx_timeout = 0.5 -intersphinx_mapping = { - "python": ("https://docs.python.org/3.7", None), - "numpy": ("https://docs.scipy.org/doc/numpy/", None), - "torch": ("https://pytorch.org/docs/master/", None), -} -# ------------------------- - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -source_suffix = [".rst", ".md"] - -# The master toctree document. 
-master_doc = "index" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md", "tutorials/README.md"] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - - -# -- Options for HTML output ------------------------------------------------- - -html_theme = "sphinx_rtd_theme" -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] -html_css_files = ["css/custom.css"] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# The default sidebars (for documents that don't match any pattern) are -# defined by theme itself. Builtin themes are using these templates by -# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', -# 'searchbox.html']``. -# -# html_sidebars = {} - - -# -- Options for HTMLHelp output --------------------------------------------- - -# Output file base name for HTML help builder. -htmlhelp_basename = "detectron2doc" - - -# -- Options for LaTeX output ------------------------------------------------ - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual") -] - - -# -- Options for manual page output ------------------------------------------ - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)] - - -# -- Options for Texinfo output ---------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "detectron2", - "detectron2 Documentation", - author, - "detectron2", - "One line description of project.", - "Miscellaneous", - ) -] - - -# -- Options for todo extension ---------------------------------------------- - -# If true, `todo` and `todoList` produce output, else they produce nothing. 
-todo_include_todos = True - - -def autodoc_skip_member(app, what, name, obj, skip, options): - # we hide something deliberately - if getattr(obj, "__HIDE_SPHINX_DOC__", False): - return True - - # Hide some that are deprecated or not intended to be used - HIDDEN = { - "ResNetBlockBase", - "GroupedBatchSampler", - "build_transform_gen", - "apply_transform_gens", - "TransformGen", - "apply_augmentations", - "StandardAugInput", - "build_batch_data_loader", - "draw_panoptic_seg_predictions", - "WarmupCosineLR", - "WarmupMultiStepLR", - "downgrade_config", - "upgrade_config", - "add_export_config", - } - try: - if name in HIDDEN or ( - hasattr(obj, "__doc__") and obj.__doc__.lower().strip().startswith("deprecated") - ): - print("Skipping deprecated object: {}".format(name)) - return True - except: - pass - return skip - - -_PAPER_DATA = { - "resnet": ("1512.03385", "Deep Residual Learning for Image Recognition"), - "fpn": ("1612.03144", "Feature Pyramid Networks for Object Detection"), - "mask r-cnn": ("1703.06870", "Mask R-CNN"), - "faster r-cnn": ( - "1506.01497", - "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks", - ), - "deformconv": ("1703.06211", "Deformable Convolutional Networks"), - "deformconv2": ("1811.11168", "Deformable ConvNets v2: More Deformable, Better Results"), - "panopticfpn": ("1901.02446", "Panoptic Feature Pyramid Networks"), - "retinanet": ("1708.02002", "Focal Loss for Dense Object Detection"), - "cascade r-cnn": ("1712.00726", "Cascade R-CNN: Delving into High Quality Object Detection"), - "lvis": ("1908.03195", "LVIS: A Dataset for Large Vocabulary Instance Segmentation"), - "rrpn": ("1703.01086", "Arbitrary-Oriented Scene Text Detection via Rotation Proposals"), - "imagenet in 1h": ("1706.02677", "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"), - "xception": ("1610.02357", "Xception: Deep Learning with Depthwise Separable Convolutions"), - "mobilenet": ( - "1704.04861", - "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications", - ), - "deeplabv3+": ( - "1802.02611", - "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation", - ), - "dds": ("2003.13678", "Designing Network Design Spaces"), - "scaling": ("2103.06877", "Fast and Accurate Model Scaling"), - "fcos": ("2006.09214", "FCOS: A Simple and Strong Anchor-free Object Detector"), - "rethinking-batchnorm": ("2105.07576", 'Rethinking "Batch" in BatchNorm'), - "vitdet": ("2203.16527", "Exploring Plain Vision Transformer Backbones for Object Detection"), - "mvitv2": ( - "2112.01526", - "MViTv2: Improved Multiscale Vision Transformers for Classification and Detection", - ), - "swin": ( - "2103.14030", - "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows", - ), - "omni3d": ( - "2207.10660", - "Omni3D: A Large Benchmark and Model for 3D Object Detection in the Wild", - ), -} - - -def paper_ref_role( - typ: str, - rawtext: str, - text: str, - lineno: int, - inliner, - options: Dict = {}, - content: List[str] = [], -): - """ - Parse :paper:`xxx`. Similar to the "extlinks" sphinx extension. 
- """ - from docutils import nodes, utils - from sphinx.util.nodes import split_explicit_title - - text = utils.unescape(text) - has_explicit_title, title, link = split_explicit_title(text) - link = link.lower() - if link not in _PAPER_DATA: - inliner.reporter.warning("Cannot find paper " + link) - paper_url, paper_title = "#", link - else: - paper_url, paper_title = _PAPER_DATA[link] - if "/" not in paper_url: - paper_url = "https://arxiv.org/abs/" + paper_url - if not has_explicit_title: - title = paper_title - pnode = nodes.reference(title, title, internal=False, refuri=paper_url) - return [pnode], [] - - -def setup(app): - from recommonmark.transform import AutoStructify - - app.add_domain(GithubURLDomain) - app.connect("autodoc-skip-member", autodoc_skip_member) - app.add_role("paper", paper_ref_role) - app.add_config_value( - "recommonmark_config", - {"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True}, - True, - ) - app.add_transform(AutoStructify) diff --git a/spaces/caffeinum/VToonify/vtoonify/model/stylegan/__init__.py b/spaces/caffeinum/VToonify/vtoonify/model/stylegan/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/cakiki/netlogo-ants/index.html b/spaces/cakiki/netlogo-ants/index.html deleted file mode 100644 index f20ee0f60ace79c9b5028c11d1918ddefbecaaa4..0000000000000000000000000000000000000000 --- a/spaces/cakiki/netlogo-ants/index.html +++ /dev/null @@ -1,77275 +0,0 @@ - - - - - - - - - - - - - - - - -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/candlend/vits-hoshimi/sovits/inference_main.py b/spaces/candlend/vits-hoshimi/sovits/inference_main.py deleted file mode 100644 index 74d0bd5fed125b2c5cdf3b929d5c42efb0085147..0000000000000000000000000000000000000000 --- a/spaces/candlend/vits-hoshimi/sovits/inference_main.py +++ /dev/null @@ -1,65 +0,0 @@ -import io -import logging -import time -from pathlib import Path - -import librosa -import numpy as np -import soundfile - -from sovits.inference import infer_tool -from sovits.inference import slicer -from sovits.inference.infer_tool import Svc - -logging.getLogger('numba').setLevel(logging.WARNING) -chunks_dict = infer_tool.read_temp("inference/chunks_temp.json") - -model_path = "logs/32k/G_174000-Copy1.pth" -config_path = "configs/config.json" -svc_model = Svc(model_path, config_path) -infer_tool.mkdir(["raw", "results"]) - -# 支持多个wav文件,放在raw文件夹下 -clean_names = ["君の知らない物語-src"] -trans = [-5] # 音高调整,支持正负(半音) -spk_list = ['yunhao'] # 每次同时合成多语者音色 -slice_db = -40 # 默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50 -wav_format = 'flac' # 音频输出格式 - -infer_tool.fill_a_to_b(trans, clean_names) -for clean_name, tran in zip(clean_names, trans): - raw_audio_path = f"raw/{clean_name}" - if "." not in raw_audio_path: - raw_audio_path += ".wav" - infer_tool.format_wav(raw_audio_path) - wav_path = Path(raw_audio_path).with_suffix('.wav') - audio, sr = librosa.load(wav_path, mono=True, sr=None) - wav_hash = infer_tool.get_md5(audio) - if wav_hash in chunks_dict.keys(): - print("load chunks from temp") - chunks = chunks_dict[wav_hash]["chunks"] - else: - chunks = slicer.cut(wav_path, db_thresh=slice_db) - print(chunks) - chunks_dict[wav_hash] = {"chunks": chunks, "time": int(time.time())} - infer_tool.write_temp("inference/chunks_temp.json", chunks_dict) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - - for spk in spk_list: - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======') - length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample)) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - raw_path.seek(0) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - else: - out_audio, out_sr = svc_model.infer(spk, tran, raw_path) - _audio = out_audio.cpu().numpy() - audio.extend(list(_audio)) - - res_path = f'./results/{clean_name}_{tran}key_{spk}.{wav_format}' - soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format) diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_l_in21k_50ep.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_l_in21k_50ep.py deleted file mode 100644 index c64f0c18aea5dfe49fef028a6300ab1dc9f2537a..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_l_in21k_50ep.py +++ /dev/null @@ -1,22 +0,0 @@ -from .cascade_mask_rcnn_mvitv2_b_in21k_100ep import ( - dataloader, - lr_multiplier, - model, - train, - optimizer, -) - -model.backbone.bottom_up.embed_dim = 144 -model.backbone.bottom_up.depth = 48 -model.backbone.bottom_up.num_heads = 2 -model.backbone.bottom_up.last_block_indexes = (1, 7, 43, 47) -model.backbone.bottom_up.drop_path_rate = 0.5 - - -train.init_checkpoint = 
"detectron2://ImageNetPretrained/mvitv2/MViTv2_L_in21k.pyth" - -train.max_iter = train.max_iter // 2 # 100ep -> 50ep -lr_multiplier.scheduler.milestones = [ - milestone // 2 for milestone in lr_multiplier.scheduler.milestones -] -lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/spaces/ccolas/TastyPiano/src/music/utils.py b/spaces/ccolas/TastyPiano/src/music/utils.py deleted file mode 100644 index 0b479f8c2a5f315ac81e066e4cb541e37f106cf5..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/music/utils.py +++ /dev/null @@ -1,308 +0,0 @@ -import os - -import pandas as pd -from pydub import AudioSegment -import numpy as np -from moviepy.editor import * -import time -import pickle -import audioread -import librosa # install numba==0.49.1 -# setup A: numba 0.51.2, librosa 0.6.3, llvmlite: 0.34.0 -# setupB: numba==0.49.1, llvmlite-0.32.1 -from src.music.config import RATE_AUDIO_SAVE -import hashlib -import unicodedata -import re - -# from src.music.piano_detection_model.piano_detection_model import SR - -def clean_removed_mp3_from_csv(path): - print(f"Cleaning meta_data.csv using files from the folder, in {path}") - files = os.listdir(path) - indexes_to_remove = [] - meta_data = pd.read_csv(path + 'meta_data.csv') - for i, fn in enumerate(meta_data['filename']): - if fn not in files: - indexes_to_remove.append(i) - meta_data = meta_data.drop(indexes_to_remove) - meta_data.to_csv(path + 'meta_data.csv', index=False) - print('\tDone.') - -def clean_removed_csv_from_folder(path): - print(f"Cleaning files from folder using meta_data.csv listed file, in {path}") - files = os.listdir(path) - meta_data = pd.read_csv(path + 'meta_data.csv') - hashes = set(meta_data['hash']) - count = 0 - for f in files: - if f not in ['meta_data.csv', 'url.txt']: - if f[:-4] not in hashes: - count += 1 - print(count) - # os.remove(path + f) - stop = 1 - print('\tDone.') - -# def convert_mp3_to_mono_16k(path): -# print(f"\n\n\t\tConverting mp3 to mono and 16k sample rate, in {path}\n") -# if '.mp3' == path[-4:]: -# audio = AudioFileClip(path) -# audio.write_audiofile(path[:-4] + '.mp3', -# verbose=False, -# logger=None, -# fps=FPS, -# ffmpeg_params=["-ac", "1"]) -# else: -# list_files = os.listdir(path) -# for i, f in enumerate(list_files): -# print(compute_progress(i, len(list_files))) -# if ".mp3" in f: -# audio = AudioFileClip(path + f) -# audio.write_audiofile(path + f[:-4] + '.mp3', -# verbose=False, -# logger=None, -# fps=FPS, # 16000 sr -# ffmpeg_params=["-ac", "1"] # make it mono -# ) -# print('\tDone.') - - - -def load_audio(path, sr=22050, mono=True, offset=0.0, duration=None, - dtype=np.float32, res_type='kaiser_best', - backends=[audioread.ffdec.FFmpegAudioFile]): - """Load audio. Copied from librosa.core.load() except that ffmpeg backend is - always used in this function. Code from piano_transcription_inference""" - - y = [] - with audioread.audio_open(os.path.realpath(path), backends=backends) as input_file: - sr_native = input_file.samplerate - n_channels = input_file.channels - - s_start = int(np.round(sr_native * offset)) * n_channels - - if duration is None: - s_end = np.inf - else: - s_end = s_start + (int(np.round(sr_native * duration)) - * n_channels) - - n = 0 - - for frame in input_file: - frame = librosa.core.audio.util.buf_to_float(frame, dtype=dtype) - n_prev = n - n = n + len(frame) - - if n < s_start: - # offset is after the current frame - # keep reading - continue - - if s_end < n_prev: - # we're off the end. 
stop reading - break - - if s_end < n: - # the end is in this frame. crop. - frame = frame[:s_end - n_prev] - - if n_prev <= s_start <= n: - # beginning is in this frame - frame = frame[(s_start - n_prev):] - - # tack on the current frame - y.append(frame) - - if y: - y = np.concatenate(y) - - if n_channels > 1: - y = y.reshape((-1, n_channels)).T - if mono: - y = librosa.core.audio.to_mono(y) - - if sr is not None: - y = librosa.core.audio.resample(y, sr_native, sr, res_type=res_type) - - else: - sr = sr_native - - # Final cleanup for dtype and contiguity - y = np.ascontiguousarray(y, dtype=dtype) - - return (y, sr) - -def compute_progress(iter, total): - return f"{int((iter+ 1) / total * 100)}%" - -def compute_progress_and_eta(times, iter, total, n_av=3000): - av_time = np.mean(times[-n_av:]) - progress = int(((iter + 1) / total) * 100) - eta_h = int(av_time * (total - iter) // 3600) - eta_m = int((av_time * (total - iter) - (eta_h * 3600)) // 60) - eta_s = int((av_time * (total - iter) - (eta_h * 3600) - eta_m * 60)) - eta = f"Progress: {progress}%, ETA: {eta_h}H{eta_m}M{eta_s}S." - return eta - -def crop_mp3_from_meta_data_constraints(path, clean_constraints=True): - print(f"Cropping mp3 using constraints from meta_data.csv, in {path}") - meta_data = pd.read_csv(path + 'meta_data.csv') - constraint_start = meta_data['constraint_start'].copy() - length = meta_data['length'].copy() - constraint_end = meta_data['constraint_end'].copy() - filenames = meta_data['filename'].copy() - times = [5] - for i, c_start, c_end, fn, l in zip(range(len(constraint_start)), constraint_start, constraint_end, filenames, length): - if c_start != 0 or c_end != l: - i_time = time.time() - print(compute_progress_and_eta(times, i, len(constraint_start), n_av=100)) - song = AudioSegment.from_mp3(path + fn) - extract = song[c_start*1000:c_end*1000] - extract.export(path + fn, format="mp3") - if clean_constraints: - constraint_start[i] = 0 - constraint_end[i] = length[i] - meta_data['constraint_start'] = constraint_start - meta_data['constraint_end'] = constraint_end - meta_data.to_csv(path + 'meta_data.csv', index=False) - times.append(time.time() - i_time) - print('\tDone.') - -def get_all_subfiles_with_extension(path, max_depth=3, extension='.*', current_depth=0): - folders = [f for f in os.listdir(path) if os.path.isdir(path + f)] - # get all files in current folder with a given extension - if isinstance(extension, list): - assert all([isinstance(e, str) for e in extension]), 'extension can be a str or a list' - files = [path + f for f in os.listdir(path) if os.path.isfile(path + f) and any([ext == f[-len(ext):] for ext in extension])] - elif isinstance(extension, str): - assert extension[0] == '.', 'extension should be an extension or a list of extensions' - if extension == '.*': - files = [path + f for f in os.listdir(path) if os.path.isfile(path + f)] - else: - files = [path + f for f in os.listdir(path) if os.path.isfile(path + f) and f[-len(extension):]==extension] - else: - print('Error: extension should be either a str or a list') - raise ValueError - - if current_depth < max_depth: - for fold in folders: - files += get_all_subfiles_with_extension(path + fold + '/', max_depth=max_depth, extension=extension, current_depth=current_depth+1) - return files - -def get_out_path(in_path, in_word, out_word, out_extension, exclude_paths=()): - splitted_in_path = in_path.split('/') - for i in range(len(splitted_in_path)): - if splitted_in_path[i] == in_word: - splitted_in_path[i] = out_word - playlist_index = i + 
1 - file_index = len(splitted_in_path) - 1 - if splitted_in_path[playlist_index] in exclude_paths: - to_exclude = True - return None, to_exclude, None - else: - to_exclude = False - if out_word != 'midi': - splitted_in_path[playlist_index] = '_'.join(splitted_in_path[playlist_index].split('_')[:-len(in_word.split('_'))]) + '_' + out_word - else: - splitted_in_path[playlist_index] += '_' + out_word - if 'fake' not in splitted_in_path: - os.makedirs('/'.join(splitted_in_path[:playlist_index + 1]), exist_ok=True) - if out_word != 'midi': - new_filename = '_'.join(splitted_in_path[file_index].split('_')[:-len(in_word.split('_'))]) + '_' + out_word + out_extension - else: - new_filename = '.'.join(splitted_in_path[file_index].split('.')[:-len(in_word.split('_'))]) + '_' + out_word + out_extension - splitted_in_path[file_index] = new_filename - splitted_in_path = splitted_in_path[:playlist_index + 1] + [splitted_in_path[file_index]] - out_path = '/'.join(splitted_in_path) - return out_path, to_exclude, splitted_in_path[playlist_index] - -def set_all_seeds(seed): - import random - import numpy as np - import torch - torch.manual_seed(seed) - random.seed(seed) - np.random.seed(seed) - -def get_paths_in_and_out(in_path, in_word, in_extension, out_word, out_extension, max_depth, exclude_paths=()): - # find all files with the in_extension in subfolders of in_path up to max_depth. - # for each, replace the in_word keyword in folders with the out_word, and append out_word to filenames. - all_in_paths = get_all_subfiles_with_extension(in_path, max_depth=max_depth, extension=in_extension) - indexes_not_transcribed = [] - all_out_paths = [] - all_playlists = [] - for i_path, in_path in enumerate(all_in_paths): - out_path, to_exclude, playlist = get_out_path(in_path=in_path, in_word=in_word, out_word=out_word, out_extension=out_extension, exclude_paths=exclude_paths) - if not to_exclude: - indexes_not_transcribed.append(i_path) - all_out_paths.append(out_path) - all_playlists.append(playlist) - all_in_paths = [in_path for i, in_path in enumerate(all_in_paths) if i in indexes_not_transcribed] - assert len(all_out_paths) == len(all_in_paths) - return all_in_paths, all_out_paths, all_playlists - -def get_path_and_filter_existing(in_path, in_word, in_extension, out_word, out_extension, max_depth, exclude_paths=()): - # find all files with the in_extension in subfolders of in_path up to max_depth. - # for each, replace the in_word keyword in folders with the out_word, and append out_word to filenames. 
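-    # unlike get_paths_in_and_out above, this also skips any input whose output file already exists on disk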
- all_in_paths = get_all_subfiles_with_extension(in_path, max_depth=max_depth, extension=in_extension) - indexes_to_process = [] - all_out_paths = [] - all_playlists = [] - for i_path, in_path in enumerate(all_in_paths): - out_path, to_exclude, playlist = get_out_path(in_path=in_path, in_word=in_word, out_word=out_word, out_extension=out_extension, exclude_paths=exclude_paths) - if not to_exclude: - if not os.path.exists(out_path): - indexes_to_process.append(i_path) - all_out_paths.append(out_path) - all_playlists.append(playlist) - all_in_paths = list(np.array(all_in_paths)[indexes_to_process])#[in_path for i, in_path in enumerate(all_in_paths) if i in indexes_to_process] - assert len(all_out_paths) == len(all_in_paths) - return all_in_paths, all_out_paths, all_playlists - -def md5sum(filename, blocksize=65536): - hash = hashlib.md5() - with open(filename, "rb") as f: - for block in iter(lambda: f.read(blocksize), b""): - hash.update(block) - return hash.hexdigest() - - -emoji_pattern = re.compile("[" - u"\U0001F600-\U0001F64F" # emoticons - u"\U0001F300-\U0001F5FF" # symbols & pictographs - u"\U0001F680-\U0001F6FF" # transport & map symbols - u"\U0001F1E0-\U0001F1FF" # flags (iOS) - "]+", flags=re.UNICODE) -def slugify(value, allow_unicode=False): - """ - Taken from https://github.com/django/django/blob/master/django/utils/text.py - Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated - dashes to single dashes. Remove characters that aren't alphanumerics, - underscores, or hyphens. Convert to lowercase. Also strip leading and - trailing whitespace, dashes, and underscores. - """ - value = str(value).lower() - if allow_unicode: - value = unicodedata.normalize('NFKC', value) - else: - value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii') - value = re.sub(r'[^\w\s-]', '', value.lower()) - value = emoji_pattern.sub(r'', value) - value = re.sub(r'[-\s]+', '_', value).strip('-_') - # if value == '': - # for i in range(10): - # value += str(np.random.choice(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])) - return value - -if __name__ == '__main__': - path = "/home/cedric/Documents/pianocktail/data/midi/street_piano/" - # for folder in ['my_sheet_music_transcriptions']:#os.listdir(path): - # print('\n\n\t\t', folder) - # convert_mp4_to_mp3(path + folder + '/') - - clean_removed_csv_from_folder(path) - # folder = 'street_piano/' - # for folder in ['street_piano/']: - # clean_removed_mp3_from_csv(path + folder) diff --git a/spaces/cdleong/phonemize-audio/README.md b/spaces/cdleong/phonemize-audio/README.md deleted file mode 100644 index 3f238f1d897820425c0bf9e84f64e8130dc50cde..0000000000000000000000000000000000000000 --- a/spaces/cdleong/phonemize-audio/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Phonemize Audio -emoji: 🗣️ -colorFrom: blue -colorTo: gray -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. 
- -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/OpenVINO/cpp/yolox_openvino.cpp b/spaces/chendl/compositional_test/multimodal/YOLOX/demo/OpenVINO/cpp/yolox_openvino.cpp deleted file mode 100644 index f42344141cd760737e9d2b617d776480d4379a7d..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/OpenVINO/cpp/yolox_openvino.cpp +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include -#include -#include - -using namespace InferenceEngine; - -/** - * @brief Define names based depends on Unicode path support - */ -#define tcout std::cout -#define file_name_t std::string -#define imread_t cv::imread -#define NMS_THRESH 0.45 -#define BBOX_CONF_THRESH 0.3 - -static const int INPUT_W = 416; -static const int INPUT_H = 416; -static const int NUM_CLASSES = 80; // COCO has 80 classes. Modify this value on your own dataset. - -cv::Mat static_resize(cv::Mat& img) { - float r = std::min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0)); - // r = std::min(r, 1.0f); - int unpad_w = r * img.cols; - int unpad_h = r * img.rows; - cv::Mat re(unpad_h, unpad_w, CV_8UC3); - cv::resize(img, re, re.size()); - //cv::Mat out(INPUT_W, INPUT_H, CV_8UC3, cv::Scalar(114, 114, 114)); - cv::Mat out(INPUT_H, INPUT_W, CV_8UC3, cv::Scalar(114, 114, 114)); - re.copyTo(out(cv::Rect(0, 0, re.cols, re.rows))); - return out; -} - -void blobFromImage(cv::Mat& img, Blob::Ptr& blob){ - int channels = 3; - int img_h = img.rows; - int img_w = img.cols; - InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as(blob); - if (!mblob) - { - THROW_IE_EXCEPTION << "We expect blob to be inherited from MemoryBlob in matU8ToBlob, " - << "but by fact we were not able to cast inputBlob to MemoryBlob"; - } - // locked memory holder should be alive all time while access to its buffer happens - auto mblobHolder = mblob->wmap(); - - float *blob_data = mblobHolder.as(); - - for (size_t c = 0; c < channels; c++) - { - for (size_t h = 0; h < img_h; h++) - { - for (size_t w = 0; w < img_w; w++) - { - blob_data[c * img_w * img_h + h * img_w + w] = - (float)img.at(h, w)[c]; - } - } - } -} - - -struct Object -{ - cv::Rect_ rect; - int label; - float prob; -}; - -struct GridAndStride -{ - int grid0; - int grid1; - int stride; -}; - -static void generate_grids_and_stride(const int target_w, const int target_h, std::vector& strides, std::vector& grid_strides) -{ - for (auto stride : strides) - { - int num_grid_w = target_w / stride; - int num_grid_h = target_h / stride; - for (int g1 = 0; g1 < num_grid_h; g1++) - { - for (int g0 = 0; g0 < num_grid_w; g0++) - { - grid_strides.push_back((GridAndStride){g0, g1, stride}); - } - } - } -} - - -static void generate_yolox_proposals(std::vector grid_strides, const float* feat_ptr, float prob_threshold, std::vector& objects) -{ - - const int num_anchors = grid_strides.size(); - - for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++) - { - const int grid0 = grid_strides[anchor_idx].grid0; - const int grid1 = grid_strides[anchor_idx].grid1; - const int stride = grid_strides[anchor_idx].stride; - - const int basic_pos = anchor_idx * (NUM_CLASSES + 5); - - // yolox/models/yolo_head.py 
decode logic - // outputs[..., :2] = (outputs[..., :2] + grids) * strides - // outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides - float x_center = (feat_ptr[basic_pos + 0] + grid0) * stride; - float y_center = (feat_ptr[basic_pos + 1] + grid1) * stride; - float w = exp(feat_ptr[basic_pos + 2]) * stride; - float h = exp(feat_ptr[basic_pos + 3]) * stride; - float x0 = x_center - w * 0.5f; - float y0 = y_center - h * 0.5f; - - float box_objectness = feat_ptr[basic_pos + 4]; - for (int class_idx = 0; class_idx < NUM_CLASSES; class_idx++) - { - float box_cls_score = feat_ptr[basic_pos + 5 + class_idx]; - float box_prob = box_objectness * box_cls_score; - if (box_prob > prob_threshold) - { - Object obj; - obj.rect.x = x0; - obj.rect.y = y0; - obj.rect.width = w; - obj.rect.height = h; - obj.label = class_idx; - obj.prob = box_prob; - - objects.push_back(obj); - } - - } // class loop - - } // point anchor loop -} - -static inline float intersection_area(const Object& a, const Object& b) -{ - cv::Rect_ inter = a.rect & b.rect; - return inter.area(); -} - -static void qsort_descent_inplace(std::vector& faceobjects, int left, int right) -{ - int i = left; - int j = right; - float p = faceobjects[(left + right) / 2].prob; - - while (i <= j) - { - while (faceobjects[i].prob > p) - i++; - - while (faceobjects[j].prob < p) - j--; - - if (i <= j) - { - // swap - std::swap(faceobjects[i], faceobjects[j]); - - i++; - j--; - } - } - - #pragma omp parallel sections - { - #pragma omp section - { - if (left < j) qsort_descent_inplace(faceobjects, left, j); - } - #pragma omp section - { - if (i < right) qsort_descent_inplace(faceobjects, i, right); - } - } -} - - -static void qsort_descent_inplace(std::vector& objects) -{ - if (objects.empty()) - return; - - qsort_descent_inplace(objects, 0, objects.size() - 1); -} - -static void nms_sorted_bboxes(const std::vector& faceobjects, std::vector& picked, float nms_threshold) -{ - picked.clear(); - - const int n = faceobjects.size(); - - std::vector areas(n); - for (int i = 0; i < n; i++) - { - areas[i] = faceobjects[i].rect.area(); - } - - for (int i = 0; i < n; i++) - { - const Object& a = faceobjects[i]; - - int keep = 1; - for (int j = 0; j < (int)picked.size(); j++) - { - const Object& b = faceobjects[picked[j]]; - - // intersection over union - float inter_area = intersection_area(a, b); - float union_area = areas[i] + areas[picked[j]] - inter_area; - // float IoU = inter_area / union_area - if (inter_area / union_area > nms_threshold) - keep = 0; - } - - if (keep) - picked.push_back(i); - } -} - - -static void decode_outputs(const float* prob, std::vector& objects, float scale, const int img_w, const int img_h) { - std::vector proposals; - std::vector strides = {8, 16, 32}; - std::vector grid_strides; - - generate_grids_and_stride(INPUT_W, INPUT_H, strides, grid_strides); - generate_yolox_proposals(grid_strides, prob, BBOX_CONF_THRESH, proposals); - qsort_descent_inplace(proposals); - - std::vector picked; - nms_sorted_bboxes(proposals, picked, NMS_THRESH); - int count = picked.size(); - objects.resize(count); - - for (int i = 0; i < count; i++) - { - objects[i] = proposals[picked[i]]; - - // adjust offset to original unpadded - float x0 = (objects[i].rect.x) / scale; - float y0 = (objects[i].rect.y) / scale; - float x1 = (objects[i].rect.x + objects[i].rect.width) / scale; - float y1 = (objects[i].rect.y + objects[i].rect.height) / scale; - - // clip - x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f); - y0 = std::max(std::min(y0, (float)(img_h - 
1)), 0.f); - x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f); - y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f); - - objects[i].rect.x = x0; - objects[i].rect.y = y0; - objects[i].rect.width = x1 - x0; - objects[i].rect.height = y1 - y0; - } -} - -const float color_list[80][3] = -{ - {0.000, 0.447, 0.741}, - {0.850, 0.325, 0.098}, - {0.929, 0.694, 0.125}, - {0.494, 0.184, 0.556}, - {0.466, 0.674, 0.188}, - {0.301, 0.745, 0.933}, - {0.635, 0.078, 0.184}, - {0.300, 0.300, 0.300}, - {0.600, 0.600, 0.600}, - {1.000, 0.000, 0.000}, - {1.000, 0.500, 0.000}, - {0.749, 0.749, 0.000}, - {0.000, 1.000, 0.000}, - {0.000, 0.000, 1.000}, - {0.667, 0.000, 1.000}, - {0.333, 0.333, 0.000}, - {0.333, 0.667, 0.000}, - {0.333, 1.000, 0.000}, - {0.667, 0.333, 0.000}, - {0.667, 0.667, 0.000}, - {0.667, 1.000, 0.000}, - {1.000, 0.333, 0.000}, - {1.000, 0.667, 0.000}, - {1.000, 1.000, 0.000}, - {0.000, 0.333, 0.500}, - {0.000, 0.667, 0.500}, - {0.000, 1.000, 0.500}, - {0.333, 0.000, 0.500}, - {0.333, 0.333, 0.500}, - {0.333, 0.667, 0.500}, - {0.333, 1.000, 0.500}, - {0.667, 0.000, 0.500}, - {0.667, 0.333, 0.500}, - {0.667, 0.667, 0.500}, - {0.667, 1.000, 0.500}, - {1.000, 0.000, 0.500}, - {1.000, 0.333, 0.500}, - {1.000, 0.667, 0.500}, - {1.000, 1.000, 0.500}, - {0.000, 0.333, 1.000}, - {0.000, 0.667, 1.000}, - {0.000, 1.000, 1.000}, - {0.333, 0.000, 1.000}, - {0.333, 0.333, 1.000}, - {0.333, 0.667, 1.000}, - {0.333, 1.000, 1.000}, - {0.667, 0.000, 1.000}, - {0.667, 0.333, 1.000}, - {0.667, 0.667, 1.000}, - {0.667, 1.000, 1.000}, - {1.000, 0.000, 1.000}, - {1.000, 0.333, 1.000}, - {1.000, 0.667, 1.000}, - {0.333, 0.000, 0.000}, - {0.500, 0.000, 0.000}, - {0.667, 0.000, 0.000}, - {0.833, 0.000, 0.000}, - {1.000, 0.000, 0.000}, - {0.000, 0.167, 0.000}, - {0.000, 0.333, 0.000}, - {0.000, 0.500, 0.000}, - {0.000, 0.667, 0.000}, - {0.000, 0.833, 0.000}, - {0.000, 1.000, 0.000}, - {0.000, 0.000, 0.167}, - {0.000, 0.000, 0.333}, - {0.000, 0.000, 0.500}, - {0.000, 0.000, 0.667}, - {0.000, 0.000, 0.833}, - {0.000, 0.000, 1.000}, - {0.000, 0.000, 0.000}, - {0.143, 0.143, 0.143}, - {0.286, 0.286, 0.286}, - {0.429, 0.429, 0.429}, - {0.571, 0.571, 0.571}, - {0.714, 0.714, 0.714}, - {0.857, 0.857, 0.857}, - {0.000, 0.447, 0.741}, - {0.314, 0.717, 0.741}, - {0.50, 0.5, 0} -}; - -static void draw_objects(const cv::Mat& bgr, const std::vector& objects) -{ - static const char* class_names[] = { - "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", - "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", - "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", - "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", - "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", - "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", - "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", - "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", - "hair drier", "toothbrush" - }; - - cv::Mat image = bgr.clone(); - - for (size_t i = 0; i < objects.size(); i++) - { - const Object& obj = objects[i]; - - fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob, - obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height); - - 
cv::Scalar color = cv::Scalar(color_list[obj.label][0], color_list[obj.label][1], color_list[obj.label][2]); - float c_mean = cv::mean(color)[0]; - cv::Scalar txt_color; - if (c_mean > 0.5){ - txt_color = cv::Scalar(0, 0, 0); - }else{ - txt_color = cv::Scalar(255, 255, 255); - } - - cv::rectangle(image, obj.rect, color * 255, 2); - - char text[256]; - sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100); - - int baseLine = 0; - cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine); - - cv::Scalar txt_bk_color = color * 0.7 * 255; - - int x = obj.rect.x; - int y = obj.rect.y + 1; - //int y = obj.rect.y - label_size.height - baseLine; - if (y > image.rows) - y = image.rows; - //if (x + label_size.width > image.cols) - //x = image.cols - label_size.width; - - cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)), - txt_bk_color, -1); - - cv::putText(image, text, cv::Point(x, y + label_size.height), - cv::FONT_HERSHEY_SIMPLEX, 0.4, txt_color, 1); - } - - cv::imwrite("_demo.jpg" , image); - fprintf(stderr, "save vis file\n"); - /* cv::imshow("image", image); */ - /* cv::waitKey(0); */ -} - - -int main(int argc, char* argv[]) { - try { - // ------------------------------ Parsing and validation of input arguments - // --------------------------------- - if (argc != 4) { - tcout << "Usage : " << argv[0] << " " << std::endl; - return EXIT_FAILURE; - } - - const file_name_t input_model {argv[1]}; - const file_name_t input_image_path {argv[2]}; - const std::string device_name {argv[3]}; - // ----------------------------------------------------------------------------------------------------- - - // --------------------------- Step 1. Initialize inference engine core - // ------------------------------------- - Core ie; - // ----------------------------------------------------------------------------------------------------- - - // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and - // .bin files) or ONNX (.onnx file) format - CNNNetwork network = ie.ReadNetwork(input_model); - if (network.getOutputsInfo().size() != 1) - throw std::logic_error("Sample supports topologies with 1 output only"); - if (network.getInputsInfo().size() != 1) - throw std::logic_error("Sample supports topologies with 1 input only"); - // ----------------------------------------------------------------------------------------------------- - - // --------------------------- Step 3. Configure input & output - // --------------------------------------------- - // --------------------------- Prepare input blobs - // ----------------------------------------------------- - InputInfo::Ptr input_info = network.getInputsInfo().begin()->second; - std::string input_name = network.getInputsInfo().begin()->first; - - /* Mark input as resizable by setting of a resize algorithm. - * In this case we will be able to set an input blob of any shape to an - * infer request. 
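-     * (In this sample those calls are left commented out just below; the input image
-     * is instead letterboxed on the CPU by static_resize() before inference.)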
Resize and layout conversions are executed automatically - * during inference */ - //input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR); - //input_info->setLayout(Layout::NHWC); - //input_info->setPrecision(Precision::FP32); - - // --------------------------- Prepare output blobs - // ---------------------------------------------------- - if (network.getOutputsInfo().empty()) { - std::cerr << "Network outputs info is empty" << std::endl; - return EXIT_FAILURE; - } - DataPtr output_info = network.getOutputsInfo().begin()->second; - std::string output_name = network.getOutputsInfo().begin()->first; - - output_info->setPrecision(Precision::FP32); - // ----------------------------------------------------------------------------------------------------- - - // --------------------------- Step 4. Loading a model to the device - // ------------------------------------------ - ExecutableNetwork executable_network = ie.LoadNetwork(network, device_name); - // ----------------------------------------------------------------------------------------------------- - - // --------------------------- Step 5. Create an infer request - // ------------------------------------------------- - InferRequest infer_request = executable_network.CreateInferRequest(); - // ----------------------------------------------------------------------------------------------------- - - // --------------------------- Step 6. Prepare input - // -------------------------------------------------------- - /* Read input image to a blob and set it to an infer request without resize - * and layout conversions. */ - cv::Mat image = imread_t(input_image_path); - cv::Mat pr_img = static_resize(image); - Blob::Ptr imgBlob = infer_request.GetBlob(input_name); // just wrap Mat data by Blob::Ptr - blobFromImage(pr_img, imgBlob); - - // infer_request.SetBlob(input_name, imgBlob); // infer_request accepts input blob of any size - // ----------------------------------------------------------------------------------------------------- - - // --------------------------- Step 7. Do inference - // -------------------------------------------------------- - /* Running the request synchronously */ - infer_request.Infer(); - // ----------------------------------------------------------------------------------------------------- - - // --------------------------- Step 8. 
Process output - // ------------------------------------------------------ - const Blob::Ptr output_blob = infer_request.GetBlob(output_name); - MemoryBlob::CPtr moutput = as(output_blob); - if (!moutput) { - throw std::logic_error("We expect output to be inherited from MemoryBlob, " - "but by fact we were not able to cast output to MemoryBlob"); - } - // locked memory holder should be alive all time while access to its buffer - // happens - auto moutputHolder = moutput->rmap(); - const float* net_pred = moutputHolder.as::value_type*>(); - - int img_w = image.cols; - int img_h = image.rows; - float scale = std::min(INPUT_W / (image.cols*1.0), INPUT_H / (image.rows*1.0)); - std::vector objects; - - decode_outputs(net_pred, objects, scale, img_w, img_h); - draw_objects(image, objects); - - // ----------------------------------------------------------------------------------------------------- - } catch (const std::exception& ex) { - std::cerr << ex.what() << std::endl; - return EXIT_FAILURE; - } - return EXIT_SUCCESS; -} diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/docs/demo/onnx_readme.md b/spaces/chendl/compositional_test/multimodal/YOLOX/docs/demo/onnx_readme.md deleted file mode 100644 index bd85ab19678b58cf2a33ac1bdd3cecb449f951a2..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/docs/demo/onnx_readme.md +++ /dev/null @@ -1 +0,0 @@ -../../demo/ONNXRuntime/README.md \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/README.md b/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/README.md deleted file mode 100644 index 5a3c2dbd3506be28401a846501c70efe8be9d3f5..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/README.md +++ /dev/null @@ -1,334 +0,0 @@ - - -# Sequence-to-Sequence Training and Evaluation - -This directory contains examples for finetuning and evaluating transformers on summarization and translation tasks. -For deprecated `bertabs` instructions, see [`bertabs/README.md`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/bertabs/README.md). - -### Supported Architectures - -- `BartForConditionalGeneration` -- `MarianMTModel` -- `PegasusForConditionalGeneration` -- `MBartForConditionalGeneration` -- `FSMTForConditionalGeneration` -- `T5ForConditionalGeneration` - -### Download the Datasets - -#### XSUM - -```bash -cd examples/legacy/seq2seq -wget https://cdn-datasets.huggingface.co/summarization/xsum.tar.gz -tar -xzvf xsum.tar.gz -export XSUM_DIR=${PWD}/xsum -``` -this should make a directory called `xsum/` with files like `test.source`. -To use your own data, copy that files format. Each article to be summarized is on its own line. - -#### CNN/DailyMail - -```bash -cd examples/legacy/seq2seq -wget https://cdn-datasets.huggingface.co/summarization/cnn_dm_v2.tgz -tar -xzvf cnn_dm_v2.tgz # empty lines removed -mv cnn_cln cnn_dm -export CNN_DIR=${PWD}/cnn_dm -``` -this should make a directory called `cnn_dm/` with 6 files. - -#### WMT16 English-Romanian Translation Data - -download with this command: -```bash -wget https://cdn-datasets.huggingface.co/translation/wmt_en_ro.tar.gz -tar -xzvf wmt_en_ro.tar.gz -export ENRO_DIR=${PWD}/wmt_en_ro -``` -this should make a directory called `wmt_en_ro/` with 6 files. 
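-
-A quick way to confirm any of these dataset directories before training is to check that all six expected files are present (a minimal sketch, assuming the `train/val/test` x `source/target` layout described under "Your Data" below):
-
-```python
-import os
-
-data_dir = os.environ.get("ENRO_DIR", "wmt_en_ro")
-expected = [f"{split}.{kind}" for split in ("train", "val", "test") for kind in ("source", "target")]
-missing = [f for f in expected if not os.path.isfile(os.path.join(data_dir, f))]
-print("all 6 files present" if not missing else f"missing: {missing}")
-```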
- -#### WMT English-German - -```bash -wget https://cdn-datasets.huggingface.co/translation/wmt_en_de.tgz -tar -xzvf wmt_en_de.tgz -export DATA_DIR=${PWD}/wmt_en_de -``` - -#### FSMT datasets (wmt) - -Refer to the scripts starting with `eval_` under: -https://github.com/huggingface/transformers/tree/main/scripts/fsmt - -#### Pegasus (multiple datasets) - -Multiple eval datasets are available for download from: -https://github.com/stas00/porting/tree/master/datasets/pegasus - - -#### Your Data - -If you are using your own data, it must be formatted as one directory with 6 files: -``` -train.source -train.target -val.source -val.target -test.source -test.target -``` -The `.source` files are the input, the `.target` files are the desired output. - -### Potential issues - -- native AMP (`--fp16` and no apex) may lead to a huge memory leak and require 10x gpu memory. This has been fixed in pytorch-nightly and the minimal official version to have this fix will be pytorch-1.7.1. Until then if you have to use mixed precision please use AMP only with pytorch-nightly or NVIDIA's apex. Reference: https://github.com/huggingface/transformers/issues/8403 - - -### Tips and Tricks - -General Tips: -- since you need to run from `examples/legacy/seq2seq`, and likely need to modify code, the easiest workflow is fork transformers, clone your fork, and run `pip install -e .` before you get started. -- try `--freeze_encoder` or `--freeze_embeds` for faster training/larger batch size. (3hr per epoch with bs=8, see the "xsum_shared_task" command below) -- `fp16_opt_level=O1` (the default works best). -- In addition to the pytorch-lightning .ckpt checkpoint, a transformers checkpoint will be saved. -Load it with `BartForConditionalGeneration.from_pretrained(f'{output_dir}/best_tfmr)`. -- At the moment, `--do_predict` does not work in a multi-gpu setting. You need to use `evaluate_checkpoint` or the `run_eval.py` code. -- This warning can be safely ignored: - > "Some weights of BartForConditionalGeneration were not initialized from the model checkpoint at facebook/bart-large-xsum and are newly initialized: ['final_logits_bias']" -- Both finetuning and eval are 30% faster with `--fp16`. For that you need to [install apex](https://github.com/NVIDIA/apex#quick-start). -- Read scripts before you run them! - -Summarization Tips: -- (summ) 1 epoch at batch size 1 for bart-large takes 24 hours and requires 13GB GPU RAM with fp16 on an NVIDIA-V100. -- If you want to run experiments on improving the summarization finetuning process, try the XSUM Shared Task (below). It's faster to train than CNNDM because the summaries are shorter. -- For CNN/DailyMail, the default `val_max_target_length` and `test_max_target_length` will truncate the ground truth labels, resulting in slightly higher rouge scores. To get accurate rouge scores, you should rerun calculate_rouge on the `{output_dir}/test_generations.txt` file saved by `trainer.test()` -- `--max_target_length=60 --val_max_target_length=60 --test_max_target_length=100 ` is a reasonable setting for XSUM. -- `wandb` can be used by specifying `--logger_name wandb`. It is useful for reproducibility. Specify the environment variable `WANDB_PROJECT='hf_xsum'` to do the XSUM shared task. -- If you are finetuning on your own dataset, start from `distilbart-cnn-12-6` if you want long summaries and `distilbart-xsum-12-6` if you want short summaries. -(It rarely makes sense to start from `bart-large` unless you are a researching finetuning methods). 
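For reference, a minimal sketch of loading the saved transformers checkpoint mentioned in the tips above (the `output_dir` value here is hypothetical; substitute your own run directory):

```python
from transformers import AutoTokenizer, BartForConditionalGeneration

output_dir = "xsum_results"  # hypothetical finetuning output directory
model = BartForConditionalGeneration.from_pretrained(f"{output_dir}/best_tfmr")
tokenizer = AutoTokenizer.from_pretrained(f"{output_dir}/best_tfmr")

# Quick smoke test: summarize a single input
inputs = tokenizer("Some long article text ...", return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=60)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))
```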
- -**Update 2018-07-18** -Datasets: `LegacySeq2SeqDataset` will be used for all tokenizers without a `prepare_seq2seq_batch` method. Otherwise, `Seq2SeqDataset` will be used. -Future work/help wanted: A new dataset to support multilingual tasks. - - -### Fine-tuning using Seq2SeqTrainer -To use `Seq2SeqTrainer` for fine-tuning you should use the `finetune_trainer.py` script. It subclasses `Trainer` to extend it for seq2seq training. Except the `Trainer`-related `TrainingArguments`, it shares the same argument names as that of `finetune.py` file. One notable difference is that calculating generative metrics (BLEU, ROUGE) is optional and is controlled using the `--predict_with_generate` argument. - -With PyTorch 1.6+ it'll automatically use `native AMP` when `--fp16` is set. - -To see all the possible command line options, run: - -```bash -python finetune_trainer.py --help -``` - -For multi-gpu training use `torch.distributed.launch`, e.g. with 2 gpus: -```bash -python -m torch.distributed.launch --nproc_per_node=2 finetune_trainer.py ... -``` - -**At the moment, `Seq2SeqTrainer` does not support *with teacher* distillation.** - -All `Seq2SeqTrainer`-based fine-tuning scripts are included in the `builtin_trainer` directory. - -#### TPU Training -`Seq2SeqTrainer` supports TPU training with few caveats -1. As `generate` method does not work on TPU at the moment, `predict_with_generate` cannot be used. You should use `--prediction_loss_only` to only calculate loss, and do not set `--do_predict` and `--predict_with_generate`. -2. All sequences should be padded to be of equal length to avoid extremely slow training. (`finetune_trainer.py` does this automatically when running on TPU.) - -We provide a very simple launcher script named `xla_spawn.py` that lets you run our example scripts on multiple TPU cores without any boilerplate. Just pass a `--num_cores` flag to this script, then your regular training script with its arguments (this is similar to the `torch.distributed.launch` helper for `torch.distributed`). - -`builtin_trainer/finetune_tpu.sh` script provides minimal arguments needed for TPU training. - -The following command fine-tunes `sshleifer/student_marian_en_ro_6_3` on TPU V3-8 and should complete one epoch in ~5-6 mins. - -```bash -./builtin_trainer/train_distil_marian_enro_tpu.sh -``` - -## Evaluation Commands - -To create summaries for each article in dataset, we use `run_eval.py`, here are a few commands that run eval for different tasks and models. -If 'translation' is in your task name, the computed metric will be BLEU. Otherwise, ROUGE will be used. - -For t5, you need to specify --task translation_{src}_to_{tgt} as follows: -```bash -export DATA_DIR=wmt_en_ro -./run_eval.py t5-base \ - $DATA_DIR/val.source t5_val_generations.txt \ - --reference_path $DATA_DIR/val.target \ - --score_path enro_bleu.json \ - --task translation_en_to_ro \ - --n_obs 100 \ - --device cuda \ - --fp16 \ - --bs 32 -``` - -This command works for MBART, although the BLEU score is suspiciously low. 
-```bash -export DATA_DIR=wmt_en_ro -./run_eval.py facebook/mbart-large-en-ro $DATA_DIR/val.source mbart_val_generations.txt \ - --reference_path $DATA_DIR/val.target \ - --score_path enro_bleu.json \ - --task translation \ - --n_obs 100 \ - --device cuda \ - --fp16 \ - --bs 32 -``` - -Summarization (xsum will be very similar): -```bash -export DATA_DIR=cnn_dm -./run_eval.py sshleifer/distilbart-cnn-12-6 $DATA_DIR/val.source dbart_val_generations.txt \ - --reference_path $DATA_DIR/val.target \ - --score_path cnn_rouge.json \ - --task summarization \ - --n_obs 100 \ - -th 56 \ - --fp16 \ - --bs 32 -``` - -### Multi-GPU Evaluation -here is a command to run xsum evaluation on 8 GPUS. It is more than linearly faster than run_eval.py in some cases -because it uses SortishSampler to minimize padding. You can also use it on 1 GPU. `data_dir` must have -`{type_path}.source` and `{type_path}.target`. Run `./run_distributed_eval.py --help` for all clargs. - -```bash -python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \ - --model_name sshleifer/distilbart-large-xsum-12-3 \ - --save_dir xsum_generations \ - --data_dir xsum \ - --fp16 # you can pass generate kwargs like num_beams here, just like run_eval.py -``` - -Contributions that implement this command for other distributed hardware setups are welcome! - -#### Single-GPU Eval: Tips and Tricks - -When using `run_eval.py`, the following features can be useful: - -* if you running the script multiple times and want to make it easier to track what arguments produced that output, use `--dump-args`. Along with the results it will also dump any custom params that were passed to the script. For example if you used: `--num_beams 8 --early_stopping true`, the output will be: - ``` - {'bleu': 26.887, 'n_obs': 10, 'runtime': 1, 'seconds_per_sample': 0.1, 'num_beams': 8, 'early_stopping': True} - ``` - - `--info` is an additional argument available for the same purpose of tracking the conditions of the experiment. It's useful to pass things that weren't in the argument list, e.g. a language pair `--info "lang:en-ru"`. But also if you pass `--info` without a value it will fallback to the current date/time string, e.g. `2020-09-13 18:44:43`. - - If using `--dump-args --info`, the output will be: - - ``` - {'bleu': 26.887, 'n_obs': 10, 'runtime': 1, 'seconds_per_sample': 0.1, 'num_beams': 8, 'early_stopping': True, 'info': '2020-09-13 18:44:43'} - ``` - - If using `--dump-args --info "pair:en-ru chkpt=best`, the output will be: - - ``` - {'bleu': 26.887, 'n_obs': 10, 'runtime': 1, 'seconds_per_sample': 0.1, 'num_beams': 8, 'early_stopping': True, 'info': 'pair=en-ru chkpt=best'} - ``` - - -* if you need to perform a parametric search in order to find the best ones that lead to the highest BLEU score, let `run_eval_search.py` to do the searching for you. - - The script accepts the exact same arguments as `run_eval.py`, plus an additional argument `--search`. The value of `--search` is parsed, reformatted and fed to ``run_eval.py`` as additional args. - - The format for the `--search` value is a simple string with hparams and colon separated values to try, e.g.: - ``` - --search "num_beams=5:10 length_penalty=0.8:1.0:1.2 early_stopping=true:false" - ``` - which will generate `12` `(2*3*2)` searches for a product of each hparam. 
For example the example that was just used will invoke `run_eval.py` repeatedly with: - - ``` - --num_beams 5 --length_penalty 0.8 --early_stopping true - --num_beams 5 --length_penalty 0.8 --early_stopping false - [...] - --num_beams 10 --length_penalty 1.2 --early_stopping false - ``` - - On completion, this function prints a markdown table of the results sorted by the best BLEU score and the winning arguments. - -``` -bleu | num_beams | length_penalty | early_stopping ------ | --------- | -------------- | -------------- -26.71 | 5 | 1.1 | 1 -26.66 | 5 | 0.9 | 1 -26.66 | 5 | 0.9 | 0 -26.41 | 5 | 1.1 | 0 -21.94 | 1 | 0.9 | 1 -21.94 | 1 | 0.9 | 0 -21.94 | 1 | 1.1 | 1 -21.94 | 1 | 1.1 | 0 - -Best score args: -stas/wmt19-en-ru data/en-ru/val.source data/en-ru/test_translations.txt --reference_path data/en-ru/val.target --score_path data/en-ru/test_bleu.json --bs 8 --task translation --num_beams 5 --length_penalty 1.1 --early_stopping True -``` - -If you pass `--info "some experiment-specific info"` it will get printed before the results table - this is useful for scripting and multiple runs, so one can tell the different sets of results from each other. - - -### Contributing -- follow the standard contributing guidelines and code of conduct. -- add tests to `test_seq2seq_examples.py` -- To run only the seq2seq tests, you must be in the root of the repository and run: -```bash -pytest examples/seq2seq/ -``` - -### Converting pytorch-lightning checkpoints -pytorch lightning ``-do_predict`` often fails, after you are done training, the best way to evaluate your model is to convert it. - -This should be done for you, with a file called `{save_dir}/best_tfmr`. - -If that file doesn't exist but you have a lightning `.ckpt` file, you can run -```bash -python convert_pl_checkpoint_to_hf.py PATH_TO_CKPT randomly_initialized_hf_model_path save_dir/best_tfmr -``` -Then either `run_eval` or `run_distributed_eval` with `save_dir/best_tfmr` (see previous sections) - - -# Experimental Features -These features are harder to use and not always useful. - -### Dynamic Batch Size for MT -`finetune.py` has a command line arg `--max_tokens_per_batch` that allows batches to be dynamically sized. -This feature can only be used: -- with fairseq installed -- on 1 GPU -- without sortish sampler -- after calling `./save_len_file.py $tok $data_dir` - -For example, -```bash -./save_len_file.py Helsinki-NLP/opus-mt-en-ro wmt_en_ro -./dynamic_bs_example.sh --max_tokens_per_batch=2000 --output_dir benchmark_dynamic_bs -``` -splits `wmt_en_ro/train` into 11,197 uneven lengthed batches and can finish 1 epoch in 8 minutes on a v100. - -For comparison, -```bash -./dynamic_bs_example.sh --sortish_sampler --train_batch_size 48 -``` -uses 12,723 batches of length 48 and takes slightly more time 9.5 minutes. - -The feature is still experimental, because: -+ we can make it much more robust if we have memory mapped/preprocessed datasets. -+ The speedup over sortish sampler is not that large at the moment. diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/train_mbart_cc25_enro.sh b/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/train_mbart_cc25_enro.sh deleted file mode 100644 index 2b603eda7c35e6865eb65e05c611f263ce6b7a83..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/train_mbart_cc25_enro.sh +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -python finetune_trainer.py \ - --model_name_or_path=facebook/mbart-large-cc25 \ - --data_dir $ENRO_DIR \ - --output_dir mbart_cc25_enro --overwrite_output_dir \ - --learning_rate=3e-5 \ - --warmup_steps 500 \ - --fp16 \ - --label_smoothing 0.1 \ - --adam_eps 1e-06 \ - --src_lang en_XX --tgt_lang ro_RO \ - --freeze_embeds \ - --per_device_train_batch_size=4 --per_device_eval_batch_size=4 \ - --max_source_length 128 --max_target_length 128 --val_max_target_length 128 --test_max_target_length 128\ - --sortish_sampler \ - --num_train_epochs 6 \ - --save_steps 25000 --eval_steps 25000 --logging_steps 1000 \ - --do_train --do_eval --do_predict \ - --evaluation_strategy steps \ - --predict_with_generate --logging_first_step \ - --task translation \ - "$@" diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/README.md b/spaces/chendl/compositional_test/transformers/examples/research_projects/README.md deleted file mode 100644 index 32d7fee0453c506409679b1410faad41bcd15047..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/README.md +++ /dev/null @@ -1,28 +0,0 @@ - - -# Research projects - -This folder contains various research projects using 🤗 Transformers. They are not maintained and require a specific -version of 🤗 Transformers that is indicated in the requirements file of each folder. Updating them to the most recent version of the library will require some work. - -To use any of them, just run the command -``` -pip install -r requirements.txt -``` -inside the folder of your choice. - -If you need help with any of those, contact the author(s), indicated at the top of the `README` of each folder. diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/adversarial/utils_hans.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/adversarial/utils_hans.py deleted file mode 100644 index f051e60f84fefdb54ee991a1668d14528da28ac0..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/adversarial/utils_hans.py +++ /dev/null @@ -1,339 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -import os -from dataclasses import dataclass -from typing import List, Optional, Union - -import tqdm -from filelock import FileLock - -from transformers import ( - BartTokenizer, - BartTokenizerFast, - DataProcessor, - PreTrainedTokenizer, - RobertaTokenizer, - RobertaTokenizerFast, - XLMRobertaTokenizer, - is_tf_available, - is_torch_available, -) - - -logger = logging.getLogger(__name__) - - -@dataclass(frozen=True) -class InputExample: - """ - A single training/test example for simple sequence classification. - - Args: - guid: Unique id for the example. - text_a: string. The untokenized text of the first sequence. For single - sequence tasks, only this sequence must be specified. - text_b: (Optional) string. The untokenized text of the second sequence. - Only must be specified for sequence pair tasks. - label: (Optional) string. The label of the example. This should be - specified for train and dev examples, but not for test examples. - pairID: (Optional) string. Unique identifier for the pair of sentences. - """ - - guid: str - text_a: str - text_b: Optional[str] = None - label: Optional[str] = None - pairID: Optional[str] = None - - -@dataclass(frozen=True) -class InputFeatures: - """ - A single set of features of data. - Property names are the same names as the corresponding inputs to a model. - - Args: - input_ids: Indices of input sequence tokens in the vocabulary. - attention_mask: Mask to avoid performing attention on padding token indices. - Mask values selected in ``[0, 1]``: - Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens. - token_type_ids: (Optional) Segment token indices to indicate first and second - portions of the inputs. Only some models use them. - label: (Optional) Label corresponding to the input. Int for classification problems, - float for regression problems. - pairID: (Optional) Unique identifier for the pair of sentences. - """ - - input_ids: List[int] - attention_mask: Optional[List[int]] = None - token_type_ids: Optional[List[int]] = None - label: Optional[Union[int, float]] = None - pairID: Optional[int] = None - - -if is_torch_available(): - import torch - from torch.utils.data import Dataset - - class HansDataset(Dataset): - """ - This will be superseded by a framework-agnostic approach - soon. - """ - - features: List[InputFeatures] - - def __init__( - self, - data_dir: str, - tokenizer: PreTrainedTokenizer, - task: str, - max_seq_length: Optional[int] = None, - overwrite_cache=False, - evaluate: bool = False, - ): - processor = hans_processors[task]() - - cached_features_file = os.path.join( - data_dir, - "cached_{}_{}_{}_{}".format( - "dev" if evaluate else "train", - tokenizer.__class__.__name__, - str(max_seq_length), - task, - ), - ) - label_list = processor.get_labels() - if tokenizer.__class__ in ( - RobertaTokenizer, - RobertaTokenizerFast, - XLMRobertaTokenizer, - BartTokenizer, - BartTokenizerFast, - ): - # HACK(label indices are swapped in RoBERTa pretrained model) - label_list[1], label_list[2] = label_list[2], label_list[1] - self.label_list = label_list - - # Make sure only the first process in distributed training processes the dataset, - # and the others will use the cache. 
- lock_path = cached_features_file + ".lock" - with FileLock(lock_path): - if os.path.exists(cached_features_file) and not overwrite_cache: - logger.info(f"Loading features from cached file {cached_features_file}") - self.features = torch.load(cached_features_file) - else: - logger.info(f"Creating features from dataset file at {data_dir}") - - examples = ( - processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir) - ) - - logger.info("Training examples: %s", len(examples)) - self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer) - logger.info("Saving features into cached file %s", cached_features_file) - torch.save(self.features, cached_features_file) - - def __len__(self): - return len(self.features) - - def __getitem__(self, i) -> InputFeatures: - return self.features[i] - - def get_labels(self): - return self.label_list - - -if is_tf_available(): - import tensorflow as tf - - class TFHansDataset: - """ - This will be superseded by a framework-agnostic approach - soon. - """ - - features: List[InputFeatures] - - def __init__( - self, - data_dir: str, - tokenizer: PreTrainedTokenizer, - task: str, - max_seq_length: Optional[int] = 128, - overwrite_cache=False, - evaluate: bool = False, - ): - processor = hans_processors[task]() - label_list = processor.get_labels() - if tokenizer.__class__ in ( - RobertaTokenizer, - RobertaTokenizerFast, - XLMRobertaTokenizer, - BartTokenizer, - BartTokenizerFast, - ): - # HACK(label indices are swapped in RoBERTa pretrained model) - label_list[1], label_list[2] = label_list[2], label_list[1] - self.label_list = label_list - - examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir) - self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer) - - def gen(): - for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"): - if ex_index % 10000 == 0: - logger.info("Writing example %d of %d" % (ex_index, len(examples))) - - yield ( - { - "example_id": 0, - "input_ids": ex.input_ids, - "attention_mask": ex.attention_mask, - "token_type_ids": ex.token_type_ids, - }, - ex.label, - ) - - self.dataset = tf.data.Dataset.from_generator( - gen, - ( - { - "example_id": tf.int32, - "input_ids": tf.int32, - "attention_mask": tf.int32, - "token_type_ids": tf.int32, - }, - tf.int64, - ), - ( - { - "example_id": tf.TensorShape([]), - "input_ids": tf.TensorShape([None, None]), - "attention_mask": tf.TensorShape([None, None]), - "token_type_ids": tf.TensorShape([None, None]), - }, - tf.TensorShape([]), - ), - ) - - def get_dataset(self): - return self.dataset - - def __len__(self): - return len(self.features) - - def __getitem__(self, i) -> InputFeatures: - return self.features[i] - - def get_labels(self): - return self.label_list - - -class HansProcessor(DataProcessor): - """Processor for the HANS data set.""" - - def get_train_examples(self, data_dir): - """See base class.""" - return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train") - - def get_dev_examples(self, data_dir): - """See base class.""" - return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev") - - def get_labels(self): - """See base class. 
- Note that we follow the standard three labels for MNLI - (see :class:`~transformers.data.processors.utils.MnliProcessor`) - but the HANS evaluation groups `contradiction` and `neutral` into `non-entailment` (label 0) while - `entailment` is label 1.""" - return ["contradiction", "entailment", "neutral"] - - def _create_examples(self, lines, set_type): - """Creates examples for the training and dev sets.""" - examples = [] - for i, line in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, line[0]) - text_a = line[5] - text_b = line[6] - pairID = line[7][2:] if line[7].startswith("ex") else line[7] - label = line[0] - examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID)) - return examples - - -def hans_convert_examples_to_features( - examples: List[InputExample], - label_list: List[str], - max_length: int, - tokenizer: PreTrainedTokenizer, -): - """ - Loads a data file into a list of ``InputFeatures`` - - Args: - examples: List of ``InputExamples`` containing the examples. - label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method. - max_length: Maximum example length. - tokenizer: Instance of a tokenizer that will tokenize the examples. - - Returns: - A list of task-specific ``InputFeatures`` which can be fed to the model. - - """ - - label_map = {label: i for i, label in enumerate(label_list)} - - features = [] - for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): - if ex_index % 10000 == 0: - logger.info("Writing example %d" % (ex_index)) - - inputs = tokenizer( - example.text_a, - example.text_b, - add_special_tokens=True, - max_length=max_length, - padding="max_length", - truncation=True, - return_overflowing_tokens=True, - ) - - label = label_map[example.label] if example.label in label_map else 0 - - pairID = int(example.pairID) - - features.append(InputFeatures(**inputs, label=label, pairID=pairID)) - - for i, example in enumerate(examples[:5]): - logger.info("*** Example ***") - logger.info(f"guid: {example}") - logger.info(f"features: {features[i]}") - - return features - - -hans_tasks_num_labels = { - "hans": 3, -} - -hans_processors = { - "hans": HansProcessor, -} diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/distillation/scripts/token_counts.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/distillation/scripts/token_counts.py deleted file mode 100644 index 736b564ee76ea46791504f4016d5d421b884e961..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/distillation/scripts/token_counts.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Preprocessing script before training the distilled model. 
-""" -import argparse -import logging -import pickle -from collections import Counter - - -logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO -) -logger = logging.getLogger(__name__) - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)" - ) - parser.add_argument( - "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset." - ) - parser.add_argument( - "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file." - ) - parser.add_argument("--vocab_size", default=30522, type=int) - args = parser.parse_args() - - logger.info(f"Loading data from {args.data_file}") - with open(args.data_file, "rb") as fp: - data = pickle.load(fp) - - logger.info("Counting occurrences for MLM.") - counter = Counter() - for tk_ids in data: - counter.update(tk_ids) - counts = [0] * args.vocab_size - for k, v in counter.items(): - counts[k] = v - - logger.info(f"Dump to {args.token_counts_dump}") - with open(args.token_counts_dump, "wb") as handle: - pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/run_eval.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/run_eval.py deleted file mode 100644 index 98c9786d2c95cd4f06e057766725ede553c6dadd..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/run_eval.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python - -import argparse -import datetime -import json -import time -import warnings -from logging import getLogger -from pathlib import Path -from typing import Dict, List - -import torch -from tqdm import tqdm - -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer -from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params - - -logger = getLogger(__name__) - - -DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu" - - -def generate_summaries_or_translations( - examples: List[str], - out_file: str, - model_name: str, - batch_size: int = 8, - device: str = DEFAULT_DEVICE, - fp16=False, - task="summarization", - prefix=None, - **generate_kwargs, -) -> Dict: - """Save model.generate results to , and return how long it took.""" - fout = Path(out_file).open("w", encoding="utf-8") - model_name = str(model_name) - model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device) - if fp16: - model = model.half() - - tokenizer = AutoTokenizer.from_pretrained(model_name) - logger.info(f"Inferred tokenizer type: {tokenizer.__class__}") # if this is wrong, check config.model_type. 
- - start_time = time.time() - # update config with task specific params - use_task_specific_params(model, task) - if prefix is None: - prefix = prefix or getattr(model.config, "prefix", "") or "" - for examples_chunk in tqdm(list(chunks(examples, batch_size))): - examples_chunk = [prefix + text for text in examples_chunk] - batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device) - summaries = model.generate( - input_ids=batch.input_ids, - attention_mask=batch.attention_mask, - **generate_kwargs, - ) - dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False) - for hypothesis in dec: - fout.write(hypothesis + "\n") - fout.flush() - fout.close() - runtime = int(time.time() - start_time) # seconds - n_obs = len(examples) - return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)} - - -def datetime_now(): - return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - -def run_generate(verbose=True): - """ - - Takes input text, generates output, and then using reference calculates the BLEU scores. - - The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed. - - Args: - verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout - - Returns: - a tuple: ``(scores, params}`` - - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}`` - - ``params``: a dict of custom params, e.g. ``{'num_beams': 5, 'length_penalty': 0.8}`` - """ - - parser = argparse.ArgumentParser() - parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.") - parser.add_argument("input_path", type=str, help="like cnn_dm/test.source") - parser.add_argument("save_path", type=str, help="where to save summaries") - parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target") - parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics") - parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.") - parser.add_argument( - "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples" - ) - parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics") - parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") - parser.add_argument( - "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all." - ) - parser.add_argument("--fp16", action="store_true") - parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results") - parser.add_argument( - "--info", - nargs="?", - type=str, - const=datetime_now(), - help=( - "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g." - " lang=en-ru. If no value is passed, the current datetime string will be used." 
- ), - ) - # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate - args, rest = parser.parse_known_args() - parsed_args = parse_numeric_n_bool_cl_kwargs(rest) - if parsed_args and verbose: - print(f"parsed the following generate kwargs: {parsed_args}") - with open(args.input_path) as f: - examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in f.readlines()] - if args.n_obs > 0: - examples = examples[: args.n_obs] - Path(args.save_path).parent.mkdir(exist_ok=True) - if args.reference_path is None and Path(args.score_path).exists(): - warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.") - runtime_metrics = generate_summaries_or_translations( - examples, - args.save_path, - args.model_name, - batch_size=args.bs, - device=args.device, - fp16=args.fp16, - task=args.task, - prefix=args.prefix, - **parsed_args, - ) - - if args.reference_path is None: - return {} - - # Compute scores - score_fn = calculate_bleu if "translation" in args.task else calculate_rouge - output_lns = [x.rstrip() for x in open(args.save_path).readlines()] - reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)] - scores: dict = score_fn(output_lns, reference_lns) - scores.update(runtime_metrics) - - if args.dump_args: - scores.update(parsed_args) - if args.info: - scores["info"] = args.info - - if verbose: - print(scores) - - if args.score_path is not None: - json.dump(scores, open(args.score_path, "w")) - - return scores - - -if __name__ == "__main__": - # Usage for MT: - # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ - run_generate(verbose=True) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/proto_builder.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/proto_builder.py deleted file mode 100644 index 8dab8b3ee0847af949e43c0aaeddf05325d963cc..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/proto_builder.py +++ /dev/null @@ -1,134 +0,0 @@ -# Protocol Buffers - Google's data interchange format -# Copyright 2008 Google Inc. All rights reserved. -# https://developers.google.com/protocol-buffers/ -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Dynamic Protobuf class creator.""" - -from collections import OrderedDict -import hashlib -import os - -from google.protobuf import descriptor_pb2 -from google.protobuf import descriptor -from google.protobuf import descriptor_pool -from google.protobuf import message_factory - - -def _GetMessageFromFactory(pool, full_name): - """Get a proto class from the MessageFactory by name. - - Args: - pool: a descriptor pool. - full_name: str, the fully qualified name of the proto type. - Returns: - A class, for the type identified by full_name. - Raises: - KeyError, if the proto is not found in the factory's descriptor pool. - """ - proto_descriptor = pool.FindMessageTypeByName(full_name) - proto_cls = message_factory.GetMessageClass(proto_descriptor) - return proto_cls - - -def MakeSimpleProtoClass(fields, full_name=None, pool=None): - """Create a Protobuf class whose fields are basic types. - - Note: this doesn't validate field names! - - Args: - fields: dict of {name: field_type} mappings for each field in the proto. If - this is an OrderedDict the order will be maintained, otherwise the - fields will be sorted by name. - full_name: optional str, the fully-qualified name of the proto type. - pool: optional DescriptorPool instance. - Returns: - a class, the new protobuf class with a FileDescriptor. - """ - pool_instance = pool or descriptor_pool.DescriptorPool() - if full_name is not None: - try: - proto_cls = _GetMessageFromFactory(pool_instance, full_name) - return proto_cls - except KeyError: - # The factory's DescriptorPool doesn't know about this class yet. - pass - - # Get a list of (name, field_type) tuples from the fields dict. If fields was - # an OrderedDict we keep the order, but otherwise we sort the field to ensure - # consistent ordering. - field_items = fields.items() - if not isinstance(fields, OrderedDict): - field_items = sorted(field_items) - - # Use a consistent file name that is unlikely to conflict with any imported - # proto files. - fields_hash = hashlib.sha1() - for f_name, f_type in field_items: - fields_hash.update(f_name.encode('utf-8')) - fields_hash.update(str(f_type).encode('utf-8')) - proto_file_name = fields_hash.hexdigest() + '.proto' - - # If the proto is anonymous, use the same hash to name it. - if full_name is None: - full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' + - fields_hash.hexdigest()) - try: - proto_cls = _GetMessageFromFactory(pool_instance, full_name) - return proto_cls - except KeyError: - # The factory's DescriptorPool doesn't know about this class yet. - pass - - # This is the first time we see this proto: add a new descriptor to the pool. 
- pool_instance.Add( - _MakeFileDescriptorProto(proto_file_name, full_name, field_items)) - return _GetMessageFromFactory(pool_instance, full_name) - - -def _MakeFileDescriptorProto(proto_file_name, full_name, field_items): - """Populate FileDescriptorProto for MessageFactory's DescriptorPool.""" - package, name = full_name.rsplit('.', 1) - file_proto = descriptor_pb2.FileDescriptorProto() - file_proto.name = os.path.join(package.replace('.', '/'), proto_file_name) - file_proto.package = package - desc_proto = file_proto.message_type.add() - desc_proto.name = name - for f_number, (f_name, f_type) in enumerate(field_items, 1): - field_proto = desc_proto.field.add() - field_proto.name = f_name - # # If the number falls in the reserved range, reassign it to the correct - # # number after the range. - if f_number >= descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER: - f_number += ( - descriptor.FieldDescriptor.LAST_RESERVED_FIELD_NUMBER - - descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER + 1) - field_proto.number = f_number - field_proto.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL - field_proto.type = f_type - return file_proto diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-928645ac.css b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-928645ac.css deleted file mode 100644 index 4329ebb21b609937b3a2fdd0c3a1ef2edf96b04c..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-928645ac.css +++ /dev/null @@ -1 +0,0 @@ -.container.svelte-19on2m6.svelte-19on2m6{display:flex;flex-direction:column;gap:var(--spacing-sm);padding:var(--block-padding)}.hl.svelte-19on2m6+.hl.svelte-19on2m6{margin-left:var(--size-1)}.textspan.svelte-19on2m6:last-child>.label.svelte-19on2m6{margin-right:0}.category-legend.svelte-19on2m6.svelte-19on2m6{display:flex;flex-wrap:wrap;gap:var(--spacing-sm);color:#000}.category-label.svelte-19on2m6.svelte-19on2m6{cursor:pointer;border-radius:var(--radius-xs);padding-right:var(--size-2);padding-left:var(--size-2);font-weight:var(--weight-semibold)}.color-legend.svelte-19on2m6.svelte-19on2m6{display:flex;justify-content:space-between;border-radius:var(--radius-xs);background:linear-gradient(to right,var(--color-purple),rgba(255,255,255,0),var(--color-red));padding:var(--size-1) var(--size-2);font-weight:var(--weight-semibold)}.textfield.svelte-19on2m6.svelte-19on2m6{box-sizing:border-box;border-radius:var(--radius-xs);background:var(--background-fill-primary);background-color:transparent;max-width:var(--size-full);line-height:var(--scale-4);word-break:break-all}.textspan.svelte-19on2m6.svelte-19on2m6{transition:.15s;border-radius:var(--radius-xs);padding-top:2.5px;padding-right:var(--size-1);padding-bottom:3.5px;padding-left:var(--size-1);color:#000}.label.svelte-19on2m6.svelte-19on2m6{transition:.15s;margin-top:1px;margin-right:calc(var(--size-1) * -1);border-radius:var(--radius-xs);padding:1px 5px;color:var(--body-text-color);color:#fff;font-weight:var(--weight-bold);font-size:var(--text-sm);text-transform:uppercase}.text.svelte-19on2m6.svelte-19on2m6{color:#000}.score-text.svelte-19on2m6 
.text.svelte-19on2m6{color:var(--body-text-color)}.score-text.svelte-19on2m6.svelte-19on2m6{margin-right:var(--size-1);padding:var(--size-1)}.no-cat.svelte-19on2m6.svelte-19on2m6,.no-label.svelte-19on2m6.svelte-19on2m6{color:var(--body-text-color)}.selectable.svelte-19on2m6.svelte-19on2m6{cursor:pointer} diff --git "a/spaces/cihyFjudo/fairness-paper-search/Coding4Fun ? Slap Your Boss Away With Skype And LeapMotion (Im Getting Ready For\302\2402020!) __EXCLUSIVE__.md" "b/spaces/cihyFjudo/fairness-paper-search/Coding4Fun ? Slap Your Boss Away With Skype And LeapMotion (Im Getting Ready For\302\2402020!) __EXCLUSIVE__.md" deleted file mode 100644 index 6437c65ce7876481509db0993efe5823c8f90afd..0000000000000000000000000000000000000000 --- "a/spaces/cihyFjudo/fairness-paper-search/Coding4Fun ? Slap Your Boss Away With Skype And LeapMotion (Im Getting Ready For\302\2402020!) __EXCLUSIVE__.md" +++ /dev/null @@ -1,6 +0,0 @@ -

Coding4Fun – Slap your boss away with Skype and LeapMotion (I'm getting ready for 2020!)


Download https://tinurli.com/2uwiwh




diff --git a/spaces/codersgyan/espnet-kan-bayashi_ljspeech_vits/app.py b/spaces/codersgyan/espnet-kan-bayashi_ljspeech_vits/app.py deleted file mode 100644 index 219893fd3c94fbd71240128dc3db7506005bb935..0000000000000000000000000000000000000000 --- a/spaces/codersgyan/espnet-kan-bayashi_ljspeech_vits/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/espnet/kan-bayashi_ljspeech_vits").launch() \ No newline at end of file diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/flacdsp_init_arm.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/flacdsp_init_arm.c deleted file mode 100644 index 9962cc89f4533b54585c8ddbdb6f979dad9a4ecd..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/flacdsp_init_arm.c +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2012 Mans Rullgard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/attributes.h" -#include "libavcodec/flacdsp.h" - -void ff_flac_lpc_16_arm(int32_t *samples, const int coeffs[32], int order, - int qlevel, int len); - -av_cold void ff_flacdsp_init_arm(FLACDSPContext *c, enum AVSampleFormat fmt, int channels) -{ - c->lpc16 = ff_flac_lpc_16_arm; -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aura.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aura.c deleted file mode 100644 index 78914fe9f2432bb420a48c34856a2aa4689c5921..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aura.c +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Aura 2 decoder - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Aura 2 decoder - */ - -#include "avcodec.h" -#include "codec_internal.h" -#include "decode.h" -#include "libavutil/internal.h" - -static av_cold int aura_decode_init(AVCodecContext *avctx) -{ - /* width needs to be divisible by 4 for this codec to work */ - if (avctx->width & 0x3) - return AVERROR(EINVAL); - avctx->pix_fmt = AV_PIX_FMT_YUV422P; - - return 0; -} - -static int aura_decode_frame(AVCodecContext *avctx, AVFrame *frame, - int *got_frame, AVPacket *pkt) -{ - uint8_t *Y, *U, *V; - uint8_t val; - int x, y, ret; - const uint8_t *buf = pkt->data; - - /* prediction error tables (make it clear that they are signed values) */ - const int8_t *delta_table = (const int8_t*)buf + 16; - - if (pkt->size != 48 + avctx->height * avctx->width) { - av_log(avctx, AV_LOG_ERROR, "got a buffer with %d bytes when %d were expected\n", - pkt->size, 48 + avctx->height * avctx->width); - return AVERROR_INVALIDDATA; - } - - /* pixel data starts 48 bytes in, after 3x16-byte tables */ - buf += 48; - - if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) - return ret; - - Y = frame->data[0]; - U = frame->data[1]; - V = frame->data[2]; - - /* iterate through each line in the height */ - for (y = 0; y < avctx->height; y++) { - /* reset predictors */ - val = *buf++; - U[0] = val & 0xF0; - Y[0] = val << 4; - val = *buf++; - V[0] = val & 0xF0; - Y[1] = Y[0] + delta_table[val & 0xF]; - Y += 2; U++; V++; - - /* iterate through the remaining pixel groups (4 pixels/group) */ - for (x = 1; x < (avctx->width >> 1); x++) { - val = *buf++; - U[0] = U[-1] + delta_table[val >> 4]; - Y[0] = Y[-1] + delta_table[val & 0xF]; - val = *buf++; - V[0] = V[-1] + delta_table[val >> 4]; - Y[1] = Y[ 0] + delta_table[val & 0xF]; - Y += 2; U++; V++; - } - Y += frame->linesize[0] - avctx->width; - U += frame->linesize[1] - (avctx->width >> 1); - V += frame->linesize[2] - (avctx->width >> 1); - } - - *got_frame = 1; - - return pkt->size; -} - -const FFCodec ff_aura2_decoder = { - .p.name = "aura2", - CODEC_LONG_NAME("Auravision Aura 2"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_AURA2, - .init = aura_decode_init, - FF_CODEC_DECODE_CB(aura_decode_frame), - .p.capabilities = AV_CODEC_CAP_DR1, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/avdct.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/avdct.h deleted file mode 100644 index 6411fab6f637fee80978029fbee26203911a9edf..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/avdct.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_AVDCT_H -#define AVCODEC_AVDCT_H - -#include "libavutil/opt.h" - -/** - * AVDCT context. - * @note function pointers can be NULL if the specific features have been - * disabled at build time. - */ -typedef struct AVDCT { - const AVClass *av_class; - - void (*idct)(int16_t *block /* align 16 */); - - /** - * IDCT input permutation. - * Several optimized IDCTs need a permutated input (relative to the - * normal order of the reference IDCT). - * This permutation must be performed before the idct_put/add. - * Note, normally this can be merged with the zigzag/alternate scan
- * An example to avoid confusion: - * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...) - * - (x -> reference DCT -> reference IDCT -> x) - * - (x -> reference DCT -> simple_mmx_perm = idct_permutation - * -> simple_idct_mmx -> x) - * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant - * -> simple_idct_mmx -> ...) - */ - uint8_t idct_permutation[64]; - - void (*fdct)(int16_t *block /* align 16 */); - - - /** - * DCT algorithm. - * must use AVOptions to set this field. - */ - int dct_algo; - - /** - * IDCT algorithm. - * must use AVOptions to set this field. - */ - int idct_algo; - - void (*get_pixels)(int16_t *block /* align 16 */, - const uint8_t *pixels /* align 8 */, - ptrdiff_t line_size); - - int bits_per_sample; - - void (*get_pixels_unaligned)(int16_t *block /* align 16 */, - const uint8_t *pixels, - ptrdiff_t line_size); -} AVDCT; - -/** - * Allocates a AVDCT context. - * This needs to be initialized with avcodec_dct_init() after optionally - * configuring it with AVOptions. - * - * To free it use av_free() - */ -AVDCT *avcodec_dct_alloc(void); -int avcodec_dct_init(AVDCT *); - -const AVClass *avcodec_dct_get_class(void); - -#endif /* AVCODEC_AVDCT_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libuavs3d.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libuavs3d.c deleted file mode 100644 index 5c08d2b8d3e03874c2441371f2697a0c1616279e..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libuavs3d.c +++ /dev/null @@ -1,278 +0,0 @@ -/* - * AVS3-P2/IEEE1857.10 video decoder (using the uavs3d library) - * Copyright (c) 2020 Zhenyu Wang - * Bingjie Han - * Huiwen Ren - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/avutil.h" -#include "libavutil/common.h" -#include "libavutil/cpu.h" -#include "libavutil/imgutils.h" -#include "libavutil/intreadwrite.h" -#include "libavutil/opt.h" -#include "avcodec.h" -#include "avs3.h" -#include "codec_internal.h" -#include "decode.h" -#include "uavs3d.h" - -typedef struct uavs3d_context { - AVCodecContext *avctx; - void *dec_handle; - int frame_threads; - int got_seqhdr; - uavs3d_io_frm_t dec_frame; -} uavs3d_context; - -#define UAVS3D_CHECK_START_CODE(data_ptr, PIC_START_CODE) \ - (AV_RL32(data_ptr) != (PIC_START_CODE << 24) + AVS3_NAL_START_CODE) -static int uavs3d_find_next_start_code(const unsigned char *bs_data, int bs_len, int *left) -{ - const unsigned char *data_ptr = bs_data + 4; - int count = bs_len - 4; - - while (count >= 4 && - UAVS3D_CHECK_START_CODE(data_ptr, AVS3_INTER_PIC_START_CODE) && - UAVS3D_CHECK_START_CODE(data_ptr, AVS3_INTRA_PIC_START_CODE) && - UAVS3D_CHECK_START_CODE(data_ptr, AVS3_SEQ_START_CODE) && - UAVS3D_CHECK_START_CODE(data_ptr, AVS3_FIRST_SLICE_START_CODE) && - UAVS3D_CHECK_START_CODE(data_ptr, AVS3_SEQ_END_CODE)) { - data_ptr++; - count--; - } - - if (count >= 4) { - *left = count; - return 1; - } - - return 0; -} - -static void uavs3d_output_callback(uavs3d_io_frm_t *dec_frame) { - uavs3d_io_frm_t frm_out; - AVFrame *frm = (AVFrame *)dec_frame->priv; - int i; - - if (!frm || !frm->data[0]) { - dec_frame->got_pic = 0; - av_log(NULL, AV_LOG_ERROR, "Invalid AVFrame in uavs3d output.\n"); - return; - } - - frm->pts = dec_frame->pts; - frm->pkt_dts = dec_frame->dts; -#if FF_API_FRAME_PKT -FF_DISABLE_DEPRECATION_WARNINGS - frm->pkt_pos = dec_frame->pkt_pos; - frm->pkt_size = dec_frame->pkt_size; -FF_ENABLE_DEPRECATION_WARNINGS -#endif -#if FF_API_FRAME_PICTURE_NUMBER -FF_DISABLE_DEPRECATION_WARNINGS - frm->coded_picture_number = dec_frame->dtr; - frm->display_picture_number = dec_frame->ptr; -FF_ENABLE_DEPRECATION_WARNINGS -#endif - - if (dec_frame->type < 0 || dec_frame->type >= FF_ARRAY_ELEMS(ff_avs3_image_type)) { - av_log(NULL, AV_LOG_WARNING, "Error frame type in uavs3d: %d.\n", dec_frame->type); - } else { - frm->pict_type = ff_avs3_image_type[dec_frame->type]; - frm->key_frame = (frm->pict_type == AV_PICTURE_TYPE_I); - } - - for (i = 0; i < 3; i++) { - frm_out.width [i] = dec_frame->width[i]; - frm_out.height[i] = dec_frame->height[i]; - frm_out.stride[i] = frm->linesize[i]; - frm_out.buffer[i] = frm->data[i]; - } - - uavs3d_img_cpy_cvt(&frm_out, dec_frame, dec_frame->bit_depth); -} - -static av_cold int libuavs3d_init(AVCodecContext *avctx) -{ - uavs3d_context *h = avctx->priv_data; - uavs3d_cfg_t cdsc; - - cdsc.frm_threads = avctx->thread_count > 0 ? 
avctx->thread_count : av_cpu_count(); - cdsc.check_md5 = 0; - h->dec_handle = uavs3d_create(&cdsc, uavs3d_output_callback, NULL); - h->got_seqhdr = 0; - - if (!h->dec_handle) { - return AVERROR(ENOMEM); - } - - return 0; -} - -static av_cold int libuavs3d_end(AVCodecContext *avctx) -{ - uavs3d_context *h = avctx->priv_data; - - if (h->dec_handle) { - uavs3d_flush(h->dec_handle, NULL); - uavs3d_delete(h->dec_handle); - h->dec_handle = NULL; - } - h->got_seqhdr = 0; - - return 0; -} - -static void libuavs3d_flush(AVCodecContext * avctx) -{ - uavs3d_context *h = avctx->priv_data; - - if (h->dec_handle) { - uavs3d_reset(h->dec_handle); - } -} - -#define UAVS3D_CHECK_INVALID_RANGE(v, l, r) ((v)<(l)||(v)>(r)) -static int libuavs3d_decode_frame(AVCodecContext *avctx, AVFrame *frm, - int *got_frame, AVPacket *avpkt) -{ - uavs3d_context *h = avctx->priv_data; - const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; - const uint8_t *buf_end; - const uint8_t *buf_ptr = buf; - int left_bytes; - int ret, finish = 0; - - *got_frame = 0; - frm->pts = -1; - frm->pict_type = AV_PICTURE_TYPE_NONE; - - if (!buf_size) { - if (h->got_seqhdr) { - if (!frm->data[0] && (ret = ff_get_buffer(avctx, frm, 0)) < 0) { - return ret; - } - h->dec_frame.priv = frm; // AVFrame - } - do { - ret = uavs3d_flush(h->dec_handle, &h->dec_frame); - } while (ret > 0 && !h->dec_frame.got_pic); - } else { - uavs3d_io_frm_t *frm_dec = &h->dec_frame; - - buf_end = buf + buf_size; -#if FF_API_FRAME_PKT -FF_DISABLE_DEPRECATION_WARNINGS - frm_dec->pkt_pos = avpkt->pos; - frm_dec->pkt_size = avpkt->size; -FF_ENABLE_DEPRECATION_WARNINGS -#endif - - while (!finish) { - int bs_len; - - if (h->got_seqhdr) { - if (!frm->data[0] && (ret = ff_get_buffer(avctx, frm, 0)) < 0) { - return ret; - } - h->dec_frame.priv = frm; // AVFrame - } - - if (uavs3d_find_next_start_code(buf_ptr, buf_end - buf_ptr, &left_bytes)) { - bs_len = buf_end - buf_ptr - left_bytes; - } else { - bs_len = buf_end - buf_ptr; - finish = 1; - } - frm_dec->bs = (unsigned char *)buf_ptr; - frm_dec->bs_len = bs_len; - frm_dec->pts = avpkt->pts; - frm_dec->dts = avpkt->dts; - uavs3d_decode(h->dec_handle, frm_dec); - buf_ptr += bs_len; - - if (frm_dec->nal_type == NAL_SEQ_HEADER) { - struct uavs3d_com_seqh_t *seqh = frm_dec->seqhdr; - if (UAVS3D_CHECK_INVALID_RANGE(seqh->frame_rate_code, 0, 15)) { - av_log(avctx, AV_LOG_ERROR, "Invalid frame rate code: %d.\n", seqh->frame_rate_code); - seqh->frame_rate_code = 3; // default 25 fps - } else { - avctx->framerate.num = ff_avs3_frame_rate_tab[seqh->frame_rate_code].num; - avctx->framerate.den = ff_avs3_frame_rate_tab[seqh->frame_rate_code].den; - } - avctx->has_b_frames = seqh->output_reorder_delay; - avctx->pix_fmt = seqh->bit_depth_internal == 8 ? 
AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUV420P10LE; - ret = ff_set_dimensions(avctx, seqh->horizontal_size, seqh->vertical_size); - if (ret < 0) - return ret; - h->got_seqhdr = 1; - - if (seqh->colour_description) { - if (UAVS3D_CHECK_INVALID_RANGE(seqh->colour_primaries, 0, 9) || - UAVS3D_CHECK_INVALID_RANGE(seqh->transfer_characteristics, 0, 14) || - UAVS3D_CHECK_INVALID_RANGE(seqh->matrix_coefficients, 0, 11)) { - av_log(avctx, AV_LOG_ERROR, - "Invalid colour description: primaries: %d" - "transfer characteristics: %d" - "matrix coefficients: %d.\n", - seqh->colour_primaries, - seqh->transfer_characteristics, - seqh->matrix_coefficients); - } else { - avctx->color_primaries = ff_avs3_color_primaries_tab[seqh->colour_primaries]; - avctx->color_trc = ff_avs3_color_transfer_tab [seqh->transfer_characteristics]; - avctx->colorspace = ff_avs3_color_matrix_tab [seqh->matrix_coefficients]; - } - } - } - if (frm_dec->got_pic) { - break; - } - } - } - - *got_frame = h->dec_frame.got_pic; - - if (!(*got_frame)) { - av_frame_unref(frm); - } - - return buf_ptr - buf; -} - -const FFCodec ff_libuavs3d_decoder = { - .p.name = "libuavs3d", - CODEC_LONG_NAME("libuavs3d AVS3-P2/IEEE1857.10"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_AVS3, - .priv_data_size = sizeof(uavs3d_context), - .init = libuavs3d_init, - .close = libuavs3d_end, - FF_CODEC_DECODE_CB(libuavs3d_decode_frame), - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS, - .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE | - FF_CODEC_CAP_AUTO_THREADS, - .flush = libuavs3d_flush, - .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, - AV_PIX_FMT_YUV420P10LE, - AV_PIX_FMT_NONE }, - .p.wrapper_name = "libuavs3d", -}; diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download 1200 in 1 Game APK and Turn Your Android into a Retro Gaming Machine.md b/spaces/congsaPfin/Manga-OCR/logs/Download 1200 in 1 Game APK and Turn Your Android into a Retro Gaming Machine.md deleted file mode 100644 index 96daeb1890cb0180b787005fe69452519851e4c7..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download 1200 in 1 Game APK and Turn Your Android into a Retro Gaming Machine.md +++ /dev/null @@ -1,97 +0,0 @@ - -
- - -
-

How to Download NES 1200 Games in 1 APK: Play Unlimited Games

-

Introduction

-

Do you love playing classic games on your mobile devices? If yes, then you should definitely check out NES 1200 Games in 1 APK. This app is developed by Mang JessNoLimit and includes games such as Super Mario Bros., Contra, Tetris, and many more. You can play all these games for free with NES 1200 Games in 1 APK.

-

1200 in 1 game apk download

Download Zip: https://urlca.com/2uOaiH



-

Why should you download NES 1200 Games in 1 APK? Well, there are many reasons to do so. For one thing, you can enjoy playing hundreds of games without having to download them separately. You can also save space on your device as you only need one app to play all these games. Moreover, you can relive your childhood memories and have fun with your friends and family.

-

What are some of the features of this app? Well, there are many features that make this app amazing. For example, you can adjust the screen size and orientation according to your preference. You can also use different controllers such as touch screen, keyboard, or joystick. You can also save and load your game progress anytime you want.

-

How to Download NES 1200 Games in 1 APK?

-

Step 1: Enable Unknown Sources on Your Device

-

Before you can install NES 1200 Games in 1 APK, you need to enable unknown sources on your device. This will allow you to install apps that are not from the official app store. Note that APK files run only on Android; there is no supported way to install them on iOS. Here's how to enable the setting (a sketch of how an app can check the same permission programmatically follows the list):

-

-
    -
  • On most Android devices, go to Settings > Security > Unknown Sources and toggle it on.
  • -
  • On Android 8.0 and later, the permission is granted per app instead: go to Settings > Apps > Special app access > Install unknown apps and allow the browser or file manager you will use to open the APK.
  • -
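For readers who want to see what this toggle looks like at the platform level, below is a minimal Kotlin sketch of how an Android app can check the same permission programmatically. This is generic Android SDK code, not part of the NES 1200 Games in 1 app; the helper names are made up for illustration, and the app would need android.permission.REQUEST_INSTALL_PACKAGES declared in its manifest for the modern check to succeed.

```kotlin
import android.content.Context
import android.content.Intent
import android.net.Uri
import android.os.Build
import android.provider.Settings

// Hypothetical helper: returns true if this app may install APKs it opens.
fun canInstallUnknownApps(context: Context): Boolean =
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
        // Android 8.0+ grants the permission per app, not via a global toggle.
        context.packageManager.canRequestPackageInstalls()
    } else {
        // Pre-8.0 devices use the legacy global "Unknown sources" setting.
        @Suppress("DEPRECATION")
        Settings.Global.getInt(
            context.contentResolver,
            Settings.Global.INSTALL_NON_MARKET_APPS, 0
        ) == 1
    }

// Hypothetical helper: opens the system screen where the user grants it (8.0+).
fun promptForInstallPermission(context: Context) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
        context.startActivity(
            Intent(
                Settings.ACTION_MANAGE_UNKNOWN_APP_SOURCES,
                Uri.parse("package:${context.packageName}")
            )
        )
    }
}
```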
-

Step 2: Download the APK File from a Trusted Source

-

Next, you need to download the APK file of NES 1200 Games in 1 from a trusted source. You can find the download link on the official website of the app or on other reputable sites such as APKPure or APKMirror. Make sure to verify the file before installing it by checking its size, name, and, when the site publishes one, its checksum or signature (a sketch of a checksum check follows below).

-
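When the download site does publish a checksum, comparing it against one computed locally is a simple way to verify the file. Here is a minimal Kotlin sketch; the file name and the expected value are placeholders, not real data for this app:

```kotlin
import java.io.File
import java.security.MessageDigest

// Computes the SHA-256 digest of a file as a lowercase hex string.
fun sha256Of(file: File): String {
    val digest = MessageDigest.getInstance("SHA-256")
    file.inputStream().use { input ->
        val buffer = ByteArray(64 * 1024)
        while (true) {
            val read = input.read(buffer)
            if (read < 0) break
            digest.update(buffer, 0, read)
        }
    }
    return digest.digest().joinToString("") { "%02x".format(it) }
}

fun main() {
    val apk = File("nes-1200-games-in-1.apk")            // placeholder path
    val expected = "paste-the-published-checksum-here"   // placeholder value
    val actual = sha256Of(apk)
    println(if (actual == expected) "Checksum matches" else "Mismatch: $actual")
}
```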

Step 3: Install the APK File on Your Device

-

After downloading the APK file, you need to install it on your device. Here's how to do it on Android:

-
    -
  • Locate the APK file in your file manager and tap on it, then follow the instructions on the screen to complete the installation. Under the hood this hands the file to the system package installer (see the sketch below).
  • -
  • Remember that this works only on Android; iOS devices cannot install or run APK files.
  • -
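As background, tapping an APK in a file manager ultimately fires a system install intent, roughly as in the Kotlin sketch below. This is a generic Android pattern rather than anything specific to this app; it assumes a FileProvider with the authority shown is declared in the manifest, since Android 7.0 and later reject raw file:// URIs here.

```kotlin
import android.content.Context
import android.content.Intent
import android.net.Uri
import androidx.core.content.FileProvider
import java.io.File

// Hands a downloaded APK to the system package installer.
// Assumes a matching <provider> (FileProvider) entry exists in the manifest.
fun installApk(context: Context, apk: File) {
    val uri: Uri = FileProvider.getUriForFile(
        context, "${context.packageName}.fileprovider", apk
    )
    val intent = Intent(Intent.ACTION_VIEW).apply {
        // MIME type identifying the payload as an Android package.
        setDataAndType(uri, "application/vnd.android.package-archive")
        addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION)
        addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
    }
    context.startActivity(intent)
}
```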
-

Step 4: Launch the App and Enjoy Playing

-

Finally, you can launch the app and enjoy playing hundreds of games. Here's how to access and customize the games:

-
    -
  • To access the games, tap on the game icon on the main screen. You will see a list of games categorized by genre. You can also use the search function to find a specific game.
  • -
  • To customize the settings, tap on the menu icon on the top right corner. You will see options such as screen size, orientation, controller, save/load, and more. You can adjust them according to your preference.
  • -
-

What are Some of the Best Games to Play on NES 1200 Games in 1 APK?

-

NES 1200 Games in 1 APK has a huge collection of games that you can play for hours. However, if you are looking for some recommendations, here are some of the best games to play on this app:

-

Super Mario Bros.

-

This is one of the most iconic games of all time. You play as Mario, a plumber who has to rescue Princess Peach from the evil Bowser. You have to run, jump, and collect coins and power-ups along the way. You also have to face various enemies and obstacles such as Goombas, Koopas, Piranha Plants, and more. This game is fun, challenging, and nostalgic.

-

Contra

-

This is one of the most difficult games of all time. You play as a commando who has to fight against an alien invasion. You have to shoot, dodge, and jump your way through various stages such as jungles, bases, waterfalls, and more. You also have to face various enemies and bosses such as soldiers, tanks, helicopters, aliens, and more. This game is action-packed, intense, and rewarding.

-

Tetris

-

This is one of the most addictive games of all time. You have to arrange falling blocks of different shapes and colors into horizontal lines. You have to clear as many lines as possible before the blocks reach the top of the screen. You also have to deal with increasing speed and difficulty as you progress. This game is simple, relaxing, and satisfying.

-

Conclusion

-

NES 1200 Games in 1 APK is a great app for anyone who loves playing classic games on their mobile devices. It allows you to play hundreds of games for free with one app. It also has many features that make it user-friendly and customizable. To download NES 1200 Games in 1 APK, you just need to follow four simple steps: enable unknown sources, download the APK file, install it on your device, and launch it. Then you can enjoy playing some of the best games such as Super Mario Bros., Contra, Tetris, and more.

-

If you are looking for a way to have fun and nostalgia with your mobile device, then you should definitely try out NES 1200 Games in 1 APK. It will give you hours of entertainment and excitement. So what are you waiting for? Download NES 1200 Games in 1 APK now and have fun!

-

FAQs

-

Here are some of the frequently asked questions about NES 1200 Games in 1 APK:

-

Q: Is NES 1200 Games in 1 APK safe to use?

-

A: Yes, NES 1200 Games in 1 APK is safe to use as long as you download it from a trusted source and verify the file. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware.

-

Q: Is NES 1200 Games in 1 APK legal to use?

-

A: NES 1200 Games in 1 APK is legal to use as long as you do not distribute or sell the app or the games. However, you should always respect the intellectual property rights of the original game developers and publishers.

-

Q: How can I update NES 1200 Games in 1 APK?

-

A: To update NES 1200 Games in 1 APK, you need to download the latest version of the app from the same source and install it over the existing one. You can also check for updates within the app by tapping on the menu icon and selecting "Check for updates".

-

Q: How can I uninstall NES 1200 Games in 1 APK?

-

A: To uninstall NES 1200 Games in 1 APK, you need to go to your device settings and select "Apps" or "Applications". Then, find and tap on "NES 1200 Games in 1" and select "Uninstall". You can also delete the APK file from your file manager.

-

Q: How can I contact the developer of NES 1200 Games in 1 APK?

-

A: To contact the developer of NES 1200 Games in 1 APK, you can visit their official website or their social media pages. You can also send them an email at mangjessnolimit@gmail.com.

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Blood Knight Idle 3D RPG APK and Become an Invincible Warrior.md b/spaces/congsaPfin/Manga-OCR/logs/Download Blood Knight Idle 3D RPG APK and Become an Invincible Warrior.md deleted file mode 100644 index 80cd9b599f9544b93402e08bfdd104e9302efe33..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Blood Knight Idle 3D RPG APK and Become an Invincible Warrior.md +++ /dev/null @@ -1,79 +0,0 @@ -
-

Blood Knight: Idle 3D RPG APK - A Review

-

If you are looking for a new and exciting idle RPG game to play on your Android device, you might want to check out Blood Knight: Idle 3D RPG APK. This game is a 3D idle RPG game with thrilling skill combos and various growth aspects that will keep you hooked for hours. In this article, we will review what this game is about, what are its features, and how to download and install it on your device.

-

blood knight idle rpg apk

Download File: https://urlca.com/2uO95E



-

What is Blood Knight: Idle 3D RPG?

-

A 3D idle RPG game with exciting skill combos and various growth aspects

-

Blood Knight: Idle 3D RPG is a game where you play as a blood knight warrior who sips devil's blood and becomes invincible. You can unleash powerful skill combos with various weapons and abilities to defeat your enemies and grow stronger. You can also customize your blood knight with different outfits and accessories to suit your style.

-

Powered by Unreal Engine 4 with stunning 3D graphics

-

Blood Knight: Idle 3D RPG is powered by Unreal Engine 4, which is one of the most advanced game engines in the world. This means that the game has stunning 3D graphics, realistic physics, dynamic lighting, and smooth animations. You will be amazed by the visual quality and details of the game world, characters, and effects.

-

What are the features of Blood Knight: Idle 3D RPG?

-

Fast-paced growth with various advancement systems

-

Upgrade abilities, weapons, and challenges

-

Blood Knight: Idle 3D RPG has various advancement systems that make your blood knight stronger and more invincible. You can upgrade your abilities, weapons, and challenges with golds that you collect from the game. You can also enhance your skills with runes that give you extra effects and bonuses. -

Collect golds and resources from dungeons and raids

-

Blood Knight: Idle 3D RPG has different dungeons and raids that you can explore and conquer. You can collect golds and resources from these dungeons and raids as rewards. These resources can be used to craft items, potions, equipment, pets, and more. You can also encounter rare bosses and enemies that drop valuable loot. -

Full of RPG elements with different modes and systems

-

Growth Dungeon, Resource Dungeon, World Boss Raid, Pet System, Quest/Achievements, PVP System

-

Blood Knight: Idle 3D RPG is full of RPG elements that make the game more fun and diverse. You can choose from different modes and systems to play according to your preference. For example, you can enter the Growth Dungeon to level up your blood knight faster, or the Resource Dungeon to gather more materials. You can also join the World Boss Raid to fight against powerful bosses with other players, or the Pet System to collect cute pets that assist you in battle. You can also complete quests and achievements to earn rewards, or challenge other players in the PVP System to test your skills.

-

-

Supports idle mode with offline rewards and simple gameplay

-

Blood Knight: Idle 3D RPG supports idle mode, which means that you can let the game run by itself and still earn golds and resources even when you are offline. You can also enjoy the simple gameplay that does not require complicated controls or strategies. You just need to tap the screen to unleash your skills and combos, and watch your blood knight dominate the battlefield.

-

How to download and install Blood Knight: Idle 3D RPG APK?

-

Download the APK file from a trusted source

-

Blood Knight: Idle 3D RPG APK is not available on the Google Play Store, so you need to download it from a trusted source. You can find the APK file on various websites that offer free and safe APK downloads, such as APKPure or APKMirror. Make sure to check the file size, version, and reviews before downloading it.

-

Enable unknown sources on your device settings

-

Before you can install the APK file, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on. You may also need to grant permissions for the app to access your storage, camera, microphone, etc.

-

Install the APK file and launch the game

-

After you have downloaded and enabled unknown sources, you can install the APK file by tapping on it and following the instructions. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You can now enjoy playing Blood Knight: Idle 3D RPG on your device.

-

Conclusion and FAQs

-

Blood Knight: Idle 3D RPG is a 3D idle RPG game that offers fast-paced growth, thrilling skill combos, stunning graphics, various modes and systems, and idle mode with offline rewards. It is a game that will appeal to fans of RPG games who want to have fun and relax without spending too much time or effort. If you want to try this game, you can download and install its APK file from a trusted source and follow the steps we have provided in this article.

-

Here are some FAQs that you may have about this game:

-

Q: Is Blood Knight: Idle 3D RPG free to play?
A: Yes, it is free to play with optional in-app purchases.

Q: What are the minimum requirements for this game?
A: You need an Android device with at least 4 GB of RAM and 2 GB of free storage space.

Q: How can I contact the developer of this game?
A: You can contact them through their official Facebook page or email address.

Q: How can I get more golds and resources in this game?
A: You can get more golds and resources by playing dungeons and raids, completing quests and achievements, watching ads, or buying them with real money.

Q: How can I get more pets in this game?
A: You can get more pets by collecting pet eggs from dungeons and raids, or buying them with real money.

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/FL Studio 12 APK A Review of the Pros and Cons.md b/spaces/congsaPfin/Manga-OCR/logs/FL Studio 12 APK A Review of the Pros and Cons.md deleted file mode 100644 index ee27fbecf279ec659bdd3967157a472f1a6eadc2..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/FL Studio 12 APK A Review of the Pros and Cons.md +++ /dev/null @@ -1,184 +0,0 @@ - -

FL Studio 12 APK: A Complete Guide

-

If you are looking for a powerful and easy-to-use music production app for your Android device, you might want to check out FL Studio 12 APK. FL Studio, formerly known as Fruity Loops, is one of the most popular digital audio workstations (DAWs) in the world, used by millions of musicians, DJs, and producers. With FL Studio 12 APK, you can enjoy the same features and functionality of the desktop version on your mobile device, anywhere and anytime.

-

In this article, we will give you a complete guide on FL Studio 12 APK, including its features, how to download and install it, how to use it, its pros and cons, and some alternatives. By the end of this article, you will have a better understanding of what FL Studio 12 APK can do for you and your music.

-

fl studio 12 apk

Download File: https://urlca.com/2uOdzB



-

Features of FL Studio 12 APK

-

FL Studio 12 APK is a full-featured music production app that offers a wide range of tools and options for creating, editing, mixing, and mastering your music. Here are some of the main features of FL Studio 12 APK:

-

Powerful and versatile music production tools

-

FL Studio 12 APK comes with a variety of music production tools that let you create any kind of music you want. You can use the step sequencer, the piano roll, or the playlist to arrange your tracks, add patterns, loops, samples, instruments, effects, automation clips, and more. You can also record audio from your device's microphone or external sources, edit audio clips with tools like cut, copy, paste, fade, normalize, reverse, pitch shift, time stretch, etc., and apply effects like reverb, delay, chorus, flanger, phaser, distortion, EQ, compressor, limiter, etc.

-

Large and diverse sound library

-

FL Studio 12 APK comes with a large and diverse sound library that includes over 1 GB of high-quality samples, loops, presets, instruments, and effects. You can choose from hundreds of drum kits, synths, guitars, pianos, strings, brasses, vocals, sound effects, and more. You can also import your own sounds or download additional content from the online store.

-

Intuitive and customizable user interface

-

FL Studio 12 APK has an intuitive and customizable user interface that adapts to your device's screen size and orientation. You can easily access all the functions and options with a few taps or gestures. You can also resize, rearrange, or hide any window or panel according to your preference. You can also change the color scheme or theme of the app to suit your mood or style.

-

-

Support for external plugins and MIDI controllers

-

FL Studio 12 APK supports external plugins and MIDI controllers that can enhance your music production experience. You can use VST or AU plugins to add more instruments or effects to your project. You can also connect your MIDI keyboard, drum pad, or controller to your device and use it to play or control the app. You can also sync FL Studio 12 APK with other apps or devices via MIDI or Wi-Fi.

-
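To make the MIDI side concrete, the sketch below lists the MIDI controllers Android itself can see, using the platform's android.media.midi service (available from Android 6.0). This is generic platform code for illustration, not an FL Studio API, and listMidiDevices is a made-up helper name:

```kotlin
import android.content.Context
import android.media.midi.MidiDeviceInfo
import android.media.midi.MidiManager
import android.os.Build

// Prints every MIDI device the system currently knows about.
fun listMidiDevices(context: Context) {
    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) return // MIDI API is 23+
    val midiManager =
        context.getSystemService(Context.MIDI_SERVICE) as? MidiManager ?: return
    for (info in midiManager.devices) {
        val name = info.properties.getString(MidiDeviceInfo.PROPERTY_NAME)
        println("MIDI device: $name, inputs=${info.inputPortCount}, outputs=${info.outputPortCount}")
    }
}
```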

How to download and install FL Studio 12 APK

-

If you want to download and install FL Studio 12 APK on your Android device, you need to follow these steps:

-

Requirements and compatibility

-

Before you download and install FL Studio 12 APK, you need to make sure that your device meets the minimum requirements and is compatible with the app. Here are the requirements and compatibility for FL Studio 12 APK:

-
    -
  • Your device must have Android 4.1 or higher operating system.
  • -
  • Your device must have at least 1 GB of RAM and 2 GB of free storage space.
  • -
  • Your device must support OpenGL ES 2.0 or higher graphics.
  • -
  • Your device must have a multi-touch screen with a minimum resolution of 800 x 480 pixels.
  • -
  • Your device must have a microphone or an audio input for recording.
  • -
  • Your device must have a speaker or an audio output for playback.
  • -
  • Your device must have an internet connection for downloading additional content or updates.
  • -
-

Steps to download and install FL Studio 12 APK

-

Once you have checked the requirements and compatibility, you can proceed to download and install FL Studio 12 APK on your device. Here are the steps to do so:

-
    -
  1. Go to the official website of FL Studio 12 APK and click on the download button. Alternatively, you can find FL Studio Mobile on Google Play.
  2. -
  3. Wait for the download to finish and locate the APK file on your device. You may need to enable the installation of apps from unknown sources in your device's settings.
  4. -
  5. Tap on the APK file and follow the instructions to install the app on your device. You may need to grant some permissions to the app during the installation process.
  6. -
  7. Launch the app and enjoy creating music with FL Studio 12 APK.
  8. -
-

Tips to avoid malware and viruses

-

While downloading and installing FL Studio 12 APK, you need to be careful about malware and viruses that may harm your device or compromise your data. Here are some tips to avoid malware and viruses:

-
    -
  • Only download FL Studio 12 APK from the official website or a trusted source. Do not download it from any third-party or unverified websites or links.
  • -
  • Scan the APK file with a reliable antivirus or anti-malware software before installing it on your device. Delete any suspicious or infected files immediately.
  • -
  • Do not open any attachments or click on any links that claim to be related to FL Studio 12 APK unless you are sure about their authenticity and safety.
  • -
  • Do not grant any unnecessary or excessive permissions to the app during the installation or usage process. Only allow the permissions that are essential for the app's functionality.
  • -
  • Update the app regularly to get the latest features, bug fixes, and security patches.
  • -
-

How to use FL Studio 12 APK

-

After you have downloaded and installed FL Studio 12 APK on your device, you can start using it to create music. Here are some basic steps on how to use FL Studio 12 APK:

-

Creating a new project

-

To create a new project, you need to do the following:

-
    -
  1. Open the app and tap on the menu icon at the top left corner of the screen.
  2. -
  3. Select "New" from the menu and choose a template for your project. You can choose from different genres, styles, or moods, or start from scratch with an empty project.
  4. -
  5. Name your project and tap on "OK". Your project will be created and ready for editing.
  6. -
-

Adding and editing tracks

-

To add and edit tracks, you need to do the following:

-
    -
  1. Tap on the "+" icon at the bottom left corner of the screen to add a new track. You can choose from different types of tracks, such as audio, instrument, drum, automation, etc.
  2. -
  3. Select a sound source for your track, such as a sample, a loop, a preset, an instrument, etc. You can browse through different categories, genres, or packs, or use the search function to find what you want.
  4. -
  5. Drag and drop the sound source onto the track. You can adjust its position, length, volume, pitch, pan, etc. by using the handles, knobs, sliders, or buttons on the track. You can also double-tap on the track to open the editor window, where you can use more advanced tools and options to edit your track.
  6. -
  7. Repeat the steps above to add more tracks to your project. You can have up to 99 tracks in a project. You can also rename, duplicate, delete, mute, solo, or group your tracks as you wish.
  8. -
-

Mixing and mastering your music

-

To mix and master your music, you need to do the following:

-
    -
  1. Tap on the mixer icon at the top right corner of the screen to open the mixer window. You can see all your tracks and their levels, panning, effects, and routing on the mixer.
  2. -
  3. Adjust the levels and panning of your tracks to balance the volume and stereo image of your music. You can use the faders, knobs, or meters to do so.
  4. -
  5. Add effects to your tracks to enhance or modify their sound. You can tap on the FX button on each track to open the effects rack, where you can choose from over 100 effects, such as reverb, delay, chorus, flanger, phaser, distortion, EQ, compressor, limiter, etc. You can also adjust the parameters and order of the effects as you like.
  6. -
  7. Route your tracks to different buses or outputs to create subgroups or send effects. You can tap on the routing button on each track to open the routing window, where you can select the destination and amount of the signal for each track.
  8. -
  9. Use the master track to apply global effects or adjustments to your music. You can use the same tools and options as the other tracks, but they will affect the entire project.
  10. -
-

Exporting and sharing your music

-

To export and share your music, you need to do the following:

-
    -
  1. Tap on the menu icon at the top left corner of the screen and select "Export" from the menu. You can choose from different formats and quality options for your export, such as WAV, MP3, OGG, FLAC, MIDI, etc.
  2. -
  3. Wait for the export process to finish and locate the exported file on your device. You can also access it from the app's folder or gallery.
  4. -
  5. Share your music with others via email, social media, cloud storage, or any other app or service that supports audio files. You can also upload your music to online platforms like SoundCloud or YouTube.
  6. -
-

Pros and cons of FL Studio 12 APK

-

FL Studio 12 APK is a great app for music production on Android devices, but it also has some pros and cons that you should consider before using it. Here are some of them:

-

Pros of FL Studio 12 APK

-
    -
  • It has a powerful and versatile set of music production tools that can handle any kind of music genre or style.
  • -
  • It has a large and diverse sound library that includes over 1 GB of high-quality samples, loops, presets, instruments, and effects.
  • -
  • It has an intuitive and customizable user interface that adapts to your device's screen size and orientation.
  • -
  • It supports external plugins and MIDI controllers that can enhance your music production experience.
  • -
  • It allows you to export your music in different formats and quality options and share it with others easily.
  • -
-

Cons of FL Studio 12 APK

-
    -
  • It requires a relatively high-end device with enough RAM, storage space, graphics, and audio capabilities to run smoothly.
  • -
  • It may consume a lot of battery power and data when using it for a long time or downloading additional content or updates.
  • -
  • It may not be compatible with some devices or Android versions due to technical issues or limitations.
  • -
  • It may not have all the features or functions of the desktop version of FL Studio due to mobile platform restrictions or differences.
  • -
  • It may encounter some bugs or errors that may affect its performance or functionality.
  • -
-

Alternatives to FL Studio 12 APK

-

If you are looking for some alternatives to FL Studio 12 APK that can also provide you with a good music production experience on your Android device, here are some of the alternatives to FL Studio 12 APK that you can try:

-

Caustic 3

-

Caustic 3 is another music production app that is inspired by rack-mounted synthesizers and samplers. It has 14 different machines that you can use to create your music, such as a subtractive synth, a modular synth, a vocoder, a drum machine, and more. You can also add effects, record audio, edit samples, and mix your tracks with the mixer. Caustic 3 is free to download, but you can unlock more features and content with the paid version. You can also export your music in WAV or OGG format.

-

Audio Evolution Mobile Studio

-

Audio Evolution Mobile Studio is a full-fledged DAW for Android devices that offers a lot of features and options for music production. You can record audio or MIDI, edit tracks with tools like cut, copy, paste, trim, split, etc., add effects like reverb, delay, chorus, etc., mix your tracks with volume, pan, solo, mute, etc., and master your music with EQ, compression, limiter, etc. You can also use virtual instruments or external plugins to enhance your sound. Audio Evolution Mobile Studio is not free, but it has a trial version that you can try before buying.

-

Music Maker Jam

-

Music Maker Jam is a fun and easy music production app that lets you create your own music in minutes. You can choose from over 300 music styles and thousands of loops and samples to create your own beats and melodies. You can also adjust the tempo, pitch, and harmony of your music, and add effects like filters, flanger, phaser, etc. You can also record your own vocals or instruments and mix them with your music. Music Maker Jam is free to download, but you can buy more styles and sounds with in-app purchases. You can also share your music with the Music Maker Jam community or on social media.

-

Conclusion

-

FL Studio 12 APK is a powerful and easy-to-use music production app for Android devices that lets you create, edit, mix, and master your own music anywhere and anytime. It has a lot of features and functions that make it one of the best music production apps for Android. However, it also has some drawbacks and limitations that you should be aware of before using it. If you are looking for some alternatives to FL Studio 12 APK, you can try Caustic 3, Audio Evolution Mobile Studio, or Music Maker Jam.

-

We hope this article has given you a complete guide on FL Studio 12 APK and helped you decide whether it is the right app for you and your music. If you have any questions or feedback about FL Studio 12 APK or any other music production app for Android, feel free to leave a comment below.

-

FAQs

-

Here are some of the frequently asked questions about FL Studio 12 APK:

-

Is FL Studio 12 APK free?

-

No, FL Studio 12 APK is not free. It costs $16.69 on Google Play Store. However, it does not have any subscription fees or in-app purchases.

-

Can I use FL Studio 12 APK on my PC?

-

No, FL Studio 12 APK is only compatible with Android devices. If you want to use FL Studio on your PC, you need to buy the desktop version of FL Studio from the official website.

-

Can I transfer my projects from FL Studio 12 APK to FL Studio desktop?

-

Yes, you can transfer your projects from FL Studio 12 APK to FL Studio desktop using the Wi-Fi share feature or by exporting them as ZIP files.

-

What are the differences between FL Studio 12 APK and FL Studio desktop?

-

FL Studio 12 APK and FL Studio desktop have many similarities in terms of features and functionality, but they also have some differences due to the mobile platform restrictions or differences. Here are some of the main differences between FL Studio 12 APK and FL Studio desktop:

-
    -
  • FL Studio 12 APK has a maximum of 99 tracks, while FL Studio desktop has unlimited tracks.
  • -
  • FL Studio 12 APK has a limited number of plugins and effects, while FL Studio desktop has over 80 plugins and effects.
  • -
  • FL Studio 12 APK does not support some features or functions of FL Studio desktop, such as audio recording with ASIO, Edison, Slicex, Vocodex, etc.
  • -
  • FL Studio 12 APK has a different user interface and workflow than FL Studio desktop, which may require some adaptation or learning.
  • -
-

How can I get more sounds or content for FL Studio 12 APK?

-

You can get more sounds or content for FL Studio 12 APK by using the online store feature in the app. You can browse through different categories, genres, or packs of sounds or content, and download them for free or for a small fee. You can also import your own sounds or use external plugins to add more sounds or content to your project.

-

How can I get help or support for FL Studio 12 APK?

-

If you need help or support for FL Studio 12 APK, you can use the following resources:

-
    -
  • The help menu in the app, which contains a user manual, a video tutorial, and a FAQ section.
  • -
  • The official website of FL Studio, which contains more information, guides, tips, forums, blogs, etc.
  • -
  • The official YouTube channel of FL Studio, which contains video tutorials, demos, reviews, etc.
  • -
  • The official Facebook page of FL Studio, which contains news, updates, events, contests, etc.
  • -
  • The official Twitter account of FL Studio, which contains tweets about FL Studio and related topics.
  • -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/KJ Apas Style Evolution From Teen to Trendsetter.md b/spaces/congsaPfin/Manga-OCR/logs/KJ Apas Style Evolution From Teen to Trendsetter.md deleted file mode 100644 index d1a1756c7176a5be12afd406b4b347ebfe2e8422..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/KJ Apas Style Evolution From Teen to Trendsetter.md +++ /dev/null @@ -1,83 +0,0 @@ - -

KJ Apa: The Rising Star of Riverdale and Beyond

-

If you are a fan of teen dramas, you probably know KJ Apa as the handsome and charismatic Archie Andrews in the hit series Riverdale. But did you know that he is also a talented singer, musician, and producer? Or that he has a rich cultural background and a passion for social causes? In this article, we will explore the life and career of KJ Apa, one of the most promising stars of his generation.

-

Who is KJ Apa?

-

KJ Apa is a New Zealand actor, singer and musician who has gained international fame for his roles in Riverdale, A Dog's Purpose, The Hate U Give, I Still Believe, and Songbird. He has received various accolades, including a Saturn Award and nominations for an MTV Movie & TV Award, two People's Choice Awards and two Teen Choice Awards. He was also bestowed the matai title of Savae, a chief title of his village Moata'a, in September 2022.

-

kj apa

Download Zip: https://urlca.com/2uO6Cd



-

Early life and family background

-

KJ Apa was born on 17 June 1997 at Waitakere Hospital in Auckland, New Zealand. His full name is Savae Keneti James Fitzgerald Apa. His father, Tupa'i, is Samoan and a matai (chief) of his village in Samoa; his mother, Tessa, is a European New Zealander. He has two older sisters and is the nephew of former rugby union player and coach Michael Jones. He attended high school at King's College in Auckland before beginning his acting career.

-

Career beginnings and breakthrough

-

From 2013 to 2015, Apa starred as Kane Jenkins in the New Zealand prime-time soap opera Shortland Street. In 2016, he was cast as Archie Andrews in the CW drama series Riverdale, after a four-month worldwide talent search. Riverdale is based on the characters from the Archie Comics and follows the lives of a group of teenagers in a small town with dark secrets. The show has been a huge success, attracting millions of viewers and fans around the world. Apa's portrayal of Archie, a loyal, brave, and conflicted young man who struggles with his love life, his family issues, and his musical aspirations, has earned him praise from critics and audiences alike.

-


-

Recent and upcoming projects

-

In addition to Riverdale, which is currently in its seventh season, Apa has also starred in several films in recent years. In 2017, he played Ethan Montgomery in the comedy-drama film A Dog's Purpose, which follows the reincarnations of a dog who tries to find his purpose in life. In 2018, he replaced Kian Lawley in the drama film The Hate U Give, which is based on the novel by Angie Thomas and deals with racism, police brutality, and social justice. In 2019, he starred as Griffin in the Netflix film The Last Summer, which follows a group of friends who spend their last summer together before college. In 2020, he played singer Jeremy Camp in the biographical romantic drama film I Still Believe, which tells the story of Camp's first marriage to Melissa Lynn Henning-Camp who died of ovarian cancer shortly after they married. He also starred as Nico in the thriller film Songbird, which is set during a pandemic lockdown in Los Angeles.

-

Apa has several upcoming projects in the works. He will star as a dishonorably discharged soldier who pursues his dream of racing motorcycles in the film One Fast Move. He will also play one of the Wonder Twins, a pair of alien siblings with superpowers, in the HBO Max film Wonder Twins, directed by Adam Sztykiel. He is also set to appear in a military drama called West Pointer, produced by Lionsgate Films.

-

What makes KJ Apa stand out?

-

KJ Apa is not just a handsome face and a talented actor. He also has many other qualities and attributes that make him unique and admirable. Here are some of them:

-

His diverse talents and skills

-

KJ Apa is a multi-talented artist who can sing, play guitar, piano, and drums. He has showcased his musical abilities in several of his films and TV shows, such as Riverdale, I Still Believe, and The Last Summer. He also released his debut solo album, Clocks, in 2021, which explores the folk rock and indie genres. He wrote and produced all the songs on the album, which he described as his "living room journal" on Instagram. He has also performed live with Kygo at Coachella and other tour dates in 2018.

-

His cultural heritage and values

-

KJ Apa is proud of his Samoan heritage and values. His father is a matai (chief) of his village Moata'a in Samoa, and KJ was bestowed the same title of Savae in 2022, after a traditional ceremony. He said that his goal is to serve his family and his village. He also speaks Samoan fluently and has a tattoo on his right shoulder that represents his family name and history. He often visits Samoa to see his relatives and connect with his roots. He also respects and appreciates other cultures, as he has dated women from different backgrounds, such as French model Clara Berry, with whom he has a son named Sasha.

-

His personal life and interests

-

KJ Apa is a down-to-earth and fun-loving person who enjoys spending time with his friends, family, and pets. He has two older sisters, Timena and Ari, who are also actors and models. He is close to his Riverdale co-stars, especially Cole Sprouse, Camila Mendes, Lili Reinhart, and Charles Melton. He likes to joke around with them on social media and behind the scenes. He also loves animals and has two dogs named Penny and Rosie. He often posts pictures and videos of them on his Instagram account. He is also interested in sports, especially rugby, soccer, boxing, and surfing. He likes to keep fit and healthy by working out regularly.

-

Why should you follow KJ Apa?

-

KJ Apa is not only a talented and successful actor, but also a role model for young people. Here are some reasons why you should follow him:

-

He is a role model for young people

-

KJ Apa has shown that he is a hard-working and dedicated professional who strives to improve his craft and challenge himself with different roles. He has also shown that he is a humble and grateful person who does not take his fame for granted. He has said that he feels blessed to have the opportunities that he has had and that he wants to use his platform for good causes. He has also spoken openly about his faith as a Christian and how it influences his life choices. He has said that he wants to inspire young people to pursue their dreams and passions.

-

He is a social media sensation

-

KJ Apa has a huge fan base on social media, with over 18 million followers on Instagram and over 2 million followers on Twitter. He often interacts with his fans by posting updates on his projects, personal life, hobbies, opinions, and humor. He also does live streams, Q&A sessions, challenges, pranks, and collaborations with other celebrities and influencers. He is not afraid to be himself and express his personality on social media. He also uses social media to raise awareness about important issues such as racism, police brutality, social justice, mental health, climate change, animal rights, and more.

-

He is a philanthropist and activist

-

KJ Apa is involved in various charitable causes and organizations that aim to make a positive difference in the world. He has supported causes such as UNICEF New Zealand, World Vision New Zealand, and the ASPCA. He has also participated in events such as WE Day, a youth empowerment movement that celebrates social change, and Stand Up to Cancer, a telethon that raises funds for cancer research. He has also spoken out about issues such as racism, police brutality, social justice, mental health, climate change, animal rights, and more. He has said that he wants to use his voice and influence to spread awareness and inspire action.

-

Conclusion

-

KJ Apa is a rising star who has proven himself as a versatile and talented actor, singer, and musician. He has also shown that he is a humble, grateful, and generous person who values his family, friends, culture, and faith. He is a role model for young people who want to pursue their dreams and passions, as well as a social media sensation who engages with his fans and supports various causes. He is a person who deserves admiration and respect for his achievements and contributions. If you are not following him yet, you should start now!

-

FAQs

-

Here are some frequently asked questions about KJ Apa:

-
    -
  • What does KJ stand for? KJ stands for Keneti James, which are his middle names. His first name is Savae, which means "blessed" in Samoan.
  • -
  • How old is KJ Apa? KJ Apa was born on 17 June 1997, which makes him 26 years old as of 2023.
  • -
  • Is KJ Apa married? No, KJ Apa is not married, but he is in a relationship with French model Clara Berry, with whom he has a son named Sasha.
  • -
  • How much is KJ Apa worth? According to Celebrity Net Worth, KJ Apa has an estimated net worth of $3 million as of 2021.
  • -
  • Where can I watch KJ Apa's movies and TV shows? You can watch KJ Apa's movies and TV shows on various streaming platforms such as Netflix, Hulu, Amazon Prime Video, Disney+, HBO Max, and more.
  • -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Subway Surfers Mega Mod APK How to Unlock All Characters Boards and Locations with MOD YOLO.md b/spaces/congsaPfin/Manga-OCR/logs/Subway Surfers Mega Mod APK How to Unlock All Characters Boards and Locations with MOD YOLO.md deleted file mode 100644 index d8bfa371af8e45748c01e4b0e089c279315fa04b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Subway Surfers Mega Mod APK How to Unlock All Characters Boards and Locations with MOD YOLO.md +++ /dev/null @@ -1,141 +0,0 @@ - -

Subway Surfers Mod APK Mod Yolo: How to Download and Play the Ultimate Endless Runner Game

-

Do you love playing endless runner games on your mobile device? If yes, then you must have heard of Subway Surfers, one of the most popular and addictive games in this genre. But did you know that there is a way to make this game even more fun and exciting? Yes, we are talking about Subway Surfers Mod APK Mod Yolo, a modified version of the game that gives you unlimited everything and a mega menu. In this article, we will tell you everything you need to know about Subway Surfers Mod APK Mod Yolo, including how to download and install it, how to play it, and what are the pros and cons of using it. So, let's get started!

-

What is Subway Surfers?

-

Subway Surfers is a 3D endless runner game developed by SYBO Games and Kiloo. It was released in 2012 and has since become one of the most downloaded games on Google Play Store and App Store. The game has over 1 billion downloads and has received many positive reviews from critics and players alike.

-

subway surfers mod apk mod yolo

Download Zip: https://urlca.com/2uOeqO



-

The gameplay of Subway Surfers

-

The gameplay of Subway Surfers is simple yet addictive. You control a character who is running away from the police on a subway track. You have to swipe left or right to change lanes, swipe up to jump over obstacles, swipe down to slide under barriers, and tap to use power-ups. You have to avoid crashing into trains, signs, barriers, or other objects, or else you will be caught by the police. You can also collect coins, keys, hoverboards, jetpacks, magnets, and other items along the way to boost your score and unlock new characters and outfits.

-

The features of Subway Surfers

-

Subway Surfers has many features that make it an enjoyable game for all ages. Some of these features are:

-
    -
  • Colorful and vivid graphics that create a lively atmosphere.
  • -
  • Smooth and responsive controls that make the game easy to play.
  • -
  • Diverse and dynamic environments that change every month based on different cities around the world.
  • -
  • A variety of characters and outfits that you can customize according to your preference.
  • -
  • A leaderboard that lets you compete with your friends and other players around the world.
  • -
  • Achievements and missions that challenge you to complete different tasks and earn rewards.
  • -
-

What is Subway Surfers Mod APK Mod Yolo?

-

Subway Surfers Mod APK Mod Yolo is a modified version of Subway Surfers that gives you unlimited access to everything in the game. It also adds a mega menu that lets you customize your gameplay according to your liking. With Subway Surfers Mod APK Mod Yolo, you can enjoy the game without any limitations or restrictions.

-

The benefits of Subway Surfers Mod APK Mod Yolo

-

Some of the benefits of using Subway Surfers Mod APK Mod Yolo are:

-
    -
  • You get unlimited coins, keys, hoverboards, jetpacks, magnets, and other items that you can use to enhance your gameplay.
  • -
  • You get unlimited lives, so you don't have to worry about running out of chances or waiting for a timer to refill.
  • -
  • You get unlimited access to all the characters and outfits, so you can choose your favorite ones without spending any money.
  • -
  • You get a mega menu that lets you modify your speed, score, multiplier, coins, keys, and other aspects of the game.
  • -
  • You get to enjoy the game without any ads or pop-ups that might interrupt your experience.
  • -
-

The drawbacks of Subway Surfers Mod APK Mod Yolo

-

Some of the drawbacks of using Subway Surfers Mod APK Mod Yolo are:

-
    -
  • You might face some compatibility issues or bugs while playing the game, as it is not an official version.
  • -
  • You might lose your progress or data if you uninstall the game or switch to another device, as it is not connected to your Google Play account.
  • -
  • You might get banned or suspended from the game if you use the modded version online, as it is against the terms and conditions of the game.
  • -
  • You might miss out on some of the fun and challenge of playing the game normally, as it becomes too easy and boring with the modded version.
  • -
-

How to download and install Subway Surfers Mod APK Mod Yolo?

-

If you want to try out Subway Surfers Mod APK Mod Yolo, you need to download and install it on your device. Here are the steps to do so:

-

The steps to download and install Subway Surfers Mod APK Mod Yolo

-
    -
  1. Go to a trusted website that provides the link to download Subway Surfers Mod APK Mod Yolo. For example, you can use this link: .
  2. -
  3. Click on the download button and wait for the file to be downloaded on your device.
  4. -
  5. Go to your device settings and enable the option to install apps from unknown sources. This will allow you to install the modded version of the game.
  6. -
  7. Locate the downloaded file on your device and tap on it to start the installation process.
  8. -
  9. Follow the instructions on the screen and wait for the installation to be completed.
  10. -
  11. Launch the game and enjoy Subway Surfers Mod APK Mod Yolo!
  12. -
-

The precautions to take before downloading and installing Subway Surfers Mod APK Mod Yolo

-

Before you download and install Subway Surfers Mod APK Mod Yolo, you should take some precautions to avoid any problems or risks. Some of these precautions are:

-


-
    -
  • Make sure you have enough space on your device to store the modded version of the game, as it might be larger than the original version.
  • -
  • Make sure you have a stable internet connection to download the file without any interruptions or errors.
  • -
  • Make sure you have a backup of your data and progress in case something goes wrong during the installation or after playing the game.
  • -
  • Make sure you do not use the modded version online or with other players, as it might get you banned or suspended from the game.
  • -
-

How to play Subway Surfers Mod APK Mod Yolo?

-

Playing Subway Surfers Mod APK Mod Yolo is similar to playing the original version of Subway Surfers, except that you have unlimited everything and a mega menu. Here are some tips and tricks to play Subway Surfers Mod APK Mod Yolo:

-

The tips and tricks to play Subway Surfers Mod APK Mod Yolo

-
    -
  • Use the mega menu to customize your gameplay according to your preference. You can change your speed, score, multiplier, coins, keys, and other aspects of the game. You can also enable or disable some features like hoverboards, jetpacks, magnets, etc.
  • Use the unlimited coins and keys to buy and upgrade anything you want in the game. You can unlock all the characters and outfits, buy more hoverboards and power-ups, and increase your chances of escaping from the police.
  • Use the unlimited lives to play as long as you want without worrying about getting caught or crashing. You can also use them to revive yourself if you make a mistake or want to continue playing.
  • Use the unlimited hoverboards and jetpacks to fly over obstacles and trains, and collect more coins and items. You can also use them to avoid getting caught by the police or hitting a barrier.
  • Use the unlimited magnets to attract all the coins and items around you without having to swipe or tap. You can also use them to increase your score and multiplier.
-

The challenges and rewards to play Subway Surfers Mod APK Mod Yolo

-

Playing Subway Surfers Mod APK Mod Yolo might seem easy and boring at first, as you have unlimited everything and a mega menu. However, you can still challenge yourself and have fun by setting your own goals and rewards. Some of these challenges and rewards are:

-
    -
  • Try to beat your own high score or the high scores of your friends and other players on the leaderboard. You can also try to achieve the highest possible multiplier and collect the most coins and items in one run.
  • Try to complete all the achievements and missions in the game. You can also try to collect all the trophies and badges that are hidden in different locations.
  • Try to explore all the different environments and cities that are featured in the game. You can also try to find all the easter eggs and secrets that are hidden in the game.
  • Try to customize your character and outfit according to your mood and style. You can also try to mix and match different items and accessories to create your own unique look.
  • Try to have fun and enjoy the game without worrying about anything. You can also try to share your screenshots and videos of your gameplay with your friends and other players on social media.
-

Conclusion

-

Subway Surfers Mod APK Mod Yolo is a modified version of Subway Surfers that gives you unlimited everything and a mega menu. It is a great way to enjoy the game without any limitations or restrictions. However, it also has some drawbacks and risks that you should be aware of before using it. In this article, we have explained everything you need to know about Subway Surfers Mod APK Mod Yolo, including how to download and install it, how to play it, and what are the pros and cons of using it. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

-

FAQs

-

Here are some of the frequently asked questions about Subway Surfers Mod APK Mod Yolo:

-
    -
  1. Is Subway Surfers Mod APK Mod Yolo safe to use?

    Subway Surfers Mod APK Mod Yolo is not an official version of Subway Surfers, so it might not be safe to use. It might contain viruses, malware, or spyware that could harm your device or steal your personal information. It might also cause compatibility issues or bugs while playing the game. Therefore, you should use it at your own risk and discretion.

  2. Is Subway Surfers Mod APK Mod Yolo legal to use?

    Subway Surfers Mod APK Mod Yolo is not legal to use, as it violates the terms and conditions of Subway Surfers. It also infringes the intellectual property rights of SYBO Games and Kiloo, the developers of Subway Surfers. Therefore, you might face legal consequences if you use it online or with other players.

  3. Can I play Subway Surfers Mod APK Mod Yolo offline?

    Yes, you can play Subway Surfers Mod APK Mod Yolo offline, as it does not require an internet connection to run. However, you might miss out on some of the features and updates that are available online.

  4. Can I play Subway Surfers Mod APK Mod Yolo with my friends?

    No, you cannot play Subway Surfers Mod APK Mod Yolo with your friends, as it is not compatible with the original version of Subway Surfers. If you try to play it online or with other players, you might get banned or suspended from the game.

  5. Can I update Subway Surfers Mod APK Mod Yolo?

    No, you cannot update Subway Surfers Mod APK Mod Yolo, as it is not connected to Google Play Store or App Store. If you want to get the latest version of Subway Surfers, you have to download and install it from the official sources.

    -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Tekken 3 MOD APK 2023 Free Download Unlimited Money No Ads.md b/spaces/congsaPfin/Manga-OCR/logs/Tekken 3 MOD APK 2023 Free Download Unlimited Money No Ads.md deleted file mode 100644 index 0070cc3645671397bbe2b78469cbdb21ccfcdb4f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Tekken 3 MOD APK 2023 Free Download Unlimited Money No Ads.md +++ /dev/null @@ -1,136 +0,0 @@ - -

How to Download Tekken 3 Mod Apk

-

Tekken 3 is one of the most popular and classic fighting games of all time. It was released in arcades in 1997 and on the PlayStation the following year, and it has been praised for its gameplay, graphics, sound, and characters. However, if you want to enjoy this game on your Android device with some extra features and enhancements, you should try tekken 3 mod apk.

-




-

Tekken 3 mod apk is a modified version of the original game that adds unlimited money, coins, characters, levels, modes, and more. It also removes ads and improves the performance and compatibility of the game. With tekken 3 mod apk, you can have the ultimate fighting experience on your android device.

-

In this article, we will show you the main features of tekken 3 mod apk, how to play the different modes, how to unlock the characters, and how to install it on your Android device. We will also share some tips and tricks to help you master the game. So, let's get started.

-

Features of Tekken 3 Mod Apk

-

Tekken 3 mod apk has many features that make it better than the original game. Here are some of them:

-
    -
  • All characters unlocked: You can choose from 23 different characters, each with their own unique moves, combos, and styles. You don't have to beat the game with each character to unlock them, as they are all available from the start.
  • All levels unlocked: You can play on any stage you want, without having to complete the previous ones. You can also change the difficulty level according to your preference.
  • Unlimited money and coins: You can buy anything you want from the shop, such as costumes, items, power-ups, etc. You don't have to worry about running out of money or coins, as they are unlimited in tekken 3 mod apk.
  • No ads: You can enjoy the game without any interruptions or distractions from annoying ads. Tekken 3 mod apk is ad-free and lets you focus on the action.
  • Better performance and compatibility: Tekken 3 mod apk runs smoothly and fast on any Android device. It also supports different screen sizes and resolutions. You don't need a high-end device or a lot of storage space to play tekken 3 mod apk.
-

Modes of Tekken 3 Mod Apk

-

Tekken 3 mod apk has various modes that offer different challenges and fun. Here are some of them:

-
    -
  • Arcade Mode: This is the classic mode where you fight against different opponents in a series of matches. You can choose your character and difficulty level before starting. You can also save your progress and resume later.
  • Versus Mode: This is the mode where you can play against another player on the same device. You can choose your character and stage before starting. You can also adjust the time limit and rounds per match.
  • Team Battle Mode: This is the mode where you can form a team of up to eight characters and compete against another team. You can choose your team members and order before starting. The team with the last surviving member wins.
  • Time Attack Mode: This is the mode where you have to beat as many opponents as possible within a limited time. You can choose your character before starting. The faster you finish, the higher your score.
  • Survival Mode: This is the mode where you have to survive as long as possible against endless opponents with a single health bar. You can choose your character before starting. The longer you last, the higher your score.
  • Practice Mode: This is the mode where you can practice your moves, combos, and skills with any character and stage. You can also adjust the settings such as the opponent's behavior, health, and damage.
-

Characters of Tekken 3 Mod Apk

-

Tekken 3 mod apk has a diverse and colorful roster of characters, each with their own backstory, personality, and fighting style. Here are some of them:

Name | Origin | Style
Jin Kazama | Japan | Karate
Ling Xiaoyu | China | Martial arts
Hwoarang | Korea | Taekwondo
Eddy Gordo | Brazil | Capoeira
Nina Williams | Ireland | Aikido and Koppo
Paul Phoenix | USA | Judo and Karate
King II | Mexico | Wrestling and Lucha Libre
Yoshimitsu | Japan | Ninjutsu and Swordsmanship
...and many more!
-

You can unlock all the characters in tekken 3 mod apk by simply downloading and installing the game. You don't have to complete any challenges or missions to access them.

-

Tips and Tricks for Tekken 3 Mod Apk

-

Tekken 3 mod apk is a fun and addictive game, but it can also be challenging and frustrating at times. Here are some tips and tricks to help you improve your skills and enjoy the game more:

-


-
    -
  • Learn the basics: Before you jump into the action, make sure you know the basic controls, moves, and combos of your character. You can use the practice mode to familiarize yourself with them. You can also check the move list in the pause menu for reference.
  • Use the block button: Blocking is an essential skill in tekken 3 mod apk, as it can prevent you from taking damage and create openings for counterattacks. You can block by pressing the block button or by holding back on the directional pad. You can also perform low blocks by holding down and back on the directional pad.
  • Mix up your attacks: Don't be predictable in your attacks, as your opponent can easily read and dodge them. Try to vary your attacks by using different moves, combos, and directions. You can also use throws, grabs, and special moves to surprise your opponent.
  • Dodge and sidestep: Sometimes, blocking is not enough to avoid damage. You can also dodge and sidestep by pressing up or down on the directional pad. This can help you evade incoming attacks and reposition yourself for a better angle.
  • Use power-ups: In tekken 3 mod apk, you can find various power-ups that can boost your performance in the game. For example, you can find health packs that can restore your health, coins that can increase your money, and items that can give you special effects. You can use these power-ups by tapping on them on the screen.
  • Have fun: The most important tip is to have fun while playing tekken 3 mod apk. Don't get frustrated or angry if you lose or make mistakes. Instead, learn from them and try again. Remember that tekken 3 mod apk is a game meant to entertain you and not to stress you out.

    How to Install Tekken 3 Mod Apk on Your Android Device

    -

    If you want to play tekken 3 mod apk on your android device, you need to follow these simple steps:

    -
      -
    1. Download tekken 3 mod apk file: You can download tekken 3 mod apk file from any reliable source on the internet. Make sure you download the latest version of the file that is compatible with your device.
    2. Enable unknown sources: You need to enable unknown sources on your device to install tekken 3 mod apk file. You can do this by going to your device settings, security, and then toggle on the unknown sources option.
    3. Install tekken 3 mod apk file: You need to locate the tekken 3 mod apk file on your device and tap on it to start the installation process. You may need to grant some permissions and accept some terms and conditions before proceeding. Wait for the installation to finish.
    4. Launch tekken 3 mod apk: You can now launch tekken 3 mod apk from your device's app drawer or home screen. You can also create a shortcut for easy access. Enjoy the game!
    -

    Conclusion

    -

    Tekken 3 mod apk is a great way to enjoy one of the best fighting games ever made on your android device. It has many features, modes, characters, and enhancements that make it better than the original game. It is also easy to install and play, and it is free of ads and bugs.

    -

    If you are a fan of tekken 3 or fighting games in general, you should definitely download tekken 3 mod apk and give it a try. You will not regret it. It is a game that will keep you entertained and challenged for hours.

    -

    So, what are you waiting for? Download tekken 3 mod apk now and unleash your inner fighter!

    -

    FAQs

    -

    Here are some frequently asked questions about tekken 3 mod apk:

    -

    Is tekken 3 mod apk safe to download and install?

    -

    Yes, tekken 3 mod apk is safe to download and install, as long as you get it from a reliable source. It does not contain any viruses, malware, or spyware that can harm your device or data. However, you should always scan any file before installing it on your device, just to be sure.

    -

    Is tekken 3 mod apk legal to use?

    -

    Tekken 3 mod apk is a modified version of the original game that is not authorized or endorsed by the developers or publishers of the game. Therefore, it may violate some copyrights or trademarks of the game. However, as long as you use it for personal and non-commercial purposes, you should not face any legal issues.

    -

    Does tekken 3 mod apk require an internet connection?

    -

    No, tekken 3 mod apk does not require an internet connection to play. You can play it offline without any problem. However, you may need an internet connection to download and install the game, as well as to access some online features such as leaderboards and achievements.

    -

    Can I play tekken 3 mod apk with my friends?

    -

    Yes, you can play tekken 3 mod apk with your friends on the same device using the versus mode. You can also play with other players online using the multiplayer mode. However, you need an internet connection and a compatible device to use these modes.

    -

    How can I update tekken 3 mod apk?

    -

    You can update tekken 3 mod apk by downloading and installing the latest version of the file from the same source you got it from. You may need to uninstall the previous version before installing the new one. You can also check for updates within the game settings.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/The Best Way to Install Summertime Saga APK on Android A Step-by-Step Tutorial.md b/spaces/congsaPfin/Manga-OCR/logs/The Best Way to Install Summertime Saga APK on Android A Step-by-Step Tutorial.md deleted file mode 100644 index 79ee94e1431fd20972bcf67982be0d7f87be3b0d..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/The Best Way to Install Summertime Saga APK on Android A Step-by-Step Tutorial.md +++ /dev/null @@ -1,120 +0,0 @@ -
    -

    Summertime Saga APK: How to Install on Android

    -

    If you are looking for a fun and engaging graphical adventure game for adults, you might want to check out Summertime Saga APK. This is a game funded by Patreon that has become very popular among fans of the genre. In this game, you play as a young man who is trying to cope with the death of his father, while also dealing with school, romance, and mystery. You can explore different locations, interact with various characters, and make choices that affect the outcome of the story.

    -




    -

    However, Summertime Saga is not available on the Google Play Store, so you will need to install it manually using an APK file. An APK file is a package that contains all the files and data needed to run an Android app. In this article, we will show you how to download and install Summertime Saga APK on your Android device in different ways. We will also discuss the benefits and risks of installing this game, and answer some frequently asked questions.

    -

    What is an APK file and how to download it

    -

    An APK file stands for Android Package Kit, and it is a format used by Android to distribute and install apps. Unlike regular apps that you can download from the Google Play Store, APK files are not verified by Google, so they can come from any source. This means that you can find apps that are not available on the official store, or get updates before they are rolled out to everyone.
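    For readers comfortable with a command line, an APK can also be sideloaded with adb (Android Debug Bridge) instead of tapping through an installer on the device — a minimal sketch, assuming USB debugging is enabled and using a hypothetical file name:

        # Push and install the package in one step; add -r to replace an
        # existing installation (file name is hypothetical).
        adb install summertime-saga.apk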

    -

    However, this also means that you have to be careful about what APK files you download, as some of them might contain malware or viruses that can harm your device. You should only download APK files from reputable sources, such as APK Mirror, which is a site that monitors the files it hosts and verifies that they are safe. You can browse the site and download any APK file you want, but make sure you read the reviews and ratings before doing so.

    -

    How to enable unknown sources on your Android device

    -

    Before you can install an APK file on your Android device, you need to allow your device to install apps from unknown sources. This is a security feature that prevents unauthorized apps from accessing your device. To do this, follow these steps:

    -
      -
    1. Go to your device settings and tap Apps & Notifications (or Apps in older versions of Android).
    2. -
    3. Tap the three dots in the upper-right corner.
    4. -
    5. Tap Special access.
    6. -
    7. Tap Install unknown apps.
    8. -
    9. Tap Chrome (or whichever web browser you use).
    10. -
    11. Move Allow from this source to the On position.
    12. -
    -

    You can also choose which other apps are allowed to install apps from unknown sources from this menu, but be careful not to grant this permission to any suspicious apps.

    -


    -

    How to install Summertime Saga APK using your browser

    -

    The easiest way to install Summertime Saga APK on your Android device is to download it using your browser. Here's how:

    -
      -
    1. Open Chrome (or whichever web browser you use) and go to the APK Mirror website.
    2. Search for Summertime Saga APK and select the latest version.
    3. Tap Download APK and confirm your choice.
    4. Wait for the download to finish and tap Open.
    5. Tap Install and wait for the installation to complete.
    6. Tap Done and enjoy playing Summertime Saga.
    -

    You can also find the APK file in your Downloads folder and install it from there.

    -

    How to install Summertime Saga APK using your computer

    -

    If you prefer to download the APK file on your computer and then transfer it to your Android device, you can do that as well. Here's how:

    -
      -
    1. Go to the APK Mirror website on your computer and search for Summertime Saga APK.
    2. Select the latest version and click Download APK.
    3. Save the file to a location you can easily access, such as your desktop.
    4. Connect your Android device to your computer using a USB cable.
    5. On your device, tap Allow when prompted to grant access to your files.
    6. On your computer, open the folder where you saved the APK file and drag it to your device's storage.
    7. Disconnect your device from your computer.
    8. On your device, open a file manager app, such as Files by Google or ES File Explorer.
    9. Navigate to the folder where you copied the APK file and tap on it.
    10. Tap Install and wait for the installation to complete.
    11. Tap Done and enjoy playing Summertime Saga.
    -

    How to install Summertime Saga APK using an APK installer app

    -

    Another way to install Summertime Saga APK on your Android device is to use an app that can scan and install the APK files on your device. One such app is SAI (Split APKs Installer), which is available on the Google Play Store. Here's how to use it:

    -
      -
    1. Download and install SAI from the Google Play Store.
    2. Open SAI and grant it permission to access your files.
    3. Tap Install APKs and select the folder where you downloaded or copied the Summertime Saga APK file.
    4. Select the file and tap Select.
    5. Tap Install and wait for the installation to complete.
    6. Tap Done and enjoy playing Summertime Saga.
    -
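    Since SAI exists mainly to handle split APKs, it is worth noting that adb can install a split package from a computer as well — a minimal sketch with hypothetical file names:

        # Install a base APK together with its split(s) in one transaction.
        adb install-multiple base.apk split_config.en.apk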

    What are the benefits and risks of installing Summertime Saga APK

    -

    Installing Summertime Saga APK on your Android device can have some benefits and risks that you should be aware of. Here are some of them:

    Benefits | Risks
    You can play a game that is not available on the Google Play Store | You might download a fake or malicious APK file that can harm your device or steal your data
    You can get updates faster than waiting for them to be released on the official site | You might violate the terms of service of the game developer or publisher
    You can enjoy a game that has high-quality graphics, sound, and gameplay | You might encounter bugs or glitches that can affect your gaming experience
    You can choose from different storylines, characters, and endings | You might expose yourself to inappropriate or offensive content that is not suitable for minors
    -

    As you can see, installing Summertime Saga APK has its pros and cons, so you should weigh them carefully before deciding whether to do it or not. If you do decide to install it, make sure you follow the instructions above and only download it from trusted sources. Also, be respectful of the game developer's work and support them if you can by donating or sharing their game with others.

    -

    Conclusion

    -

    In this article, we have shown you how to install Summertime Saga APK on your Android device in different ways. We have also discussed what an APK file is, how to enable unknown sources on your device, what are the benefits and risks of installing Summertime Saga APK, and answered some frequently asked questions. We hope you found this article helpful and informative, and that you enjoy playing Summertime Saga on your Android device. If you have any questions or feedback, feel free to leave a comment below. Happy gaming!

    -

    FAQs

    -

    What is Summertime Saga?

    -

    Summertime Saga is a graphical adventure game for adults that is funded by Patreon. It has a rich and immersive story, with over 65 characters to meet and interact with. You can choose from different paths and endings, depending on your choices and actions. The game also features mini-games, puzzles, and secrets to discover.

    -

    Is Summertime Saga APK safe to install?

    -

    Summertime Saga APK is safe to install if you download it from a reputable source, such as APK Mirror. However, you should always be careful about what APK files you install on your device, as some of them might contain malware or viruses that can harm your device or steal your data. You should also scan the APK file with an antivirus app before installing it, and avoid granting unnecessary permissions to the app.

    -

    Is Summertime Saga APK legal to install?

    -

    Summertime Saga APK is legal to install if you follow the terms of service of the game developer and publisher. The game is free to download and play, but you can support the developer by donating or becoming a patron on Patreon. You should also not distribute or modify the game without the developer's permission, or use it for any illegal or immoral purposes.

    -

    How do I update Summertime Saga APK?

    -

    To update Summertime Saga APK, you need to download and install the latest version of the APK file from the same source you used before. You can check for updates on the official site of the game or on the APK Mirror website. You should also backup your game data before updating, as some updates might erase your progress or cause compatibility issues.

    -

    How do I uninstall Summertime Saga APK?

    -

    To uninstall Summertime Saga APK, you need to go to your device settings and tap Apps & Notifications (or Apps in older versions of Android). Then, find and tap Summertime Saga and tap Uninstall. Confirm your choice and wait for the uninstallation to complete. You can also delete the APK file from your device storage if you want to free up some space.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/YouTube Version 2.3 6 APK Watch and Share Videos on Your Device.md b/spaces/congsaPfin/Manga-OCR/logs/YouTube Version 2.3 6 APK Watch and Share Videos on Your Device.md deleted file mode 100644 index 7cbbde4b26a42e9cb5051dbee30eae1d85769b3c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/YouTube Version 2.3 6 APK Watch and Share Videos on Your Device.md +++ /dev/null @@ -1,90 +0,0 @@ -
    -

    YouTube Version 2.3 6 APK: What Is It and How to Download It

    -

    YouTube is one of the most popular and widely used video-sharing platforms in the world. Millions of people watch, upload, and share videos on YouTube every day. However, not everyone is satisfied with the latest version of the YouTube app for Android devices. Some users prefer the older versions of the app, which had different features and layouts. If you are one of them, you might be interested in downloading YouTube Version 2.3 6 APK.

    -




    -

    Introduction

    -

    In this article, we will explain what YouTube Version 2.3 6 APK is, why you might want to download it, how to download it, and what are the benefits and risks of using it. By the end of this article, you will have a clear idea of whether YouTube Version 2.3 6 APK is right for you or not.

    -

    What is YouTube Version 2.3 6 APK?

    -

    YouTube Version 2.3 6 APK is an Android Package Kit (APK) file that contains the installation data for an older version of the YouTube app for Android devices. Specifically, it is the version that was released on October 2, 2014. An APK file is a compressed file that can be downloaded and installed on Android devices without using the Google Play Store.

    -

    Why would you want to download YouTube Version 2.3 6 APK?

    -

    There are several reasons why you might want to download YouTube Version 2.3 6 APK instead of using the latest version of the YouTube app from the Google Play Store. Some of these reasons are:

    -
      -
  • You like the older features and layouts of the YouTube app that were available in version 2.3 6, such as the slide-out menu, the video quality selector, and the offline mode.
  • You have an older device or operating system that is not compatible with the latest version of the YouTube app, which requires Android 5.0 or higher.
  • You want to enjoy faster performance and fewer ads on the YouTube app, as some users have reported that the older versions run smoother and have fewer interruptions.
    -

    How to Download YouTube Version 2.3 6 APK

    -

    If you have decided that you want to download YouTube Version 2.3 6 APK, you will need to follow these steps:

    -

    Step 1: Enable Unknown Sources on Your Device

    -

    Since you are downloading an APK file from a source other than the Google Play Store, you will need to enable unknown sources on your device. This will allow you to install apps from sources other than the official app store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

    -

    Step 2: Find a Reliable Source for the APK File

    -

    Next, you will need to find a reliable source for the APK file that you want to download. There are many websites that offer APK files for various apps, but not all of them are trustworthy or safe. Some of them may contain malware or viruses that can harm your device or steal your data. Therefore, you should do some research before downloading any APK file from any website.

    -

One of the reliable sources for APK files that I found is APKMirror. This site is owned and operated by the same team that created the widely-read Android news site, Android Police. They verify all APKs uploaded to the site before publishing them, and they match cryptographic signatures for new versions of apps with previous versions. They also do not host any modded APKs, pirated apps, or paid apps on the site. You can use the search function to find YouTube Version 2.3 6 APK on their site, or you can use this link to go directly to the download page. Alternatively, you can also use other reputable sources for APK files, such as APKPure, APK4Fun, or Aptoide. However, you should always be careful when downloading any APK file from any website, and make sure you have a good antivirus app on your device.

    Step 3: Download and Install the APK File

    -

    Once you have found a reliable source for the APK file, you can proceed to download and install it on your device. To do this, follow these steps:

    -
      -
    1. Tap on the download link or button on the website to start downloading the APK file. You may see a warning message that this type of file can harm your device. Tap OK to continue.
    2. After the download is complete, open the file manager app on your device and locate the APK file in the Downloads folder or wherever you saved it.
    3. Tap on the APK file to start the installation process. You may see a prompt asking you to confirm the installation of the app from an unknown source. Tap Install to continue.
    4. Wait for the installation to finish. You may see a message that the app was installed successfully. Tap Open to launch the app or Done to exit.
    -

    Benefits of YouTube Version 2.3 6 APK

    -

    Now that you have downloaded and installed YouTube Version 2.3 6 APK, you can enjoy some of the benefits that this older version of the app offers. Some of these benefits are:

    -

    Access to Older Features and Layouts

    -

    One of the main reasons why some users prefer YouTube Version 2.3 6 APK is that it has some features and layouts that are no longer available in the latest version of the app. For example, you can access the slide-out menu by swiping from the left edge of the screen, which gives you quick access to your subscriptions, playlists, history, uploads, and settings. You can also select the video quality by tapping on the three-dot menu icon on the top right corner of the video player, which lets you choose from 144p to 1080p depending on your network speed and preference. You can also use the offline mode by tapping on the download icon next to a video, which allows you to save videos for offline viewing later.

    -

    -

    Compatibility with Older Devices and Operating Systems

    -

    Another benefit of YouTube Version 2.3 6 APK is that it is compatible with older devices and operating systems that are not supported by the latest version of the app. The latest version of the YouTube app requires Android 5.0 or higher, which means that some devices that run on Android 4.4 or lower cannot use it. However, YouTube Version 2.3 6 APK works on Android 4.0 or higher, which means that more devices can use it without any issues.

    -

    Faster Performance and Less Ads

    -

    A final benefit of YouTube Version 2.3 6 APK is that it offers faster performance and less ads than the latest version of the app. Some users have reported that the older version of the app runs smoother and faster on their devices, especially on low-end or older devices. They also claim that there are fewer ads on the older version of the app, which makes for a more enjoyable viewing experience.

    -

    Risks of YouTube Version 2.3 6 APK

    -

    While there are some benefits of using YouTube Version 2.3 6 APK, there are also some risks that you should be aware of before using it. Some of these risks are:

    -

    Security and Privacy Issues

    -

    One of the main risks of using YouTube Version 2.3 6 APK is that it may pose security and privacy issues for your device and data. Since you are downloading an APK file from an unknown source, you are exposing yourself to potential malware or viruses that can harm your device or steal your data. Even if you use a reliable source for the APK file, there is no guarantee that it has not been tampered with or modified by someone with malicious intent. Furthermore, since you are using an older version of the app, you are not getting the latest security patches and bug fixes, which can leave your device more exposed over time.

  • Q: How can I verify the authenticity and safety of YouTube Version 2.3 6 APK?
  • A: If you want to verify the authenticity and safety of YouTube Version 2.3 6 APK, you can use a tool such as VirusTotal or APK Analyzer to scan the APK file for any malware or viruses. You can also compare the cryptographic signature of the APK file with the official version of the app to see if they match (see the example commands after this list).
  • Q: How can I find more information about YouTube Version 2.3 6 APK?
  • A: If you want to find more information about YouTube Version 2.3 6 APK, you can visit the websites that offer the APK file and read their descriptions, reviews, and comments. You can also visit online forums, blogs, or social media platforms that discuss YouTube Version 2.3 6 APK and see what other users have to say about it.
  • Q: How can I contact YouTube or Google if I have any questions or concerns about YouTube Version 2.3 6 APK?
  • A: If you have any questions or concerns about YouTube Version 2.3 6 APK, you can contact YouTube or Google through their official channels, such as their websites, email addresses, phone numbers, or social media accounts. However, you should be aware that they may not support or endorse YouTube Version 2.3 6 APK, and they may advise you to use the latest version of the app instead.
-
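As a concrete illustration of the signature check mentioned in the first answer above, the Android SDK's apksigner tool can print an APK's signing certificates — a sketch, assuming the SDK build-tools are installed and using a hypothetical file name:

    # Print the signing certificate(s) so they can be compared with the
    # official app's certificate digests.
    apksigner verify --print-certs youtube-2.3.6.apk
    # A plain checksum is also useful for comparing against a known-good copy.
    sha256sum youtube-2.3.6.apk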

I hope this article has been helpful and informative for you. Thank you for reading it.

-
-
\ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Download SiteSucker for Windows and Save Any Website to Your Hard Drive.md b/spaces/contluForse/HuggingGPT/assets/Download SiteSucker for Windows and Save Any Website to Your Hard Drive.md deleted file mode 100644 index 4d583da2df815ceea6eac3b6e397784203be4356..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Download SiteSucker for Windows and Save Any Website to Your Hard Drive.md +++ /dev/null @@ -1,37 +0,0 @@ - -

WebCopy by Cyotek takes a website URL and scans it for links, pages, and media. As it finds pages, it recursively looks for more links, pages, and media until the whole website is discovered. Then you can use the configuration options to decide which parts to download offline.

-




-

The interesting thing about WebCopy is you can set up multiple projects that each have their own settings and configurations. This makes it easy to re-download many sites whenever you want; each one, in the same way every time.

-

Once the copying is done, you can use the Results tab to see the status of each individual page and/or media file. The Errors tab shows any problems that may have occurred, and the Skipped tab shows files that weren't downloaded. But most important is the Sitemap, which shows the full directory structure of the website as discovered by WebCopy.

-

Like WebCopy, it uses a project-based approach that lets you copy multiple websites and keep them all organized. You can pause and resume downloads, and you can update copied websites by re-downloading old and new files.

-

-

Once everything is downloaded, you can browse the site normally, simply by going to where the files were downloaded and opening the index.html or index.htm in a browser.

-

You can replace the website URL here with the URL of whichever website you want to download. For instance, if you wanted to download the whole Encyclopedia Britannica, you'll have to tweak your command to this:
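Assuming the command in question was a wget invocation like the ones discussed below, the tweak might look like this (a sketch only; the flags mirror the later examples):

    # Hypothetical reconstruction with the Britannica URL substituted in.
    wget --recursive --page-requisites --convert-links --no-parent https://www.britannica.com/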

-

One of its nifty features is the ability to save an in-progress download to a file, then use that file to download the same files and structure again in the future (or on another machine). This feature is also what allows SiteSucker to pause and resume downloads.

-

Wget is a command-line utility that can retrieve all kinds of files over the HTTP and FTP protocols. Since websites are served through HTTP and most web media files are accessible through HTTP or FTP, this makes Wget an excellent tool for downloading entire websites.

-

Wget comes bundled with most Unix-based systems. While Wget is typically used to download single files, it can also be used to recursively download all pages and files that are found through an initial page:
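    # A representative recursive download (placeholder URL); stays on one
    # domain, grabs page requisites, and rewrites links for offline browsing.
    wget --recursive --no-clobber --page-requisites --html-extension \
         --convert-links --domains example.com --no-parent https://example.com/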

-

If you want to be polite, you should also limit your download speed (so you don't hog the web server's bandwidth) and pause between each download (so you don't overwhelm the web server with too many requests):
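    # The same download, throttled and with pauses between requests
    # (the values shown are illustrative).
    wget --recursive --no-clobber --page-requisites --html-extension \
         --convert-links --domains example.com --no-parent \
         --limit-rate=200k --wait=1 --random-wait https://example.com/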

-

Apart from simply downloading a whole website, the app packs a host of other features and intricacies as well. For instance, when you download and install the app, in the app's main menu you'll see these options to choose from:

-

But remember: the bigger the site, the bigger the download. Therefore, we don't recommend downloading massive sites like MUO because you'll need thousands of MBs to store all the media files such sites use.

-

Are you looking for SiteSucker for Windows 7/8/10 download instructions?
Then you've come to the right place. Most apps of this kind are developed exclusively for a single platform, but many of us prefer to use them on larger screens such as a Windows laptop or desktop, where access is easier and faster than on a small mobile device.

-

Few apps officially ship versions for Windows, Mac, and PC by default, so when there is no official support for your platform, you need to find another way to install and use the app. Luckily, there are a few methods that can help you install and use SiteSucker on Windows 7/8/10, Mac, and PC.

-

"SiteSucker is a Macintosh application that automatically downloads Web sites from the Internet. It does this by asynchronously copying the site's Web pages, images, backgrounds, movies, and other files to your local hard drive, duplicating the site's directory structure. Just enter a URL (Uniform Resource Locator), press return, and SiteSucker can download an entire Web site."

-

What you probably need is a website downloader like the previously covered Fresh Websuction, which downloads all web pages, with files, images, and other content stored on the web server, to your system. SiteSucker is a one-click website downloader for Mac OS X that can fetch all images, backgrounds, media files, and other uploaded content from a web server. By default, the application downloads only files on the same server; however, it includes an option to fetch all web pages and files on sub-domains as well.

-

In addition to supporting linked pages and sub-domains, you can put limits on the downloading process to fetch only the required number of web pages and to search for files at any depth level. You can alternatively opt to specify file types and a maximum file size to download in order to save disk space. Other download-process customizations include paths to Include and Exclude, fetching files and web pages only from defined locations, etc.

-

Using SiteSucker, you can instantly make offline versions of your websites. All you need to do is feed it the website URL to start downloading all index and linked pages. The built-in History and Log features let you easily download a fresh version of any previously downloaded website and check for errors in files retrieved from the website, respectively. On the main interface, enter the URL of the website you wish to download and click Download to begin downloading the website with default settings.

-

You can change the default downloading settings from the Edit Settings dialog accessible from the Settings menu. The General window deals with robots.txt exclusions, suppressing the login dialog box, and other options for file replacement, HTML processing, download mode, etc. To apply limits on file downloading, head over to the Limits tab to set the maximum number of levels to search, the number of files you want to download from the website, the minimum and maximum file size, and the maximum image size according to screen size.

-

Similarly, you can apply filters on file types to download. By default, it downloads all types of files. You can choose Only Download These File Types option from drop-down menu to specify required types of files. Additionally, it includes an option to set a range of file types which are to be treated as HTML or webpage file.

-

If you just want to download specific web pages for viewing later, your browser can easily do it for you. It can download the whole page with all its components and let you browse it offline in the browser again.

-

HTTrack is a popular tool for downloading a website's entire data and accessing it offline. It is an open-source tool that is available for Windows, Linux and Android platforms. It downloads the whole website by moving from link to link, so it is also able to format the archive as if you were browsing the website online.
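HTTrack also ships a command-line interface alongside the GUI; a minimal sketch with a placeholder URL and output folder:

    # Mirror a site into ./example-mirror, staying within the domain filter.
    httrack "https://example.com/" -O ./example-mirror "+*.example.com/*" -v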

-

The process could take a lot of time depending on how big the website is. It may even take days to download a website as massive as Hongkiat.com. On top of that, this data could easily take GBs of space on your hard drive, so make sure you have enough space. Once downloaded, you can open the project to start browsing the website in your default browser.

-

Tip: HTTrack starts downloading data from the latest updates and moves backward. If you only want the latest data and not the whole website, cancel the download process when you are sure the needed data has been downloaded. Even if the download process is cancelled, you can still access the data that has already been downloaded.

-

SiteSucker is a great alternative to HTTrack for macOS and iOS users. It works similarly to HTTrack and downloads websites as a whole by jumping from link to link. You can also pause downloads in the middle to view the downloaded web pages and resume any time you like.

-

PageArchiver lets you save multiple web pages and access them from its interface. You can download all the web pages that are currently opened in your browser. Simply open the pages you need and download them with PageArchiver.

-

WebScrapBook lets you download a single web page or a whole website. It also organizes the downloaded content in its interface and a handy search bar makes it easy to search for the right content.

-

If you only want to download online articles to read later, then Pocket might be a great option. It has compatible extensions for all the popular browsers that you can use to save articles and other supported content.

-

And of course, for downloading entire websites, HTTrack and SiteSucker are the best options. If you know any other tools to save websites for offline viewing, do share with us in the comments below.

-

An offline reader (sometimes called an offline browser or offline navigator) is computer software that downloads e-mail, newsgroup posts or web pages, making them available when the computer is offline: not connected to a server.[a] Offline readers are useful for portable computers and dial-up access.

-

Website mirroring software is software that allows for the download of a copy of an entire website to the local hard disk for offline browsing. In effect, the downloaded copy serves as a mirror of the original site. Web crawler software such as Wget can be used to generate a site mirror.
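For instance, Wget's --mirror shorthand (equivalent to recursion with infinite depth plus timestamping) is a common way to generate such a mirror — a sketch with a placeholder URL:

    # --mirror implies -r -N -l inf --no-remove-listing.
    wget --mirror --convert-links --page-requisites --no-parent https://example.com/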

-

Offline mail readers are computer programs that allow users to read electronic mail or other messages (for example, those on bulletin board systems) with a minimum of connection time to the server storing the messages. BBS servers accomplished this by packaging up multiple messages into a compressed file, e.g., a QWK packet, for the user to download using, e.g., Xmodem, Ymodem, Zmodem, and then disconnect. The user reads and replies to the messages locally and packages up and uploads any replies or new messages back to the server upon the next connection. Internet mail servers using POP3 or IMAP4 send the messages uncompressed as part of the protocol, and outbound messages using SMTP are also uncompressed. Offline news readers using NNTP are similar, but the messages are organized into news groups.

-
-
\ No newline at end of file diff --git a/spaces/cooelf/Multimodal-CoT/timm/data/config.py b/spaces/cooelf/Multimodal-CoT/timm/data/config.py deleted file mode 100644 index 38f5689a707f5602e38cb717ed6115f26a0d7ea2..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/data/config.py +++ /dev/null @@ -1,78 +0,0 @@ -import logging -from .constants import * - - -_logger = logging.getLogger(__name__) - - -def resolve_data_config(args, default_cfg={}, model=None, use_test_size=False, verbose=False): - new_config = {} - default_cfg = default_cfg - if not default_cfg and model is not None and hasattr(model, 'default_cfg'): - default_cfg = model.default_cfg - - # Resolve input/image size - in_chans = 3 - if 'chans' in args and args['chans'] is not None: - in_chans = args['chans'] - - input_size = (in_chans, 224, 224) - if 'input_size' in args and args['input_size'] is not None: - assert isinstance(args['input_size'], (tuple, list)) - assert len(args['input_size']) == 3 - input_size = tuple(args['input_size']) - in_chans = input_size[0] # input_size overrides in_chans - elif 'img_size' in args and args['img_size'] is not None: - assert isinstance(args['img_size'], int) - input_size = (in_chans, args['img_size'], args['img_size']) - else: - if use_test_size and 'test_input_size' in default_cfg: - input_size = default_cfg['test_input_size'] - elif 'input_size' in default_cfg: - input_size = default_cfg['input_size'] - new_config['input_size'] = input_size - - # resolve interpolation method - new_config['interpolation'] = 'bicubic' - if 'interpolation' in args and args['interpolation']: - new_config['interpolation'] = args['interpolation'] - elif 'interpolation' in default_cfg: - new_config['interpolation'] = default_cfg['interpolation'] - - # resolve dataset + model mean for normalization - new_config['mean'] = IMAGENET_DEFAULT_MEAN - if 'mean' in args and args['mean'] is not None: - mean = tuple(args['mean']) - if len(mean) == 1: - mean = tuple(list(mean) * in_chans) - else: - assert len(mean) == in_chans - new_config['mean'] = mean - elif 'mean' in default_cfg: - new_config['mean'] = default_cfg['mean'] - - # resolve dataset + model std deviation for normalization - new_config['std'] = IMAGENET_DEFAULT_STD - if 'std' in args and args['std'] is not None: - std = tuple(args['std']) - if len(std) == 1: - std = tuple(list(std) * in_chans) - else: - assert len(std) == in_chans - new_config['std'] = std - elif 'std' in default_cfg: - new_config['std'] = default_cfg['std'] - - # resolve default crop percentage - new_config['crop_pct'] = DEFAULT_CROP_PCT - if 'crop_pct' in args and args['crop_pct'] is not None: - new_config['crop_pct'] = args['crop_pct'] - elif 'crop_pct' in default_cfg: - new_config['crop_pct'] = default_cfg['crop_pct'] - - if verbose: - _logger.info('Data processing configuration for current model + dataset:') - for n, v in new_config.items(): - _logger.info('\t%s: %s' % (n, str(v))) - - return new_config diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/runner/utils.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/runner/utils.py deleted file mode 100644 index c5befb8e56ece50b5fecfd007b26f8a29124c0bd..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/runner/utils.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os -import random -import sys -import time -import warnings -from getpass import getuser -from socket import gethostname - -import numpy as np -import torch - -import annotator.uniformer.mmcv as mmcv - - -def get_host_info(): - """Get hostname and username. - - Return empty string if exception raised, e.g. ``getpass.getuser()`` will - lead to error in docker container - """ - host = '' - try: - host = f'{getuser()}@{gethostname()}' - except Exception as e: - warnings.warn(f'Host or user not found: {str(e)}') - finally: - return host - - -def get_time_str(): - return time.strftime('%Y%m%d_%H%M%S', time.localtime()) - - -def obj_from_dict(info, parent=None, default_args=None): - """Initialize an object from dict. - - The dict must contain the key "type", which indicates the object type, it - can be either a string or type, such as "list" or ``list``. Remaining - fields are treated as the arguments for constructing the object. - - Args: - info (dict): Object types and arguments. - parent (:class:`module`): Module which may containing expected object - classes. - default_args (dict, optional): Default arguments for initializing the - object. - - Returns: - any type: Object built from the dict. - """ - assert isinstance(info, dict) and 'type' in info - assert isinstance(default_args, dict) or default_args is None - args = info.copy() - obj_type = args.pop('type') - if mmcv.is_str(obj_type): - if parent is not None: - obj_type = getattr(parent, obj_type) - else: - obj_type = sys.modules[obj_type] - elif not isinstance(obj_type, type): - raise TypeError('type must be a str or valid type, but ' - f'got {type(obj_type)}') - if default_args is not None: - for name, value in default_args.items(): - args.setdefault(name, value) - return obj_type(**args) - - -def set_random_seed(seed, deterministic=False, use_rank_shift=False): - """Set random seed. - - Args: - seed (int): Seed to be used. - deterministic (bool): Whether to set the deterministic option for - CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` - to True and `torch.backends.cudnn.benchmark` to False. - Default: False. - rank_shift (bool): Whether to add rank number to the random seed to - have different random seed in different threads. Default: False. - """ - if use_rank_shift: - rank, _ = mmcv.runner.get_dist_info() - seed += rank - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - os.environ['PYTHONHASHSEED'] = str(seed) - if deterministic: - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False diff --git a/spaces/cscan/CodeFormer/CodeFormer/basicsr/data/transforms.py b/spaces/cscan/CodeFormer/CodeFormer/basicsr/data/transforms.py deleted file mode 100644 index aead9dc73ed063e1c5865040eaa2652b26aa3ad3..0000000000000000000000000000000000000000 --- a/spaces/cscan/CodeFormer/CodeFormer/basicsr/data/transforms.py +++ /dev/null @@ -1,165 +0,0 @@ -import cv2 -import random - - -def mod_crop(img, scale): - """Mod crop images, used during testing. - - Args: - img (ndarray): Input image. - scale (int): Scale factor. - - Returns: - ndarray: Result image. - """ - img = img.copy() - if img.ndim in (2, 3): - h, w = img.shape[0], img.shape[1] - h_remainder, w_remainder = h % scale, w % scale - img = img[:h - h_remainder, :w - w_remainder, ...] - else: - raise ValueError(f'Wrong img ndim: {img.ndim}.') - return img - - -def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path): - """Paired random crop. 
- - It crops lists of lq and gt images with corresponding locations. - - Args: - img_gts (list[ndarray] | ndarray): GT images. Note that all images - should have the same shape. If the input is an ndarray, it will - be transformed to a list containing itself. - img_lqs (list[ndarray] | ndarray): LQ images. Note that all images - should have the same shape. If the input is an ndarray, it will - be transformed to a list containing itself. - gt_patch_size (int): GT patch size. - scale (int): Scale factor. - gt_path (str): Path to ground-truth. - - Returns: - list[ndarray] | ndarray: GT images and LQ images. If returned results - only have one element, just return ndarray. - """ - - if not isinstance(img_gts, list): - img_gts = [img_gts] - if not isinstance(img_lqs, list): - img_lqs = [img_lqs] - - h_lq, w_lq, _ = img_lqs[0].shape - h_gt, w_gt, _ = img_gts[0].shape - lq_patch_size = gt_patch_size // scale - - if h_gt != h_lq * scale or w_gt != w_lq * scale: - raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ' - f'multiplication of LQ ({h_lq}, {w_lq}).') - if h_lq < lq_patch_size or w_lq < lq_patch_size: - raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size ' - f'({lq_patch_size}, {lq_patch_size}). ' - f'Please remove {gt_path}.') - - # randomly choose top and left coordinates for lq patch - top = random.randint(0, h_lq - lq_patch_size) - left = random.randint(0, w_lq - lq_patch_size) - - # crop lq patch - img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs] - - # crop corresponding gt patch - top_gt, left_gt = int(top * scale), int(left * scale) - img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts] - if len(img_gts) == 1: - img_gts = img_gts[0] - if len(img_lqs) == 1: - img_lqs = img_lqs[0] - return img_gts, img_lqs - - -def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False): - """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees). - - We use vertical flip and transpose for rotation implementation. - All the images in the list use the same augmentation. - - Args: - imgs (list[ndarray] | ndarray): Images to be augmented. If the input - is an ndarray, it will be transformed to a list. - hflip (bool): Horizontal flip. Default: True. - rotation (bool): Rotation. Default: True. - flows (list[ndarray]): Flows to be augmented. If the input is an - ndarray, it will be transformed to a list. - Dimension is (h, w, 2). Default: None. - return_status (bool): Return the status of flip and rotation. - Default: False. - - Returns: - list[ndarray] | ndarray: Augmented images and flows. If returned - results only have one element, just return ndarray.
- - """ - hflip = hflip and random.random() < 0.5 - vflip = rotation and random.random() < 0.5 - rot90 = rotation and random.random() < 0.5 - - def _augment(img): - if hflip: # horizontal - cv2.flip(img, 1, img) - if vflip: # vertical - cv2.flip(img, 0, img) - if rot90: - img = img.transpose(1, 0, 2) - return img - - def _augment_flow(flow): - if hflip: # horizontal - cv2.flip(flow, 1, flow) - flow[:, :, 0] *= -1 - if vflip: # vertical - cv2.flip(flow, 0, flow) - flow[:, :, 1] *= -1 - if rot90: - flow = flow.transpose(1, 0, 2) - flow = flow[:, :, [1, 0]] - return flow - - if not isinstance(imgs, list): - imgs = [imgs] - imgs = [_augment(img) for img in imgs] - if len(imgs) == 1: - imgs = imgs[0] - - if flows is not None: - if not isinstance(flows, list): - flows = [flows] - flows = [_augment_flow(flow) for flow in flows] - if len(flows) == 1: - flows = flows[0] - return imgs, flows - else: - if return_status: - return imgs, (hflip, vflip, rot90) - else: - return imgs - - -def img_rotate(img, angle, center=None, scale=1.0): - """Rotate image. - - Args: - img (ndarray): Image to be rotated. - angle (float): Rotation angle in degrees. Positive values mean - counter-clockwise rotation. - center (tuple[int]): Rotation center. If the center is None, - initialize it as the center of the image. Default: None. - scale (float): Isotropic scale factor. Default: 1.0. - """ - (h, w) = img.shape[:2] - - if center is None: - center = (w // 2, h // 2) - - matrix = cv2.getRotationMatrix2D(center, angle, scale) - rotated_img = cv2.warpAffine(img, matrix, (w, h)) - return rotated_img diff --git a/spaces/dachenchen/HiWantJoin/assets/Kelpy-Codos.js b/spaces/dachenchen/HiWantJoin/assets/Kelpy-Codos.js deleted file mode 100644 index cfbaeedb4f371dfb5fe157db545b364046fca3e1..0000000000000000000000000000000000000000 --- a/spaces/dachenchen/HiWantJoin/assets/Kelpy-Codos.js +++ /dev/null @@ -1,76 +0,0 @@ -// ==UserScript== -// @name Kelpy Codos -// @namespace https://github.com/Keldos-Li/Kelpy-Codos -// @version 1.0.5 -// @author Keldos; https://keldos.me/ -// @description Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially. 
-// Based on Chuanhu ChatGPT version: ac04408 (2023-3-22) -// @license GPL-3.0 -// @grant none -// ==/UserScript== - -(function () { - 'use strict'; - - function addCopyButton(pre) { - var code = pre.querySelector('code'); - if (!code) { - return; // 如果没有找到 元素,则不添加按钮 - } - var firstChild = code.firstChild; - if (!firstChild) { - return; // 如果 元素没有子节点,则不添加按钮 - } - var button = document.createElement('button'); - button.textContent = '\uD83D\uDCCE'; // 使用 📎 符号作为“复制”按钮的文本 - button.style.position = 'relative'; - button.style.float = 'right'; - button.style.fontSize = '1em'; // 可选:调整按钮大小 - button.style.background = 'none'; // 可选:去掉背景颜色 - button.style.border = 'none'; // 可选:去掉边框 - button.style.cursor = 'pointer'; // 可选:显示指针样式 - button.addEventListener('click', function () { - var range = document.createRange(); - range.selectNodeContents(code); - range.setStartBefore(firstChild); // 将范围设置为第一个子节点之前 - var selection = window.getSelection(); - selection.removeAllRanges(); - selection.addRange(range); - - try { - var success = document.execCommand('copy'); - if (success) { - button.textContent = '\u2714'; - setTimeout(function () { - button.textContent = '\uD83D\uDCCE'; // 恢复按钮为“复制” - }, 2000); - } else { - button.textContent = '\u2716'; - } - } catch (e) { - console.error(e); - button.textContent = '\u2716'; - } - - selection.removeAllRanges(); - }); - code.insertBefore(button, firstChild); // 将按钮插入到第一个子元素之前 - } - - function handleNewElements(mutationsList, observer) { - for (var mutation of mutationsList) { - if (mutation.type === 'childList') { - for (var node of mutation.addedNodes) { - if (node.nodeName === 'PRE') { - addCopyButton(node); - } - } - } - } - } - - var observer = new MutationObserver(handleNewElements); - observer.observe(document.documentElement, { childList: true, subtree: true }); - - document.querySelectorAll('pre').forEach(addCopyButton); -})(); diff --git a/spaces/dakaiye/dky_xuexi/request_llm/edge_gpt.py b/spaces/dakaiye/dky_xuexi/request_llm/edge_gpt.py deleted file mode 100644 index bbf84000d84a42de80d3c051a24f06336af76aaf..0000000000000000000000000000000000000000 --- a/spaces/dakaiye/dky_xuexi/request_llm/edge_gpt.py +++ /dev/null @@ -1,409 +0,0 @@ -""" -======================================================================== -第一部分:来自EdgeGPT.py -https://github.com/acheong08/EdgeGPT -======================================================================== -""" - -import argparse -import asyncio -import json -import os -import random -import re -import ssl -import sys -import uuid -from enum import Enum -from typing import Generator -from typing import Literal -from typing import Optional -from typing import Union -import websockets.client as websockets - -DELIMITER = "\x1e" - - -# Generate random IP between range 13.104.0.0/14 -FORWARDED_IP = ( - f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" -) - -HEADERS = { - "accept": "application/json", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"', - "sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"109.0.1518.78"', - "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": "", - "sec-ch-ua-platform": '"Windows"', - "sec-ch-ua-platform-version": '"15.0.0"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - 
"sec-fetch-site": "same-origin", - "x-ms-client-request-id": str(uuid.uuid4()), - "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32", - "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx", - "Referrer-Policy": "origin-when-cross-origin", - "x-forwarded-for": FORWARDED_IP, -} - -HEADERS_INIT_CONVER = { - "authority": "edgeservices.bing.com", - "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", - "accept-language": "en-US,en;q=0.9", - "cache-control": "max-age=0", - "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', - "sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"110.0.1587.69"', - "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": '""', - "sec-ch-ua-platform": '"Windows"', - "sec-ch-ua-platform-version": '"15.0.0"', - "sec-fetch-dest": "document", - "sec-fetch-mode": "navigate", - "sec-fetch-site": "none", - "sec-fetch-user": "?1", - "upgrade-insecure-requests": "1", - "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69", - "x-edge-shopping-flag": "1", - "x-forwarded-for": FORWARDED_IP, -} - -def get_ssl_context(): - import certifi - ssl_context = ssl.create_default_context() - ssl_context.load_verify_locations(certifi.where()) - return ssl_context - - - -class NotAllowedToAccess(Exception): - pass - - -class ConversationStyle(Enum): - creative = "h3imaginative,clgalileo,gencontentv3" - balanced = "galileo" - precise = "h3precise,clgalileo" - - -CONVERSATION_STYLE_TYPE = Optional[ - Union[ConversationStyle, Literal["creative", "balanced", "precise"]] -] - - -def _append_identifier(msg: dict) -> str: - """ - Appends special character to end of message to identify end of message - """ - # Convert dict to json string - return json.dumps(msg) + DELIMITER - - -def _get_ran_hex(length: int = 32) -> str: - """ - Returns random hex string - """ - return "".join(random.choice("0123456789abcdef") for _ in range(length)) - - -class _ChatHubRequest: - """ - Request object for ChatHub - """ - - def __init__( - self, - conversation_signature: str, - client_id: str, - conversation_id: str, - invocation_id: int = 0, - ) -> None: - self.struct: dict = {} - - self.client_id: str = client_id - self.conversation_id: str = conversation_id - self.conversation_signature: str = conversation_signature - self.invocation_id: int = invocation_id - - def update( - self, - prompt, - conversation_style, - options, - ) -> None: - """ - Updates request object - """ - if options is None: - options = [ - "deepleo", - "enable_debug_commands", - "disable_emoji_spoken_text", - "enablemm", - ] - if conversation_style: - if not isinstance(conversation_style, ConversationStyle): - conversation_style = getattr(ConversationStyle, conversation_style) - options = [ - "nlu_direct_response_filter", - "deepleo", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - conversation_style.value, - "dtappid", - "cricinfo", - "cricinfov2", - "dv3sugg", - ] - self.struct = { - "arguments": [ - { - "source": "cib", - "optionsSets": options, - "sliceIds": [ - "222dtappid", - "225cricinfo", - "224locals0", - ], - "traceId": _get_ran_hex(32), - "isStartOfSession": self.invocation_id == 
0, - "message": { - "author": "user", - "inputMethod": "Keyboard", - "text": prompt, - "messageType": "Chat", - }, - "conversationSignature": self.conversation_signature, - "participant": { - "id": self.client_id, - }, - "conversationId": self.conversation_id, - }, - ], - "invocationId": str(self.invocation_id), - "target": "chat", - "type": 4, - } - self.invocation_id += 1 - - -class _Conversation: - """ - Conversation API - """ - - def __init__( - self, - cookies, - proxy, - ) -> None: - self.struct: dict = { - "conversationId": None, - "clientId": None, - "conversationSignature": None, - "result": {"value": "Success", "message": None}, - } - import httpx - self.proxy = proxy - proxy = ( - proxy - or os.environ.get("all_proxy") - or os.environ.get("ALL_PROXY") - or os.environ.get("https_proxy") - or os.environ.get("HTTPS_PROXY") - or None - ) - if proxy is not None and proxy.startswith("socks5h://"): - proxy = "socks5://" + proxy[len("socks5h://") :] - self.session = httpx.Client( - proxies=proxy, - timeout=30, - headers=HEADERS_INIT_CONVER, - ) - for cookie in cookies: - self.session.cookies.set(cookie["name"], cookie["value"]) - - # Send GET request - response = self.session.get( - url=os.environ.get("BING_PROXY_URL") - or "https://edgeservices.bing.com/edgesvc/turing/conversation/create", - ) - if response.status_code != 200: - response = self.session.get( - "https://edge.churchless.tech/edgesvc/turing/conversation/create", - ) - if response.status_code != 200: - print(f"Status code: {response.status_code}") - print(response.text) - print(response.url) - raise Exception("Authentication failed") - try: - self.struct = response.json() - except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc: - raise Exception( - "Authentication failed. 
You have not been accepted into the beta.", - ) from exc - if self.struct["result"]["value"] == "UnauthorizedRequest": - raise NotAllowedToAccess(self.struct["result"]["message"]) - - -class _ChatHub: - """ - Chat API - """ - - def __init__(self, conversation) -> None: - self.wss = None - self.request: _ChatHubRequest - self.loop: bool - self.task: asyncio.Task - print(conversation.struct) - self.request = _ChatHubRequest( - conversation_signature=conversation.struct["conversationSignature"], - client_id=conversation.struct["clientId"], - conversation_id=conversation.struct["conversationId"], - ) - - async def ask_stream( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - raw: bool = False, - options: dict = None, - ) -> Generator[str, None, None]: - """ - Ask a question to the bot - """ - if self.wss and not self.wss.closed: - await self.wss.close() - # Check if websocket is closed - self.wss = await websockets.connect( - wss_link, - extra_headers=HEADERS, - max_size=None, - ssl=get_ssl_context() - ) - await self._initial_handshake() - # Construct a ChatHub request - self.request.update( - prompt=prompt, - conversation_style=conversation_style, - options=options, - ) - # Send request - await self.wss.send(_append_identifier(self.request.struct)) - final = False - while not final: - objects = str(await self.wss.recv()).split(DELIMITER) - for obj in objects: - if obj is None or not obj: - continue - response = json.loads(obj) - if response.get("type") != 2 and raw: - yield False, response - elif response.get("type") == 1 and response["arguments"][0].get( - "messages", - ): - resp_txt = response["arguments"][0]["messages"][0]["adaptiveCards"][ - 0 - ]["body"][0].get("text") - yield False, resp_txt - elif response.get("type") == 2: - final = True - yield True, response - - async def _initial_handshake(self) -> None: - await self.wss.send(_append_identifier({"protocol": "json", "version": 1})) - await self.wss.recv() - - async def close(self) -> None: - """ - Close the connection - """ - if self.wss and not self.wss.closed: - await self.wss.close() - - -class NewbingChatbot: - """ - Combines everything to make it seamless - """ - - def __init__( - self, - cookies, - proxy - ) -> None: - if cookies is None: - cookies = {} - self.cookies = cookies - self.proxy = proxy - self.chat_hub: _ChatHub = _ChatHub( - _Conversation(self.cookies, self.proxy), - ) - - async def ask( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - options: dict = None, - ) -> dict: - """ - Ask a question to the bot - """ - async for final, response in self.chat_hub.ask_stream( - prompt=prompt, - conversation_style=conversation_style, - wss_link=wss_link, - options=options, - ): - if final: - return response - await self.chat_hub.wss.close() - return None - - async def ask_stream( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - raw: bool = False, - options: dict = None, - ) -> Generator[str, None, None]: - """ - Ask a question to the bot - """ - async for response in self.chat_hub.ask_stream( - prompt=prompt, - conversation_style=conversation_style, - wss_link=wss_link, - raw=raw, - options=options, - ): - yield response - - async def close(self) -> None: - """ - Close the connection - """ - await self.chat_hub.close() - - async def reset(self) -> None: - """ - Reset the conversation - """ - await self.close() - self.chat_hub = _ChatHub(_Conversation(self.cookies, self.proxy)) - - diff 
--git a/spaces/danterivers/music-generation-samples/audiocraft/utils/utils.py b/spaces/danterivers/music-generation-samples/audiocraft/utils/utils.py deleted file mode 100644 index 86e1448d065fa182ca69aae00d2f2a7eea55d8a4..0000000000000000000000000000000000000000 --- a/spaces/danterivers/music-generation-samples/audiocraft/utils/utils.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from concurrent.futures import ProcessPoolExecutor -from functools import wraps -import hashlib -import logging -import typing as tp - -import flashy -import flashy.distrib -import omegaconf -import torch -from torch.nn.utils.rnn import pad_sequence - - -logger = logging.getLogger(__name__) - - -def dict_from_config(cfg: omegaconf.DictConfig) -> dict: - """Convenience function to map an omegaconf configuration to a dictionary. - - Args: - cfg (omegaconf.DictConfig): Original configuration to map to dict. - Returns: - dict: Config as dictionary object. - """ - dct = omegaconf.OmegaConf.to_container(cfg, resolve=True) - assert isinstance(dct, dict) - return dct - - -def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset: - if max_samples >= len(dataset): - return dataset - - generator = torch.Generator().manual_seed(seed) - perm = torch.randperm(len(dataset), generator=generator) - return torch.utils.data.Subset(dataset, perm[:max_samples].tolist()) - - -def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int, - num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader: - """Convenience function to load dataset into a dataloader with optional subset sampling. - - Args: - dataset: Dataset to load. - num_samples (Optional[int]): Number of samples to limit subset size. - batch_size (int): Batch size. - num_workers (int): Number of workers for data loading. - seed (int): Random seed. - """ - if num_samples is not None: - dataset = random_subset(dataset, num_samples, seed) - - dataloader = flashy.distrib.loader( - dataset, - batch_size=batch_size, - num_workers=num_workers, - **kwargs - ) - return dataloader - - -def get_dataset_from_loader(dataloader): - dataset = dataloader.dataset - if isinstance(dataset, torch.utils.data.Subset): - return dataset.dataset - else: - return dataset - - -def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None): - """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension. - - Args: - input (torch.Tensor): The input tensor containing probabilities. - num_samples (int): Number of samples to draw. - replacement (bool): Whether to draw with replacement or not. - Keywords args: - generator (torch.Generator): A pseudorandom number generator for sampling. - Returns: - torch.Tensor: Last dimension contains num_samples indices - sampled from the multinomial probability distribution - located in the last dimension of tensor input. - """ - input_ = input.reshape(-1, input.shape[-1]) - output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator) - output = output_.reshape(*list(input.shape[:-1]), -1) - return output - - -def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor: - """Sample next token from top K values along the last dimension of the input probs tensor. 
- - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - k (int): The k in “top-k”. - Returns: - torch.Tensor: Sampled tokens. - """ - top_k_value, _ = torch.topk(probs, k, dim=-1) - min_value_top_k = top_k_value[..., [-1]] - probs *= (probs >= min_value_top_k).float() - probs.div_(probs.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs, num_samples=1) - return next_token - - -def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor: - """Sample next token from top P probabilities along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - p (int): The p in “top-p”. - Returns: - torch.Tensor: Sampled tokens. - """ - probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True) - probs_sum = torch.cumsum(probs_sort, dim=-1) - mask = probs_sum - probs_sort > p - probs_sort *= (~mask).float() - probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs_sort, num_samples=1) - next_token = torch.gather(probs_idx, -1, next_token) - return next_token - - -class DummyPoolExecutor: - """Dummy pool executor to use when we actually have only 1 worker. - (e.g. instead of ProcessPoolExecutor). - """ - class DummyResult: - def __init__(self, func, *args, **kwargs): - self.func = func - self.args = args - self.kwargs = kwargs - - def result(self): - return self.func(*self.args, **self.kwargs) - - def __init__(self, workers, mp_context=None): - pass - - def submit(self, func, *args, **kwargs): - return DummyPoolExecutor.DummyResult(func, *args, **kwargs) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - return - - -def get_pool_executor(num_workers: int, mp_context=None): - return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1) - - -def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor: - """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences). - For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]] - - Args: - lengths (torch.Tensor): tensor with lengths - max_len (int): can set the max length manually. Defaults to None. - Returns: - torch.Tensor: mask with 0s where there is pad tokens else 1s - """ - assert len(lengths.shape) == 1, "Length shape should be 1 dimensional." - final_length = lengths.max().item() if not max_len else max_len - final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor - return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None] - - -def hash_trick(word: str, vocab_size: int) -> int: - """Hash trick to pair each word with an index - - Args: - word (str): word we wish to convert to an index - vocab_size (int): size of the vocabulary - Returns: - int: index of the word in the embedding LUT - """ - hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16) - return hash % vocab_size - - -def with_rank_rng(base_seed: int = 1234): - """Decorator for a function so that the function will use a Random Number Generator - whose state depend on the GPU rank. The original RNG state is restored upon returning. - - Args: - base_seed (int): Random seed. 
- """ - def _decorator(fun: tp.Callable): - @wraps(fun) - def _decorated(*args, **kwargs): - state = torch.get_rng_state() - seed = base_seed ^ flashy.distrib.rank() - torch.manual_seed(seed) - logger.debug('Rank dependent seed set to %d', seed) - try: - return fun(*args, **kwargs) - finally: - torch.set_rng_state(state) - logger.debug('RNG state restored.') - return _decorated - return _decorator - - -def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Get a list of tensors and collate them to a single tensor. according to the following logic: - - `dim` specifies the time dimension which will be stacked and padded. - - The output will contain 1 new dimension (dimension index 0) which will be the size of - of the original list. - - Args: - tensors (tp.List[torch.Tensor]): List of tensors to collate. - dim (int): Dimension which will be stacked and padded. - Returns: - tp.Tuple[torch.Tensor, torch.Tensor]: - torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension - (dimension index 0) which will be the size of the original list. - torch.Tensor: Tensor containing length of original tensor sizes (without padding). - """ - tensors = [x.transpose(0, dim) for x in tensors] - lens = torch.LongTensor([len(x) for x in tensors]) - padded_tensors = pad_sequence(tensors) - padded_tensors = padded_tensors.transpose(0, 1) - padded_tensors = padded_tensors.transpose(1, dim + 1) - return padded_tensors, lens diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiohttp/formdata.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiohttp/formdata.py deleted file mode 100644 index e7cd24ca9f7afb2bd31f1c653d9e15acb4fedc8b..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiohttp/formdata.py +++ /dev/null @@ -1,172 +0,0 @@ -import io -from typing import Any, Iterable, List, Optional -from urllib.parse import urlencode - -from multidict import MultiDict, MultiDictProxy - -from . import hdrs, multipart, payload -from .helpers import guess_filename -from .payload import Payload - -__all__ = ("FormData",) - - -class FormData: - """Helper class for form body generation. - - Supports multipart/form-data and application/x-www-form-urlencoded. - """ - - def __init__( - self, - fields: Iterable[Any] = (), - quote_fields: bool = True, - charset: Optional[str] = None, - ) -> None: - self._writer = multipart.MultipartWriter("form-data") - self._fields: List[Any] = [] - self._is_multipart = False - self._is_processed = False - self._quote_fields = quote_fields - self._charset = charset - - if isinstance(fields, dict): - fields = list(fields.items()) - elif not isinstance(fields, (list, tuple)): - fields = (fields,) - self.add_fields(*fields) - - @property - def is_multipart(self) -> bool: - return self._is_multipart - - def add_field( - self, - name: str, - value: Any, - *, - content_type: Optional[str] = None, - filename: Optional[str] = None, - content_transfer_encoding: Optional[str] = None, - ) -> None: - - if isinstance(value, io.IOBase): - self._is_multipart = True - elif isinstance(value, (bytes, bytearray, memoryview)): - if filename is None and content_transfer_encoding is None: - filename = name - - type_options: MultiDict[str] = MultiDict({"name": name}) - if filename is not None and not isinstance(filename, str): - raise TypeError( - "filename must be an instance of str. 
" "Got: %s" % filename - ) - if filename is None and isinstance(value, io.IOBase): - filename = guess_filename(value, name) - if filename is not None: - type_options["filename"] = filename - self._is_multipart = True - - headers = {} - if content_type is not None: - if not isinstance(content_type, str): - raise TypeError( - "content_type must be an instance of str. " "Got: %s" % content_type - ) - headers[hdrs.CONTENT_TYPE] = content_type - self._is_multipart = True - if content_transfer_encoding is not None: - if not isinstance(content_transfer_encoding, str): - raise TypeError( - "content_transfer_encoding must be an instance" - " of str. Got: %s" % content_transfer_encoding - ) - headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding - self._is_multipart = True - - self._fields.append((type_options, headers, value)) - - def add_fields(self, *fields: Any) -> None: - to_add = list(fields) - - while to_add: - rec = to_add.pop(0) - - if isinstance(rec, io.IOBase): - k = guess_filename(rec, "unknown") - self.add_field(k, rec) # type: ignore[arg-type] - - elif isinstance(rec, (MultiDictProxy, MultiDict)): - to_add.extend(rec.items()) - - elif isinstance(rec, (list, tuple)) and len(rec) == 2: - k, fp = rec - self.add_field(k, fp) # type: ignore[arg-type] - - else: - raise TypeError( - "Only io.IOBase, multidict and (name, file) " - "pairs allowed, use .add_field() for passing " - "more complex parameters, got {!r}".format(rec) - ) - - def _gen_form_urlencoded(self) -> payload.BytesPayload: - # form data (x-www-form-urlencoded) - data = [] - for type_options, _, value in self._fields: - data.append((type_options["name"], value)) - - charset = self._charset if self._charset is not None else "utf-8" - - if charset == "utf-8": - content_type = "application/x-www-form-urlencoded" - else: - content_type = "application/x-www-form-urlencoded; " "charset=%s" % charset - - return payload.BytesPayload( - urlencode(data, doseq=True, encoding=charset).encode(), - content_type=content_type, - ) - - def _gen_form_data(self) -> multipart.MultipartWriter: - """Encode a list of fields using the multipart/form-data MIME format""" - if self._is_processed: - raise RuntimeError("Form data has been processed already") - for dispparams, headers, value in self._fields: - try: - if hdrs.CONTENT_TYPE in headers: - part = payload.get_payload( - value, - content_type=headers[hdrs.CONTENT_TYPE], - headers=headers, - encoding=self._charset, - ) - else: - part = payload.get_payload( - value, headers=headers, encoding=self._charset - ) - except Exception as exc: - raise TypeError( - "Can not serialize value type: %r\n " - "headers: %r\n value: %r" % (type(value), headers, value) - ) from exc - - if dispparams: - part.set_content_disposition( - "form-data", quote_fields=self._quote_fields, **dispparams - ) - # FIXME cgi.FieldStorage doesn't likes body parts with - # Content-Length which were sent via chunked transfer encoding - assert part.headers is not None - part.headers.popall(hdrs.CONTENT_LENGTH, None) - - self._writer.append_payload(part) - - self._is_processed = True - return self._writer - - def __call__(self) -> Payload: - if self._is_multipart: - return self._gen_form_data() - else: - return self._gen_form_urlencoded() diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_euler_discrete.py b/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_euler_discrete.py deleted file mode 100644 index 
d6252904fd9ac250b555dfe00d08c2ce64e0936b..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_euler_discrete.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2023 Katherine Crowson and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput, logging, randn_tensor -from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -@dataclass -# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerDiscrete -class EulerDiscreteSchedulerOutput(BaseOutput): - """ - Output class for the scheduler's step function output. - - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - The predicted denoised sample (x_{0}) based on the model output from the current timestep. - `pred_original_sample` can be used to preview progress or for guidance. - """ - - prev_sample: torch.FloatTensor - pred_original_sample: Optional[torch.FloatTensor] = None - - -# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar -def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. - - - Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to - prevent singularities. - - Returns: - betas (`np.ndarray`): the betas used by the scheduler to step the model outputs - """ - - def alpha_bar(time_step): - return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 - - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return torch.tensor(betas, dtype=torch.float32) - - -class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin): - """ - Euler scheduler (Algorithm 2) from Karras et al. (2022) https://arxiv.org/abs/2206.00364. . 
Based on the original - k-diffusion implementation by Katherine Crowson: - https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L51 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear` or `scaled_linear`. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - prediction_type (`str`, default `"epsilon"`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 - https://imagen.research.google/video/paper.pdf) - interpolation_type (`str`, default `"linear"`, optional): - interpolation type to compute intermediate sigmas for the scheduler denoising steps. Should be one of - [`"linear"`, `"log_linear"`]. - """ - - _compatibles = [e.name for e in KarrasDiffusionSchedulers] - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[Union[np.ndarray, List[float]]] = None, - prediction_type: str = "epsilon", - interpolation_type: str = "linear", - ): - if trained_betas is not None: - self.betas = torch.tensor(trained_betas, dtype=torch.float32) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. - self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - elif beta_schedule == "squaredcos_cap_v2": - # Glide cosine schedule - self.betas = betas_for_alpha_bar(num_train_timesteps) - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) - self.sigmas = torch.from_numpy(sigmas) - - # standard deviation of the initial noise distribution - self.init_noise_sigma = self.sigmas.max() - - # setable values - self.num_inference_steps = None - timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() - self.timesteps = torch.from_numpy(timesteps) - self.is_scale_input_called = False - - def scale_model_input( - self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] - ) -> torch.FloatTensor: - """ - Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. 
- - Args: - sample (`torch.FloatTensor`): input sample - timestep (`float` or `torch.FloatTensor`): the current timestep in the diffusion chain - - Returns: - `torch.FloatTensor`: scaled input sample - """ - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - step_index = (self.timesteps == timestep).nonzero().item() - sigma = self.sigmas[step_index] - - sample = sample / ((sigma**2 + 1) ** 0.5) - - self.is_scale_input_called = True - return sample - - def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): - """ - Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - device (`str` or `torch.device`, optional): - the device to which the timesteps should be moved to. If `None`, the timesteps are not moved. - """ - self.num_inference_steps = num_inference_steps - - timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy() - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - - if self.config.interpolation_type == "linear": - sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) - elif self.config.interpolation_type == "log_linear": - sigmas = torch.linspace(np.log(sigmas[-1]), np.log(sigmas[0]), num_inference_steps + 1).exp() - else: - raise ValueError( - f"{self.config.interpolation_type} is not implemented. Please specify interpolation_type to either" - " 'linear' or 'log_linear'" - ) - - sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) - self.sigmas = torch.from_numpy(sigmas).to(device=device) - if str(device).startswith("mps"): - # mps does not support float64 - self.timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) - else: - self.timesteps = torch.from_numpy(timesteps).to(device=device) - - def step( - self, - model_output: torch.FloatTensor, - timestep: Union[float, torch.FloatTensor], - sample: torch.FloatTensor, - s_churn: float = 0.0, - s_tmin: float = 0.0, - s_tmax: float = float("inf"), - s_noise: float = 1.0, - generator: Optional[torch.Generator] = None, - return_dict: bool = True, - ) -> Union[EulerDiscreteSchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`float`): current timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - s_churn (`float`) - s_tmin (`float`) - s_tmax (`float`) - s_noise (`float`) - generator (`torch.Generator`, optional): Random number generator. - return_dict (`bool`): option for returning tuple rather than EulerDiscreteSchedulerOutput class - - Returns: - [`~schedulers.scheduling_utils.EulerDiscreteSchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.EulerDiscreteSchedulerOutput`] if `return_dict` is True, otherwise a - `tuple`. When returning a tuple, the first element is the sample tensor. - - """ - - if ( - isinstance(timestep, int) - or isinstance(timestep, torch.IntTensor) - or isinstance(timestep, torch.LongTensor) - ): - raise ValueError( - ( - "Passing integer indices (e.g. 
from `enumerate(timesteps)`) as timesteps to" - " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass" - " one of the `scheduler.timesteps` as a timestep." - ), - ) - - if not self.is_scale_input_called: - logger.warning( - "The `scale_model_input` function should be called before `step` to ensure correct denoising. " - "See `StableDiffusionPipeline` for a usage example." - ) - - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - - step_index = (self.timesteps == timestep).nonzero().item() - sigma = self.sigmas[step_index] - - gamma = min(s_churn / (len(self.sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 - - noise = randn_tensor( - model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator - ) - - eps = noise * s_noise - sigma_hat = sigma * (gamma + 1) - - if gamma > 0: - sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5 - - # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise - # NOTE: "original_sample" should not be an expected prediction_type but is left in for - # backwards compatibility - if self.config.prediction_type == "original_sample" or self.config.prediction_type == "sample": - pred_original_sample = model_output - elif self.config.prediction_type == "epsilon": - pred_original_sample = sample - sigma_hat * model_output - elif self.config.prediction_type == "v_prediction": - # * c_out + input * c_skip - pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" - ) - - # 2. Convert to an ODE derivative - derivative = (sample - pred_original_sample) / sigma_hat - - dt = self.sigmas[step_index + 1] - sigma_hat - - prev_sample = sample + derivative * dt - - if not return_dict: - return (prev_sample,) - - return EulerDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.FloatTensor, - ) -> torch.FloatTensor: - # Make sure sigmas and timesteps have the same device and dtype as original_samples - self.sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) - if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): - # mps does not support float64 - self.timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) - timesteps = timesteps.to(original_samples.device, dtype=torch.float32) - else: - self.timesteps = self.timesteps.to(original_samples.device) - timesteps = timesteps.to(original_samples.device) - - schedule_timesteps = self.timesteps - step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] - - sigma = self.sigmas[step_indices].flatten() - while len(sigma.shape) < len(original_samples.shape): - sigma = sigma.unsqueeze(-1) - - noisy_samples = original_samples + noise * sigma - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/dfskGT/parrot-paraphraser/app.py b/spaces/dfskGT/parrot-paraphraser/app.py deleted file mode 100644 index 245ccc3635a2352dd087321ff6d1f5da37068de7..0000000000000000000000000000000000000000 --- a/spaces/dfskGT/parrot-paraphraser/app.py +++ /dev/null @@ -1,42 +0,0 @@ -import gradio as gr - -from parrot import Parrot -import warnings - 
-warnings.filterwarnings("ignore") - -""" -uncomment to get reproducable paraphrase generations -def random_state(seed): - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(seed) - -random_state(1234) -""" - -# Init models (make sure you init ONLY once if you integrate this to your code) -parrot = Parrot(model_tag="prithivida/parrot_paraphraser_on_T5") - - -def generate_paraphases(phrase): - para_phrases = parrot.augment( - input_phrase=phrase, use_gpu=False, max_return_phrases=10 - ) - return "\n".join(["- " + item[0] for item in para_phrases]) - - -input_textbox = gr.Textbox(label="Type your sentence here", lines=5) -output_textbox = gr.Textbox(label="Paraphrases", lines=10) - -demo = gr.Interface( - fn=generate_paraphases, - inputs=input_textbox, - outputs=output_textbox, - examples=[ - "Can you recommed some upscale restaurants in Newyork?", - "What are the famous places we should not miss in Russia?", - ], -) - -demo.launch() diff --git a/spaces/diacanFperku/AutoGPT/Architecte 3d 2014 Ultimate Uptobox.md b/spaces/diacanFperku/AutoGPT/Architecte 3d 2014 Ultimate Uptobox.md deleted file mode 100644 index 9aa0ff21785fb50c65eb69c023ea6b1f4312efc0..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Architecte 3d 2014 Ultimate Uptobox.md +++ /dev/null @@ -1,34 +0,0 @@ -

Architecte 3d 2014 Ultimate Uptobox


Downloadhttps://gohhs.com/2uFTrx



-
-2014 01 05 02 03 09 15 15:36 PRE. SUMMER 1ZSL Phonetics (Groups 2, 3, 4). - -Collection : Description - -3d Architecte Uptobox 3d Ultimate - -Added by : edatulai.in - -1. Design and implement a system for digital call forwarding. (6 Points) - -2. Develop and test a scripting language for managing the architecture of agents and screens. (6 Points) - -3. Create and use a database for recording the user activity (5 Points) - -4. Create and use a web server to manage the business logic (5 Points) - -5. Create and implement a workflow engine for a calendar-based scheduling system (5 Points) - -6. Manage the user interface of your software (5 Points) - -7. Implement a utility to read and write to configuration files for managing the software system (5 Points) - -8. Create a graphical web application based on the technologies you used in this project. (6 Points) - -9. Manage the order of business, create and edit a business rules engine (5 Points) - -Are You Tired Of Reading Summary Of Each Exam,Preparing A huge amounts of questions, Going To The Exam center and sitting for a long Time for the Exams, with insufficient knowledge, To clear the exams and get to the goal. So here is the solution, we provide you summary of the question, according to the Actual Test, and let you know, your mistakes, after completing it, you can move on to the next question, and you can directly go to the point where you want. Before going to take the Actual test, go through these questions and think deeply, for in the exam, the time is limited, and the more you can you know, the better it will be for you. So let’s begin.Architecte 3d 2014 Ultimate Uptobox architecte 3d ultimate uptobox, architecte 3d ultimate 2017 uptobox DOWNLOAD ( 3d 2014 Ultimate Uptobox. Related Collections. SUMMER 1ZSL Phonetics (Groups 2, 3, 4). 42 item. SUMMER 1ZSL Phonetics (Groups 2, 3, 4). 2014 01 05 02 03 09 15 15:36 PRE. SUMMER 1ZSL Phonetics (Groups 2, 3, 4). - -Architecte 3d 2014 Ultimate Uptobox architecte 3 4fefd39f24
-
-
-

diff --git a/spaces/diacanFperku/AutoGPT/Fmrte 2012 V5.2.5 Crack BEST.md b/spaces/diacanFperku/AutoGPT/Fmrte 2012 V5.2.5 Crack BEST.md deleted file mode 100644 index 39a416dc33c7736f8f8f6ee717d4e3ae9b94adcc..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Fmrte 2012 V5.2.5 Crack BEST.md +++ /dev/null @@ -1,27 +0,0 @@ -
-

Fmrte 2012 v5.2.5 crack: How to Edit Football Manager 2012 in Real Time

-

If you are a fan of Football Manager 2012, you may have heard of Fmrte 2012 v5.2.5 crack. Fmrte stands for Football Manager Real Time Editor, a tool that allows you to edit various aspects of the game in real time. You can change players' attributes, skills, contracts, positions, clubs, injuries, bans, etc. You can also edit clubs' finances, reputation, facilities, kits, staff, etc. You can even create your own players and staff, or edit the existing ones.

-

Fmrte 2012 v5.2.5 crack


DOWNLOADhttps://gohhs.com/2uFTo8



-

However, Fmrte is not a free tool. You need to purchase a license to activate it and use all its features. The license costs 4 euros and lasts for one year. If you don't want to pay for it, you may be tempted to use Fmrte 2012 v5.2.5 crack, a program that claims to bypass the activation process and let you use Fmrte for free.

-

Where to Find Fmrte 2012 v5.2.5 crack

-

There are many websites that offer Fmrte 2012 v5.2.5 crack for download, but not all of them are trustworthy or safe. Some of them may contain fake or outdated cracks, or worse, malicious software that can harm your computer or steal your personal information. Therefore, you should be careful when downloading anything from the internet.

-

One of the most popular sources of Fmrte 2012 v5.2.5 crack is YouTube, where you can find many videos that show you how to download and use the crack. For example, there is a video by vageldan1 that has over 66K views and provides a link to download Fmrte 2012 v5.2.5 crack from Mediafire.

-

Another source of Fmrte 2012 v5.2.5 crack is Buzau.org, a website that provides various files for download, such as PDFs, ebooks, software, etc. You can find Fmrte 2012 v5.2.5 crack on Buzau.org by searching for "Fmrte 2012 v5.2.5 crack" or "Fmrte 2012 v525 crack". You will see a PDF file that you can download or view online.

-

How to Use Fmrte 2012 v5.2.5 crack

-

Once you have downloaded Fmrte 2012 v5.2.5 crack from a reliable source, you need to follow these steps to use it:

-
    -
  1. Install Football Manager 2012 on your computer if you haven't already.
  2. -
  3. Download and install Fmrte 2012 from the official website: https://www.fmrte.com/files/file/3-fmrte-for-fm2012/
  4. -
  5. Run Fmrte 2012 and click on "Settings" button.
  6. -
  7. Click on "Activate" button and enter any email and license key.
  8. -
  9. Run Fmrte 2012 v5.2.5 crack and click on "Patch" button.
  10. -
  11. Restart Fmrte 2012 and enjoy editing your game in real time.
  12. -
-

Congratulations! You have successfully used Fmrte 2012 v5.2.5 crack to activate Fmrte 2012 and edit Football Manager 2012 in real time.

-

-

Disclaimer

-

This article is for educational purposes only. We do not condone or encourage the use of illegal software or piracy. Using a crack is against the law and may violate Fmrte's terms of service and license agreement. You may also risk getting infected by malware or viruses when downloading or using a crack. Therefore, we recommend that you purchase a legitimate license of Fmrte from their official website or authorized resellers.

-

Conclusion

-

Fmrte 2012 v5.2.5 crack is a way to activate Fmrte 2012, a tool that allows you to edit Football Manager 2012 in real time. However, using a crack is not legal and may expose you to malware or viruses. Therefore, you should be careful when downloading and using a crack. Alternatively, you can purchase a legitimate license of Fmrte from their official website or authorized resellers.

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Hidrologi Terapan Bambang Trihatmodjo Pdf Download.md b/spaces/diacanFperku/AutoGPT/Hidrologi Terapan Bambang Trihatmodjo Pdf Download.md deleted file mode 100644 index 75d8724c2fdbb06b141d3c19646b4d3177d9a80e..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Hidrologi Terapan Bambang Trihatmodjo Pdf Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

Hidrologi Terapan Bambang Trihatmodjo Pdf Download


Downloadhttps://gohhs.com/2uFTI1



- - 4fefd39f24
-
-
-

diff --git a/spaces/diaoren/OpenSetObstacleDetection/opendet2/modeling/meta_arch/__init__.py b/spaces/diaoren/OpenSetObstacleDetection/opendet2/modeling/meta_arch/__init__.py deleted file mode 100644 index 8b336b1d503a36e7353736cfb68185227abc7af6..0000000000000000000000000000000000000000 --- a/spaces/diaoren/OpenSetObstacleDetection/opendet2/modeling/meta_arch/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .retinanet import OpenSetRetinaNet - -__all__ = list(globals().keys()) diff --git a/spaces/digitalxingtong/Eileen-Bert-Vits2/setup_ffmpeg.py b/spaces/digitalxingtong/Eileen-Bert-Vits2/setup_ffmpeg.py deleted file mode 100644 index 7137ab5faebb6d80740b8c843667458f25596839..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Eileen-Bert-Vits2/setup_ffmpeg.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import sys -import re -from pathlib import Path -import winreg - -def check_ffmpeg_path(): - path_list = os.environ['Path'].split(';') - ffmpeg_found = False - - for path in path_list: - if 'ffmpeg' in path.lower() and 'bin' in path.lower(): - ffmpeg_found = True - print("FFmpeg already installed.") - break - - return ffmpeg_found - -def add_ffmpeg_path_to_user_variable(): - ffmpeg_bin_path = Path('.\\ffmpeg\\bin') - if ffmpeg_bin_path.is_dir(): - abs_path = str(ffmpeg_bin_path.resolve()) - - try: - key = winreg.OpenKey( - winreg.HKEY_CURRENT_USER, - r"Environment", - 0, - winreg.KEY_READ | winreg.KEY_WRITE - ) - - try: - current_path, _ = winreg.QueryValueEx(key, "Path") - if abs_path not in current_path: - new_path = f"{current_path};{abs_path}" - winreg.SetValueEx(key, "Path", 0, winreg.REG_EXPAND_SZ, new_path) - print(f"Added FFmpeg path to user variable 'Path': {abs_path}") - else: - print("FFmpeg path already exists in the user variable 'Path'.") - finally: - winreg.CloseKey(key) - except WindowsError: - print("Error: Unable to modify user variable 'Path'.") - sys.exit(1) - - else: - print("Error: ffmpeg\\bin folder not found in the current path.") - sys.exit(1) - -def main(): - if not check_ffmpeg_path(): - add_ffmpeg_path_to_user_variable() - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/japanese.py b/spaces/digitalxingtong/Nanami-Bert-VITS2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - 
(r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += [marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/elplaguister/Yuuka_TTS/src/transforms.py b/spaces/elplaguister/Yuuka_TTS/src/transforms.py deleted file mode 100644 index d4aee3ca37e631b7552782ca2901638a5850ccb8..0000000000000000000000000000000000000000 --- a/spaces/elplaguister/Yuuka_TTS/src/transforms.py +++ /dev/null @@ -1,194 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - 
- outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) 
- c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet - diff --git a/spaces/emilylearning/spurious_correlation_evaluation/app.py b/spaces/emilylearning/spurious_correlation_evaluation/app.py deleted file mode 100644 index 19013fed44070ac5afa61673ba8d998b4c205cf0..0000000000000000000000000000000000000000 --- a/spaces/emilylearning/spurious_correlation_evaluation/app.py +++ /dev/null @@ -1,472 +0,0 @@ -# %% -import gradio as gr -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import random -from matplotlib.ticker import MaxNLocator -from transformers import pipeline - -MODEL_NAMES = ["bert-base-uncased", "roberta-base", "bert-large-uncased", "roberta-large"] -OWN_MODEL_NAME = 'add-a-model' - -DECIMAL_PLACES = 1 -EPS = 1e-5 # to avoid /0 errors - -# Example date conts -DATE_SPLIT_KEY = "DATE" -START_YEAR = 1801 -STOP_YEAR = 1999 -NUM_PTS = 20 -DATES = np.linspace(START_YEAR, STOP_YEAR, NUM_PTS).astype(int).tolist() -DATES = [f'{d}' for d in DATES] - -# Example place conts -# https://www3.weforum.org/docs/WEF_GGGR_2021.pdf -# Bottom 10 and top 10 Global Gender Gap ranked countries. -PLACE_SPLIT_KEY = "PLACE" -PLACES = [ - "Afghanistan", - "Yemen", - "Iraq", - "Pakistan", - "Syria", - "Democratic Republic of Congo", - "Iran", - "Mali", - "Chad", - "Saudi Arabia", - "Switzerland", - "Ireland", - "Lithuania", - "Rwanda", - "Namibia", - "Sweden", - "New Zealand", - "Norway", - "Finland", - "Iceland"] - - -# Example Reddit interest consts -# in order of increasing self-identified female participation. 
-# See http://bburky.com/subredditgenderratios/ , Minimum subreddit size: 400000 -SUBREDDITS = [ - "GlobalOffensive", - "pcmasterrace", - "nfl", - "sports", - "The_Donald", - "leagueoflegends", - "Overwatch", - "gonewild", - "Futurology", - "space", - "technology", - "gaming", - "Jokes", - "dataisbeautiful", - "woahdude", - "askscience", - "wow", - "anime", - "BlackPeopleTwitter", - "politics", - "pokemon", - "worldnews", - "reddit.com", - "interestingasfuck", - "videos", - "nottheonion", - "television", - "science", - "atheism", - "movies", - "gifs", - "Music", - "trees", - "EarthPorn", - "GetMotivated", - "pokemongo", - "news", - # removing below subreddit as most of the tokens are taken up by it: - # ['ff', '##ff', '##ff', '##fu', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', ...] - # "fffffffuuuuuuuuuuuu", - "Fitness", - "Showerthoughts", - "OldSchoolCool", - "explainlikeimfive", - "todayilearned", - "gameofthrones", - "AdviceAnimals", - "DIY", - "WTF", - "IAmA", - "cringepics", - "tifu", - "mildlyinteresting", - "funny", - "pics", - "LifeProTips", - "creepy", - "personalfinance", - "food", - "AskReddit", - "books", - "aww", - "sex", - "relationships", -] - -GENDERED_LIST = [ - ['he', 'she'], - ['him', 'her'], - ['his', 'hers'], - ["himself", "herself"], - ['male', 'female'], - ['man', 'woman'], - ['men', 'women'], - ["husband", "wife"], - ['father', 'mother'], - ['boyfriend', 'girlfriend'], - ['brother', 'sister'], - ["actor", "actress"], -] - -# %% -# Fire up the models -models = dict() - -for bert_like in MODEL_NAMES: - models[bert_like] = pipeline("fill-mask", model=bert_like) - -# %% - - -def get_gendered_token_ids(): - male_gendered_tokens = [list[0] for list in GENDERED_LIST] - female_gendered_tokens = [list[1] for list in GENDERED_LIST] - - return male_gendered_tokens, female_gendered_tokens - - -def prepare_text_for_masking(input_text, mask_token, gendered_tokens, split_key): - text_w_masks_list = [ - mask_token if word.lower() in gendered_tokens else word for word in input_text.split()] - num_masks = len([m for m in text_w_masks_list if m == mask_token]) - - text_portions = ' '.join(text_w_masks_list).split(split_key) - return text_portions, num_masks - - -def get_avg_prob_from_pipeline_outputs(mask_filled_text, gendered_token, num_preds): - pronoun_preds = [sum([ - pronoun["score"] if pronoun["token_str"].strip().lower() in gendered_token else 0.0 - for pronoun in top_preds]) - for top_preds in mask_filled_text - ] - return round(sum(pronoun_preds) / (EPS + num_preds) * 100, DECIMAL_PLACES) - -# %% - - -def get_figure(df, gender, n_fit=1, model_name=None): - df = df.set_index('x-axis') - cols = df.columns - xs = list(range(len(df))) - ys = df[cols[0]] - fig, ax = plt.subplots() - # Trying small fig due to rendering issues on HF, not on VS Code - fig.set_figheight(3) - fig.set_figwidth(9) - - # find stackoverflow reference - p, C_p = np.polyfit(xs, ys, n_fit, cov=1) - t = np.linspace(min(xs)-1, max(xs)+1, 10*len(xs)) - TT = np.vstack([t**(n_fit-i) for i in range(n_fit+1)]).T - - # matrix multiplication calculates the polynomial values - yi = np.dot(TT, p) - C_yi = np.dot(TT, np.dot(C_p, TT.T)) # C_y = TT*C_z*TT.T - sig_yi = np.sqrt(np.diag(C_yi)) # Standard deviations are sqrt of diagonal - - ax.fill_between(t, yi+sig_yi, yi-sig_yi, alpha=.25) - ax.plot(t, yi, '-') - ax.plot(df, 'ro') - ax.legend(list(df.columns)) - - ax.axis('tight') - ax.set_xlabel("Value injected into input text") - ax.set_title( - f"Probability of predicting {gender} 
pronouns on {model_name}.") - ax.set_ylabel(f"Softmax prob for pronouns") - ax.xaxis.set_major_locator(MaxNLocator(6)) - ax.tick_params(axis='x', labelrotation=5) - return fig - - -# %% -def predict_gender_pronouns( - model_name, - own_model_name, - indie_vars, - split_key, - normalizing, - n_fit, - input_text, -): - """Run inference on input_text for each model type, returning df and plots of percentage - of gender pronouns predicted as female and male in each target text. - """ - if model_name not in MODEL_NAMES: - model = pipeline("fill-mask", model=own_model_name) - model_name = OWN_MODEL_NAME - else: - model = models[model_name] - - mask_token = model.tokenizer.mask_token - - indie_vars_list = indie_vars.split(',') - - male_gendered_tokens, female_gendered_tokens = get_gendered_token_ids() - - text_segments, num_preds = prepare_text_for_masking( - input_text, mask_token, male_gendered_tokens + female_gendered_tokens, split_key) - - male_pronoun_preds = [] - female_pronoun_preds = [] - for indie_var in indie_vars_list: - - target_text = f"{indie_var}".join(text_segments) - mask_filled_text = model(target_text) - # Quick hack as realized return type based on how many MASKs in text. - if type(mask_filled_text[0]) is not list: - mask_filled_text = [mask_filled_text] - - female_pronoun_preds.append(get_avg_prob_from_pipeline_outputs( - mask_filled_text, - female_gendered_tokens, - num_preds - )) - male_pronoun_preds.append(get_avg_prob_from_pipeline_outputs( - mask_filled_text, - male_gendered_tokens, - num_preds - )) - - if normalizing: - total_gendered_probs = np.add( - female_pronoun_preds, male_pronoun_preds) - female_pronoun_preds = np.around( - np.divide(female_pronoun_preds, total_gendered_probs+EPS)*100, - decimals=DECIMAL_PLACES - ) - male_pronoun_preds = np.around( - np.divide(male_pronoun_preds, total_gendered_probs+EPS)*100, - decimals=DECIMAL_PLACES - ) - - results_df = pd.DataFrame({'x-axis': indie_vars_list}) - results_df['female_pronouns'] = female_pronoun_preds - results_df['male_pronouns'] = male_pronoun_preds - female_fig = get_figure(results_df.drop( - 'male_pronouns', axis=1), 'female', n_fit, model_name) - male_fig = get_figure(results_df.drop( - 'female_pronouns', axis=1), 'male', n_fit, model_name) - display_text = f"{random.choice(indie_vars_list)}".join(text_segments) - - return ( - display_text, - female_fig, - male_fig, - results_df, - ) - - -# %% -title = "Causing Gender Pronouns" -description = """ -## Intro -""" - - -date_example = [ - MODEL_NAMES[1], - '', - ', '.join(DATES), - 'DATE', - "False", - 1, - 'She was a teenager in DATE.' -] - - -place_example = [ - MODEL_NAMES[0], - '', - ', '.join(PLACES), - 'PLACE', - "False", - 1, - 'She became an adult in PLACE.' -] - - -subreddit_example = [ - MODEL_NAMES[3], - '', - ', '.join(SUBREDDITS), - 'SUBREDDIT', - "False", - 1, - 'She was a kid. SUBREDDIT.' -] - -own_model_example = [ - OWN_MODEL_NAME, - 'emilyalsentzer/Bio_ClinicalBERT', - ', '.join(DATES), - 'DATE', - "False", - 1, - 'She was exposed to the virus in DATE.' 
-] - - -def date_fn(): - return date_example - - -def place_fn(): - return place_example - - -def reddit_fn(): - return subreddit_example - - -def your_fn(): - return own_model_example - - -# %% -demo = gr.Blocks() -with demo: - gr.Markdown("# Spurious Correlation Evaluation for Pre-trained LLMs") - gr.Markdown("Find spurious correlations between seemingly independent variables (for example between `gender` and `time`) in almost any BERT-like LLM on Hugging Face, below.") - - # gr.Markdown("Note: If there is an issue with the rendering of the results taking longer than expected (more than 10s of seconds), there may be an unexpected issue effecting the hosting. If so, please see this [backup colab notebook](https://colab.research.google.com/drive/1A3a9cy9fERaxkuoX8YNTFhLlhRt_cxMm?usp=sharing).") - - - gr.Markdown("## Instructions for this Demo") - gr.Markdown("1) Click on one of the examples below (where we sweep through a spectrum of `places`, `dates` and `subreddits`) to pre-populate the input fields.") - gr.Markdown("2) Check out the pre-populated fields as you scroll down to the ['Hit Submit...'] button!") - gr.Markdown("3) Repeat steps (1) and (2) with more pre-populated inputs or with your own values in the input fields!") - - gr.Markdown("## Example inputs") - gr.Markdown("Click a button below to pre-populate input fields with example values. Then scroll down to Hit Submit to generate predictions.") - with gr.Row(): - date_gen = gr.Button('Click for date example inputs') - gr.Markdown("<-- x-axis sorted by older to more recent dates:") - - place_gen = gr.Button('Click for country example inputs') - gr.Markdown( - "<-- x-axis sorted by bottom 10 and top 10 [Global Gender Gap](https://www3.weforum.org/docs/WEF_GGGR_2021.pdf) ranked countries:") - - subreddit_gen = gr.Button('Click for Subreddit example inputs') - gr.Markdown( - "<-- x-axis sorted in order of increasing self-identified female participation (see [bburky](http://bburky.com/subredditgenderratios/)): ") - - your_gen = gr.Button('Add-a-model example inputs') - gr.Markdown("<-- x-axis dates, with your own model loaded! 
(If first time, try another example, it can take a while to load new model.)") - - gr.Markdown("## Input fields") - gr.Markdown( - f"A) Pick a spectrum of comma separated values for text injection and x-axis.") - - with gr.Row(): - x_axis = gr.Textbox( - lines=3, - label="A) Comma separated values for text injection and x-axis", - ) - - - gr.Markdown("B) Pick a pre-loaded BERT-family model of interest on the right.") - gr.Markdown(f"Or C) select `{OWN_MODEL_NAME}`, then add the name of any other Hugging Face model that supports the [fill-mask](https://huggingface.co/models?pipeline_tag=fill-mask) task on the right (note: this may take some time to load).") - - with gr.Row(): - model_name = gr.Radio( - MODEL_NAMES + [OWN_MODEL_NAME], - type="value", - label="B) BERT-like model.", - ) - own_model_name = gr.Textbox( - label="C) If you selected an 'add-a-model' model, put any Hugging Face pipeline model name (that supports the fill-mask task) here.", - ) - - gr.Markdown("D) Pick if you want to the predictions normalied to these gendered terms only.") - gr.Markdown("E) Also tell the demo what special token you will use in your input text, that you would like replaced with the spectrum of values you listed above.") - gr.Markdown("And F) the degree of polynomial fit used for high-lighting potential spurious association.") - - - with gr.Row(): - to_normalize = gr.Dropdown( - ["False", "True"], - label="D) Normalize model's predictions to only the gendered ones?", - type="index", - ) - place_holder = gr.Textbox( - label="E) Special token place-holder", - ) - n_fit = gr.Dropdown( - list(range(1, 5)), - label="F) Degree of polynomial fit", - type="value", - ) - - gr.Markdown( - "G) Finally, add input text that includes at least one gendered pronouns and one place-holder token specified above.") - - with gr.Row(): - input_text = gr.Textbox( - lines=2, - label="G) Input text with pronouns and place-holder token", - ) - - gr.Markdown("## Outputs!") - #gr.Markdown("Scroll down and 'Hit Submit'!") - with gr.Row(): - btn = gr.Button("Hit submit to generate predictions!") - - with gr.Row(): - sample_text = gr.Textbox( - type="auto", label="Output text: Sample of text fed to model") - with gr.Row(): - female_fig = gr.Plot(type="auto") - male_fig = gr.Plot(type="auto") - with gr.Row(): - df = gr.Dataframe( - show_label=True, - overflow_row_behaviour="show_ends", - label="Table of softmax probability for pronouns predictions", - ) - - with gr.Row(): - - date_gen.click(date_fn, inputs=[], outputs=[model_name, own_model_name, - x_axis, place_holder, to_normalize, n_fit, input_text]) - place_gen.click(place_fn, inputs=[], outputs=[ - model_name, own_model_name, x_axis, place_holder, to_normalize, n_fit, input_text]) - subreddit_gen.click(reddit_fn, inputs=[], outputs=[ - model_name, own_model_name, x_axis, place_holder, to_normalize, n_fit, input_text]) - your_gen.click(your_fn, inputs=[], outputs=[ - model_name, own_model_name, x_axis, place_holder, to_normalize, n_fit, input_text]) - - btn.click( - predict_gender_pronouns, - inputs=[model_name, own_model_name, x_axis, place_holder, - to_normalize, n_fit, input_text], - outputs=[sample_text, female_fig, male_fig, df]) - - -demo.launch(debug=True) diff --git a/spaces/erbanku/gpt-academic/crazy_functions/test_project/python/dqn/policies.py b/spaces/erbanku/gpt-academic/crazy_functions/test_project/python/dqn/policies.py deleted file mode 100644 index 4ecf39a5fc04b24ad1b809232b186728366987b6..0000000000000000000000000000000000000000 --- 
a/spaces/erbanku/gpt-academic/crazy_functions/test_project/python/dqn/policies.py +++ /dev/null @@ -1,237 +0,0 @@ -from typing import Any, Dict, List, Optional, Type - -import gym -import torch as th -from torch import nn - -from stable_baselines3.common.policies import BasePolicy, register_policy -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp -from stable_baselines3.common.type_aliases import Schedule - - -class QNetwork(BasePolicy): - """ - Action-Value (Q-Value) network for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True, - ): - super(QNetwork, self).__init__( - observation_space, - action_space, - features_extractor=features_extractor, - normalize_images=normalize_images, - ) - - if net_arch is None: - net_arch = [64, 64] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.features_extractor = features_extractor - self.features_dim = features_dim - self.normalize_images = normalize_images - action_dim = self.action_space.n # number of actions - q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn) - self.q_net = nn.Sequential(*q_net) - - def forward(self, obs: th.Tensor) -> th.Tensor: - """ - Predict the q-values. - - :param obs: Observation - :return: The estimated Q-Value for each action. - """ - return self.q_net(self.extract_features(obs)) - - def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor: - q_values = self.forward(observation) - # Greedy action - action = q_values.argmax(dim=1).reshape(-1) - return action - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_arch, - features_dim=self.features_dim, - activation_fn=self.activation_fn, - features_extractor=self.features_extractor, - ) - ) - return data - - -class DQNPolicy(BasePolicy): - """ - Policy class with Q-Value Net and target net for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. - :param features_extractor_kwargs: Keyword arguments - to pass to the features extractor. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(DQNPolicy, self).__init__( - observation_space, - action_space, - features_extractor_class, - features_extractor_kwargs, - optimizer_class=optimizer_class, - optimizer_kwargs=optimizer_kwargs, - ) - - if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [64, 64] - else: - net_arch = [] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.normalize_images = normalize_images - - self.net_args = { - "observation_space": self.observation_space, - "action_space": self.action_space, - "net_arch": self.net_arch, - "activation_fn": self.activation_fn, - "normalize_images": normalize_images, - } - - self.q_net, self.q_net_target = None, None - self._build(lr_schedule) - - def _build(self, lr_schedule: Schedule) -> None: - """ - Create the network and the optimizer. - - :param lr_schedule: Learning rate schedule - lr_schedule(1) is the initial learning rate - """ - - self.q_net = self.make_q_net() - self.q_net_target = self.make_q_net() - self.q_net_target.load_state_dict(self.q_net.state_dict()) - - # Setup optimizer with initial learning rate - self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) - - def make_q_net(self) -> QNetwork: - # Make sure we always have separate networks for features extractors etc - net_args = self._update_features_extractor(self.net_args, features_extractor=None) - return QNetwork(**net_args).to(self.device) - - def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self._predict(obs, deterministic=deterministic) - - def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self.q_net._predict(obs, deterministic=deterministic) - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_args["net_arch"], - activation_fn=self.net_args["activation_fn"], - lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone - optimizer_class=self.optimizer_class, - optimizer_kwargs=self.optimizer_kwargs, - features_extractor_class=self.features_extractor_class, - features_extractor_kwargs=self.features_extractor_kwargs, - ) - ) - return data - - -MlpPolicy = DQNPolicy - - -class CnnPolicy(DQNPolicy): - """ - Policy class for DQN when using images as input. - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(CnnPolicy, self).__init__( - observation_space, - action_space, - lr_schedule, - net_arch, - activation_fn, - features_extractor_class, - features_extractor_kwargs, - normalize_images, - optimizer_class, - optimizer_kwargs, - ) - - -register_policy("MlpPolicy", MlpPolicy) -register_policy("CnnPolicy", CnnPolicy) diff --git a/spaces/fatiXbelha/sd/Bus Simulator Indonesia APK 3.7.1 The Ultimate Bus Simulation Game for Android.md b/spaces/fatiXbelha/sd/Bus Simulator Indonesia APK 3.7.1 The Ultimate Bus Simulation Game for Android.md deleted file mode 100644 index b951bc7f113b7c0c6deb0e6dc0b3ddbb829768e3..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Bus Simulator Indonesia APK 3.7.1 The Ultimate Bus Simulation Game for Android.md +++ /dev/null @@ -1,115 +0,0 @@ -
-

Download Bus Simulator Indonesia APK 3.7.1: A Fun and Realistic Game for Android Users

-

Do you love driving buses? Do you want to experience the thrill of driving in Indonesia? If yes, then you should download Bus Simulator Indonesia APK 3.7.1, a popular and realistic game for Android users.

-

Bus Simulator Indonesia is a game that lets you drive various types of buses in Indonesia, such as city buses, intercity buses, tourist buses, and more. You can design your own bus, customize your driving settings, explore different locations, and interact with other players online.

-




-

In this article, we will tell you what Bus Simulator Indonesia is, what features it has, how to download and install it, and why you should download it. Let's get started!

-

What is Bus Simulator Indonesia?

-

Bus Simulator Indonesia is a game developed by Maleo, an Indonesian game developer that specializes in simulation games. The game was released in 2017 and has since gained millions of downloads and positive reviews from users around the world.

-

Bus Simulator Indonesia is a game that simulates the life of a bus driver in Indonesia. You can choose from various types of buses, such as city buses, intercity buses, tourist buses, school buses, and more. You can also design your own bus by changing its color, logo, accessories, and interior.

-

The game allows you to drive across various locations in Indonesia, such as Jakarta, Bali, Sumatra, Java, and more. You can enjoy the beautiful scenery, the traffic, the weather, and the culture of Indonesia as you drive.

-

You can also customize your driving experience by changing the camera angle, the steering mode, the traffic rules, the speed limit, and more. You can even honk your horn, open the door, turn on the lights, and use the wipers as you drive.

-

Moreover, you can interact with other players online by joining multiplayer mode or chatting with them in the game. You can also share your bus designs and driving skills with other players and compete with them in leaderboards and achievements.

-

-

How to download and install Bus Simulator Indonesia APK 3.7.1?

-

If you want to download and install Bus Simulator Indonesia APK 3.7.1 on your Android device, you need to follow some simple steps.

-

Requirements for downloading Bus Simulator Indonesia APK 3.7.1

-

Before you download and install Bus Simulator Indonesia APK 3.7.1, you need to make sure that your device meets the following requirements:

-
    -
• Your device must have Android version 4.2 or higher.
• Your device must have at least 300 MB of free storage space.
• Your device must have a stable internet connection.
• You must enable unknown sources in your device settings.
-

Steps to download and install Bus Simulator Indonesia APK 3.7.1

-

After you have checked the requirements, you can follow these steps to download and install Bus Simulator Indonesia APK 3.7.1:

-
    -
1. Go to this link to download the Bus Simulator Indonesia APK 3.7.1 file.
2. Once the download is complete, locate the file in your device's file manager and tap on it to install it.
3. Wait for the installation process to finish and grant the necessary permissions to the app.
4. Launch the app and enjoy playing Bus Simulator Indonesia on your device. (If you prefer to sideload from a computer, see the sketch below.)
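For readers who prefer the command line, the same sideload can be scripted from a computer. Below is a minimal Python sketch, assuming USB debugging is enabled on the phone and the adb tool is installed on the PC; the APK filename is hypothetical:

    import subprocess

    APK_PATH = "bus_simulator_indonesia_3.7.1.apk"  # hypothetical local filename

    # "adb install -r" sideloads the APK onto the connected device,
    # replacing any existing installation in place.
    subprocess.run(["adb", "install", "-r", APK_PATH], check=True)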
-

Why should you download Bus Simulator Indonesia APK 3.7.1?

-

You might be wondering why you should download Bus Simulator Indonesia APK 3.7.1 instead of getting it from the Google Play Store. Well, there are some good reasons for that.

-

Benefits of downloading Bus Simulator Indonesia APK 3.7.1

-

Here are some of the benefits of downloading Bus Simulator Indonesia APK 3.7.1:

-

Enjoy the latest version of the game

-

By downloading Bus Simulator Indonesia APK 3.7.1, you can enjoy the latest version of the game that was released on June 16, 2023. This version includes some new features and improvements, such as:

-
    -
• New bus models and liveries
• New maps and routes
• New traffic signs and signals
• New sound effects and music
• Bug fixes and performance enhancements
-

Get access to new features and updates

-

Another benefit of downloading Bus Simulator Indonesia APK 3.7.1 is that you can get access to new features and updates that are not available in the Google Play Store version. For example, you can:

-
    -
• Use the photo mode to take screenshots of your bus and share them with other players
• Use the video mode to record your driving and upload the clips to YouTube or other platforms
• Use the mod mode to create your own bus models and liveries and share them with other players
• Use the cheat mode to unlock all buses, locations, and settings without spending any money
-

Avoid any bugs or errors

-

The last benefit of downloading Bus Simulator Indonesia APK 3.7.1 is that you can avoid any bugs or errors that might occur in the Google Play Store version. Sometimes, the Google Play Store version might not be compatible with your device or might have some glitches that affect your gameplay. By downloading Bus Simulator Indonesia APK 3.7.1, you can ensure that you have a smooth and error-free gaming experience.

-

Conclusion

-

Bus Simulator Indonesia is a fun and realistic game for Android users who love driving buses in Indonesia. You can design your own bus, drive across various locations, customize your driving experience, and interact with other players online.

-

If you want to download Bus Simulator Indonesia APK 3.7.1, you can follow the steps we have provided in this article. You can also enjoy the benefits of downloading it, such as having the latest version of the game, getting access to new features and updates, and avoiding bugs or errors.

-

We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.

-

FAQs

-

Here are some of the frequently asked questions about Bus Simulator Indonesia APK 3.7.1:

-

Is Bus Simulator Indonesia APK 3.7.1 safe to download?

-

Yes, Bus Simulator Indonesia APK 3.7.1 is safe to download as long as you download it from a trusted source like this link. We have tested the file and found no viruses or malware in it.

-

Is Bus Simulator Indonesia APK 3.7.1 free to download?

-

Yes, Bus Simulator Indonesia APK 3.7.1 is free to download and play. However, some in-game items and features might require real money to purchase or unlock.

-

How much storage space does Bus Simulator Indonesia APK 3.7.1 require?

-

Bus Simulator Indonesia APK 3.7.1 requires about 300 MB of storage space on your device.

-

Can I play Bus Simulator Indonesia offline?

-

Yes, you can play Bus Simulator Indonesia offline without an internet connection. However, some features like multiplayer mode, chat, and leaderboards might not work offline.

-

Can I play Bus Simulator Indonesia on PC?

-

Yes, you can play Bus Simulator Indonesia on PC by using an Android emulator like Bluestacks or Nox Player.

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Hyper Front Lite APK OBB for Android - Free Action Game with Sci-Fi Elements.md b/spaces/fatiXbelha/sd/Download Hyper Front Lite APK OBB for Android - Free Action Game with Sci-Fi Elements.md deleted file mode 100644 index 3d8ee9f9751b1c834fa824223a7e1f380efdf00b..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Hyper Front Lite APK OBB for Android - Free Action Game with Sci-Fi Elements.md +++ /dev/null @@ -1,109 +0,0 @@ -
-

How to Download Hyper Front Lite APK + OBB for Android

-

Are you looking for a thrilling and tactical first-person shooter game on your mobile device? Do you want to experience the excitement of a 5v5 competitive match in a near-future sci-fi world? If yes, then you should try Hyper Front Lite, a new FPS game from NTES Games that offers intense PVP action and unique hero abilities. In this article, we will show you how to download and install Hyper Front Lite APK + OBB for Android, as well as some tips and tricks for playing the game.

-




-

What is Hyper Front Lite?

-

Hyper Front Lite is a lite version of Hyper Front, a 5v5 competitive FPS game that features realistic graphics, smooth controls, and diverse gameplay modes. The lite version requires less storage and memory for a smooth in-game experience, and it supports account cross-save with the standard version. Hyper Front Lite is set in a near-future sci-fi world where you can choose from different heroes with unique abilities and weapons to create more tactical opportunities. You can also customize your loadout, skins, and emotes to suit your style. Hyper Front Lite offers various PVP modes, such as Team Deathmatch, Capture the Flag, Bomb Defuse, and more. You can also participate in online events, such as the Bi-weekly Challenge, to compete for exclusive rewards.

-

Features of Hyper Front Lite

-
    -
• 20+ realistic guns with different stats and attachments
• 10+ heroes with unique abilities and playstyles
• Various PVP modes with different objectives and maps
• Online events with exclusive rewards and rankings
• Customizable loadout, skins, and emotes
• Account cross-save with the standard version
-

Requirements for Hyper Front Lite

- - - - - - -
Minimum RequirementsRecommended Requirements
Android 5.0 or higherAndroid 8.0 or higher
2 GB RAM or more4 GB RAM or more
1 GB free storage space or more2 GB free storage space or more
A stable internet connectionA fast internet connection
-

How to Download and Install Hyper Front Lite APK + OBB

-

To download and install Hyper Front Lite APK + OBB on your Android device, you need to follow these steps:

-

Step 1: Download the APK and OBB files

-

You can download the APK and OBB files from reliable sources online, such as APKCombo or ONEPICGAMES. Make sure you download the latest version of the game, which is currently v1.7.1. The APK file size is about 826 MB, while the OBB file size is about 1.6 GB.
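Because third-party APK mirrors vary in trustworthiness, it is worth comparing the downloaded files against a checksum published by the source whenever one is available. A minimal Python sketch (the filename is hypothetical, and the reference value must come from the download page):

    import hashlib

    def sha256sum(path):
        # Hash the file in 1 MB chunks so large APK/OBB files fit in memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()

    print(sha256sum("hyper_front_lite_1.7.1.apk"))  # compare against the published value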

-

Step 2: Enable unknown sources on your device

-

To install the APK file, you need to enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.

Step 3: Install the APK file

-

Once you have downloaded the APK file, locate it in your device's file manager and tap on it to start the installation process. You may see a pop-up window asking for your permission to install the app. Tap on Install and wait for the installation to complete.

-

-

Step 4: Extract and copy the OBB file to the Android/obb folder

-

After installing the APK file, you need to extract and copy the OBB file to the Android/obb folder on your device. To do this, you need a file extractor app, such as ZArchiver or RAR. Open the file extractor app and locate the OBB file that you downloaded. Tap on it and select Extract to... Choose a destination folder where you want to extract the OBB file. The extracted OBB file should have a folder name like com.netease.hyperfront.lite. Copy this folder and paste it into the Android/obb folder on your device. If you don't have an obb folder, you can create one.
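Alternatively, this step can be done from a computer rather than with an on-device extractor. The sketch below assumes the download is a ZIP archive, that USB debugging is enabled, and that adb is installed; the archive name is hypothetical, while the package folder name is the one mentioned above:

    import subprocess
    import zipfile
    from pathlib import Path

    OBB_ZIP = "hyper_front_lite_obb.zip"   # hypothetical archive name
    PKG = "com.netease.hyperfront.lite"    # package folder name used by the game

    out_dir = Path("extracted")
    with zipfile.ZipFile(OBB_ZIP) as zf:
        zf.extractall(out_dir)             # unpack the archive locally first

    # Copy the whole package folder into the device's Android/obb directory.
    subprocess.run(
        ["adb", "push", str(out_dir / PKG), f"/sdcard/Android/obb/{PKG}"],
        check=True,
    )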

-

Step 5: Launch the game and enjoy

-

Now you are ready to launch the game and enjoy. To do this, go to your app drawer and tap on the Hyper Front Lite icon. You may need to grant some permissions to the game, such as storage and microphone access. After that, you can log in with your account or create a new one. You can also link your account with Facebook or Google for easy access. Once you are in the game, you can choose your hero, mode, and map and start playing.

-

Tips and Tricks for Playing Hyper Front Lite

-

Hyper Front Lite is a fast-paced and strategic FPS game that requires skill, teamwork, and tactics. Here are some tips and tricks for playing the game:

-

Choose your hero wisely

-

Hyper Front Lite has 10+ heroes with different abilities and playstyles. You can choose from assault, sniper, support, tank, and more. Each hero has a primary weapon, a secondary weapon, and two abilities that can be activated during the match. You should choose a hero that suits your preference and matches your team's composition. For example, if your team lacks a healer, you can choose a support hero like Medusa or Phoenix. If your team needs more firepower, you can choose an assault hero like Blaze or Reaper.

-

Master your weapons and abilities

-

Each hero has a unique set of weapons and abilities that can give you an edge in combat. You should master how to use them effectively and efficiently. For example, you should know when to switch between your primary and secondary weapons, how to aim and shoot accurately, how to reload and manage your ammo, how to use your abilities at the right time and place, how to combo your abilities with your teammates' abilities, and how to counter your enemies' abilities. You should also upgrade your weapons and abilities with coins that you earn from playing matches.

-

Communicate and cooperate with your team

-

Hyper Front Lite is a team-based game that requires communication and cooperation with your teammates. You should use the voice chat or text chat feature to communicate with your team during the match. You should also use the ping system to mark enemies, locations, items, or objectives on the map. You should coordinate with your team on strategies, tactics, roles, and objectives. You should also support your team by healing them, covering them, reviving them, or providing them with ammo or items.

-

Use cover and movement to your advantage

-

Hyper Front Lite has various maps with different layouts, terrains, structures, and objects. You should use them to your advantage by using cover and movement. You should use cover to hide from enemy fire, peek out to shoot back, or flank them from behind. You should also use movement to dodge enemy fire, jump over obstacles, slide under gaps, or dash forward or backward. You should also use your abilities to enhance your cover and movement options. For example, you can use Blaze's Fire Wall ability to create a temporary cover or Reaper's Shadow Step ability to teleport behind enemies.

-

Conclusion

-

Hyper Front Lite is a fun and exciting FPS game that offers realistic graphics, smooth controls, diverse gameplay modes, and unique hero abilities. You can download and install Hyper Front Lite APK + OBB for Android by following the steps in this article. You can also improve your skills and performance by following the tips and tricks in this article. Hyper Front Lite is a game that will keep you hooked and entertained for hours. Download it now and join the battle!

FAQs

-
    -
• Q: Is Hyper Front Lite free to play?
• A: Yes, Hyper Front Lite is free to play. However, it may contain some in-app purchases and ads.
• Q: Can I play Hyper Front Lite offline?
• A: No, Hyper Front Lite requires an internet connection to play.
• Q: Can I play Hyper Front Lite with my friends?
• A: Yes, you can play Hyper Front Lite with your friends by inviting them to your team or joining their team. You can also chat with them in the game.
• Q: How can I get more coins in Hyper Front Lite?
• A: You can get more coins in Hyper Front Lite by playing matches, completing missions, participating in events, or watching ads.
• Q: How can I contact the developers of Hyper Front Lite?
• A: You can contact the developers of Hyper Front Lite by sending an email to hyperfront@service.netease.com or visiting their official website or social media pages.

-
-
\ No newline at end of file diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/utils/train_utils.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/utils/train_utils.py deleted file mode 100644 index 0c55177f7442010bc1fcc64de3d142585c22adc0..0000000000000000000000000000000000000000 --- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/utils/train_utils.py +++ /dev/null @@ -1,13 +0,0 @@ - -def aggregate_loss_dict(agg_loss_dict): - mean_vals = {} - for output in agg_loss_dict: - for key in output: - mean_vals[key] = mean_vals.setdefault(key, []) + [output[key]] - for key in mean_vals: - if len(mean_vals[key]) > 0: - mean_vals[key] = sum(mean_vals[key]) / len(mean_vals[key]) - else: - print('{} has no value'.format(key)) - mean_vals[key] = 0 - return mean_vals diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/AI Vocal Remover App The Best Way to Isolate Vocals and Instruments in Music.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/AI Vocal Remover App The Best Way to Isolate Vocals and Instruments in Music.md deleted file mode 100644 index c117087de2627c1086a5aa7c210bae95ce811c39..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/AI Vocal Remover App The Best Way to Isolate Vocals and Instruments in Music.md +++ /dev/null @@ -1,100 +0,0 @@ -
-

How to Download AI Vocal Remover App

-

Do you want to remove vocals from any song or video? Do you want to isolate or mute different instruments in any track? Do you want to practice, learn, remix, or produce music with the power of AI? If you answered yes to any of these questions, then you need to download AI vocal remover app.

-

AI vocal remover app is a next-generation service that allows you to extract vocals, accompaniment, and various instruments from any audio or video file. You can also adjust the speed, pitch, key, metronome, and chords of any song with one click. Whether you are a singer, musician, producer, DJ, or music lover, AI vocal remover app can help you improve your skills and creativity.
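LALAL.AI's own speed and pitch engine is proprietary, but the underlying operations are standard digital signal processing. As a rough open-source illustration rather than the app's actual code (the input filename is hypothetical), the librosa library can time-stretch and pitch-shift a track:

    # pip install librosa soundfile
    import librosa
    import soundfile as sf

    y, sr = librosa.load("song.mp3")                            # hypothetical input file
    slower = librosa.effects.time_stretch(y, rate=0.8)          # 80% speed, pitch unchanged
    higher = librosa.effects.pitch_shift(y, sr=sr, n_steps=2)   # up two semitones, tempo unchanged
    sf.write("slower.wav", slower, sr)
    sf.write("higher.wav", higher, sr)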

-

download ai vocal remover app


Download Ziphttps://gohhs.com/2uPsqc



-

In this article, we will explain what is AI vocal remover app, what are its benefits, how it works, and how to download it for different devices. Let's get started!

-

What is AI Vocal Remover App?

-

AI vocal remover app is a service that uses artificial intelligence to separate vocals and instruments in any song or video. It operates on a unique AI technology that quickly and accurately isolates voice and 9 other stems, such as drums, bass, piano, electric guitar, acoustic guitar, and synthesizer.

-

AI vocal remover app is not just a vocal remover. It is also a music source separation tool that allows you to remove or isolate any instrument in any track. You can also use it to master your tracks and remix songs with ease.

-

Benefits of using AI Vocal Remover App

-

There are many benefits of using AI vocal remover app for your music needs. Here are some of them:

-
    -
• You can practice singing or playing along with your favorite artists in any key, at any speed.
• You can learn music by detecting and displaying chords in real time (a rough offline approximation is sketched after this list).
• You can remix songs by removing or isolating vocals and instruments in any song.
• You can produce music by extracting stems and creating new mixes.
• You can enjoy high-quality stem splitting based on the world's #1 AI-powered technology.
• You can access your private library from any device, stored securely in the cloud.
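The app's real-time chord detector is proprietary; as a loose offline approximation of the idea (not the app's method, and the input filename is hypothetical), chroma features can suggest the strongest pitch class in each frame:

    # pip install librosa
    import librosa
    import numpy as np

    PITCHES = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]

    y, sr = librosa.load("song.mp3")                  # hypothetical input file
    chroma = librosa.feature.chroma_cqt(y=y, sr=sr)   # 12 pitch classes x frames
    roots = [PITCHES[i] for i in np.argmax(chroma, axis=0)]
    print(roots[:20])                                 # crude per-frame chord-root guesses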
-

How does AI Vocal Remover App work?

-

AI vocal remover app works by using a new Phoenix algorithm that is based on an evolutionary approach to signal processing. It analyzes the audio or video file and separates it into different stems using deep neural networks. The algorithm is faster and more accurate than any other method on the market, and it can handle any genre, language, and quality of audio or video file.
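The Phoenix algorithm itself is not public, so it cannot be shown here. As a stand-in, the same general technique of neural stem separation can be illustrated with the open-source Spleeter library (not LALAL.AI's code; the input filename is hypothetical):

    # pip install spleeter
    from spleeter.separator import Separator

    separator = Separator("spleeter:2stems")          # vocals vs. accompaniment model
    separator.separate_to_file("song.mp3", "output")  # writes output/song/vocals.wav etc.

Spleeter also ships 4- and 5-stem models, which is the same idea behind isolating individual instruments.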

-

AI vocal remover app is very easy to use. You just need to upload your file, choose the stems you want to extract, and download the results. You can also edit the file online by adjusting the speed, pitch, key, metronome, and chords. You can preview the results before downloading them and share them with your friends or social media.

-

-

How to download AI Vocal Remover App for different devices

-

The AI vocal remover app is available for Windows, Mac, iOS, and Android devices. You can download it from the official website or from the app stores. Here are the steps for each device:

-

How to download AI Vocal Remover App for Windows

-

Step 1: Visit the official website of AI Vocal Remover App

-

Go to https://www.lalal.ai and click on the Download button in the top right corner of the page. A pop-up window will appear with the different download options for the app.

-

Step 2: Choose your package and make the payment

-

You can choose from three packages: Lite, Standard, and Pro. Each package has different features and prices, so compare them and select the one that suits your needs. If you have a coupon code, you can use it to get a discount. After choosing your package, click on the Buy Now button and enter your payment details. You can pay with PayPal or credit card.

-

Step 3: Download and install the app on your PC

-

After making the payment, you will receive an email with a download link and a license key. Click on the link and download the app on your PC. Then, run the installer and follow the instructions to install the app on your PC. After installing the app, open it and enter your license key to activate it.

-

How to download AI Vocal Remover App for Mac

-

Steps 1 and 2: Visit the official website, then choose your package and make the payment

-

These two steps are identical to Steps 1 and 2 for Windows above: go to https://www.lalal.ai, click on the Download button in the top right corner, pick the Lite, Standard, or Pro package, and pay with PayPal or credit card.

-

Step 3: Download and install the app on your Mac

-

After making the payment, you will receive an email with a download link and a license key. Click on the link and download the app on your Mac. Then, open the downloaded file and drag the app into your Applications folder. After installing the app, open it and enter your license key to activate it.

-

How to download AI Vocal Remover App for iOS

-

Step 1: Visit the App Store and search for LALAL.AI: #1 AI Vocal Remover

-

Go to the App Store on your iPhone or iPad and search for LALAL.AI: #1 AI Vocal Remover in the search bar. You will see the app icon with a purple background and a white microphone. Tap on the app to see more details.

-

Step 2: Tap on the Get button and install the app on your iPhone or iPad

-

Tap on the Get button next to the app icon and enter your Apple ID password or use Face ID or Touch ID to confirm the installation. The app will start downloading and installing on your device. You can see the progress on your home screen.

-

Step 3: Open the app and sign up or log in with your email, Google, Apple ID or Facebook account

-

After installing the app, open it from your home screen and tap on the Sign Up button to create a new account. You can also tap on the Log In button if you already have an account. You can sign up or log in with your email, Google, Apple ID or Facebook account. You will need to verify your email address if you sign up with your email.

-

How to download AI Vocal Remover App for Android

-

Step 1: Visit the Google Play Store and search for LALAL.AI: #1 AI Vocal Remover

-

Go to the Google Play Store on your Android device and search for LALAL.AI: #1 AI Vocal Remover in the search bar. You will see the app icon with a purple background and a white microphone. Tap on the app to see more details.

-

Step 2: Tap on the Install button and install the app on your Android device

-

Tap on the Install button next to the app icon and accept the permissions required by the app. The app will start downloading and installing on your device. You can see the progress on your notification bar.

-

Step 3: Open the app and sign up or log in with your email, Google, Apple ID or Facebook account

-

After installing the app, open it from your app drawer and tap on the Sign Up button to create a new account. You can also tap on the Log In button if you already have an account. You can sign up or log in with your email, Google, Apple ID or Facebook account. You will need to verify your email address if you sign up with your email.

-

Conclusion

-

The AI vocal remover app is a powerful and easy-to-use service that removes vocals and instruments from any song or video. You can also use it to practice, learn, remix, or produce music with AI. It is available for Windows, Mac, iOS, and Android, from the official website or the app stores. On desktop, you choose your package, make the payment, download and install the app, and activate it with your license key; on mobile, you install the app and sign in with your account. Then you can start enjoying its features.

-

We hope this article helped you learn how to download the AI vocal remover app on your device. If you have any questions or feedback, please let us know in the comments below. Thank you for reading!

-

FAQs

-
    -
  • Q: How much does AI vocal remover app cost?
  • -
  • A: AI vocal remover app offers three packages: Lite, Standard, and Pro. The Lite package costs $10 per month and allows you to process up to 10 files per month with a maximum file size of 50 MB. The Standard package costs $20 per month and allows you to process up to 30 files per month with a maximum file size of 100 MB. The Pro package costs $50 per month and allows you to process up to 100 files per month with a maximum file size of 200 MB.
  • -
  • Q: How long does it take to process a file with AI vocal remover app?
  • -
  • A: It depends on the size and quality of the file, but usually it takes less than a minute to process a file with AI vocal remover app.
  • -
  • Q: What formats are supported by AI vocal remover app?
  • -
  • A: AI vocal remover app supports MP3, WAV, FLAC, OGG, M4A, AAC, WMA, MP4, MOV, AVI, MKV, WMV formats for input and output files.
  • -
  • Q: Can I use AI vocal remover app offline?
  • -
  • A: No, you need an internet connection to use AI vocal remover app.
  • -
  • Q: Is AI vocal remover app safe and secure?
  • -
  • A: Yes, the AI vocal remover app is safe and secure. It uses SSL encryption to protect your data and does not store or share your files without your permission.
  • -

-
-
\ No newline at end of file diff --git a/spaces/fffiloni/Music_Source_Separation/bytesep/plot_results/plot_vctk-musdb18.py b/spaces/fffiloni/Music_Source_Separation/bytesep/plot_results/plot_vctk-musdb18.py deleted file mode 100644 index b7cc52af1e20b8f051bbe0dd8fd30c60a0dbf587..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Music_Source_Separation/bytesep/plot_results/plot_vctk-musdb18.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import sys -import numpy as np -import argparse -import h5py -import math -import time -import logging -import pickle -import matplotlib.pyplot as plt - - -def load_sdrs(workspace, task_name, filename, config, gpus): - - stat_path = os.path.join( - workspace, - "statistics", - task_name, - filename, - "config={},gpus={}".format(config, gpus), - "statistics.pkl", - ) - - stat_dict = pickle.load(open(stat_path, 'rb')) - - median_sdrs = [e['sdr'] for e in stat_dict['test']] - - return median_sdrs - - -def plot_statistics(args): - - # arguments & parameters - workspace = args.workspace - select = args.select - task_name = "vctk-musdb18" - filename = "train" - - # paths - fig_path = os.path.join('results', task_name, "sdr_{}.pdf".format(select)) - os.makedirs(os.path.dirname(fig_path), exist_ok=True) - - linewidth = 1 - lines = [] - fig, ax = plt.subplots(1, 1, figsize=(8, 6)) - ylim = 30 - expand = 1 - - if select == '1a': - sdrs = load_sdrs(workspace, task_name, filename, config='unet', gpus=1) - (line,) = ax.plot(sdrs, label='UNet,l1_wav', linewidth=linewidth) - lines.append(line) - - else: - raise Exception('Error!') - - eval_every_iterations = 10000 - total_ticks = 50 - ticks_freq = 10 - - ax.set_ylim(0, ylim) - ax.set_xlim(0, total_ticks) - ax.xaxis.set_ticks(np.arange(0, total_ticks + 1, ticks_freq)) - ax.xaxis.set_ticklabels( - np.arange( - 0, - total_ticks * eval_every_iterations + 1, - ticks_freq * eval_every_iterations, - ) - ) - ax.yaxis.set_ticks(np.arange(ylim + 1)) - ax.yaxis.set_ticklabels(np.arange(ylim + 1)) - ax.grid(color='b', linestyle='solid', linewidth=0.3) - plt.legend(handles=lines, loc=4) - - plt.savefig(fig_path) - print('Save figure to {}'.format(fig_path)) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--workspace', type=str, required=True) - parser.add_argument('--select', type=str, required=True) - - args = parser.parse_args() - - plot_statistics(args) diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/backup_envs/gotodoortalkhard.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/backup_envs/gotodoortalkhard.py deleted file mode 100644 index f3043646391777d9c5ef3786fc17eb96c9e5ba47..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/backup_envs/gotodoortalkhard.py +++ /dev/null @@ -1,199 +0,0 @@ -from gym_minigrid.minigrid import * -from gym_minigrid.register import register - - - -class TalkHardGrammar(object): - - templates = ["Where is", "What is"] - things = ["the exit", "the chair"] - - grammar_action_space = spaces.MultiDiscrete([len(templates), len(things)]) - - @classmethod - def construct_utterance(cls, action): - return cls.templates[int(action[0])] + " " + cls.things[int(action[1])] + "." 
- - -class GoToDoorTalkHardEnv(MultiModalMiniGridEnv): - """ - Environment in which the agent is instructed to go to a given object - named using an English text string - """ - - def __init__( - self, - size=5, - hear_yourself=False, - ): - assert size >= 5 - - super().__init__( - grid_size=size, - max_steps=5*size**2, - # Set this to True for maximum speed - see_through_walls=True, - actions=MiniGridEnv.Actions, - action_space=spaces.MultiDiscrete([ - len(MiniGridEnv.Actions), - *TalkHardGrammar.grammar_action_space.nvec - ]) - ) - self.hear_yourself = hear_yourself - - def _gen_grid(self, width, height): - # Create the grid - self.grid = Grid(width, height) - - # Randomly vary the room width and height - width = self._rand_int(5, width+1) - height = self._rand_int(5, height+1) - - # Generate the surrounding walls - self.grid.wall_rect(0, 0, width, height) - - # Generate the 4 doors at random positions - doorPos = [] - doorPos.append((self._rand_int(2, width-2), 0)) - doorPos.append((self._rand_int(2, width-2), height-1)) - doorPos.append((0, self._rand_int(2, height-2))) - doorPos.append((width-1, self._rand_int(2, height-2))) - - # Generate the door colors - doorColors = [] - while len(doorColors) < len(doorPos): - color = self._rand_elem(COLOR_NAMES) - if color in doorColors: - continue - doorColors.append(color) - - # Place the doors in the grid - for idx, pos in enumerate(doorPos): - color = doorColors[idx] - self.grid.set(*pos, Door(color)) - - # Randomize the agent start position and orientation - self.place_agent(size=(width, height)) - - # Select a random target door - doorIdx = self._rand_int(0, len(doorPos)) - self.target_pos = doorPos[doorIdx] - self.target_color = doorColors[doorIdx] - - # Generate the mission string - self.mission = 'go to the %s door' % self.target_color - - # Initialize the dialogue string - self.dialogue = "This is what you hear. 
" - - def gen_obs(self): - obs = super().gen_obs() - - # add dialogue to obs - obs["dialogue"] = self.dialogue - - return obs - - def step(self, action): - p_action = action[0] - utterance_action = action[1:] - - # assert all nan or neither nan - assert len(set(np.isnan(utterance_action))) == 1 - - speak_flag = not all(np.isnan(utterance_action)) - - if speak_flag: - utterance = TalkHardGrammar.construct_utterance(utterance_action) - - reply = self.mission - NPC_name = "Wizard" - - if self.hear_yourself: - self.dialogue += "YOU: {} \n".format(utterance) - - if utterance == TalkHardGrammar.construct_utterance([0, 0]): - self.dialogue += "{}: {} \n".format(NPC_name, reply) # dummy reply gives mission - - obs, reward, done, info = super().step(p_action) - - ax, ay = self.agent_pos - tx, ty = self.target_pos - - # Don't let the agent open any of the doors - if p_action == self.actions.toggle: - done = True - - # Reward performing done action in front of the target door - if p_action == self.actions.done: - if (ax == tx and abs(ay - ty) == 1) or (ay == ty and abs(ax - tx) == 1): - reward = self._reward() - done = True - - return obs, reward, done, info - - def render(self, *args, **kwargs): - obs = super().render(*args, **kwargs) - self.window.set_caption(self.dialogue, [ - "Gandalf:", - "Jack:", - "John:", - "Where is the exit", - "Open sesame", - ]) - return obs - - -class GoToDoorTalkHard8x8Env(GoToDoorTalkHardEnv): - def __init__(self): - super().__init__(size=8) - - -class GoToDoorTalkHard6x6Env(GoToDoorTalkHardEnv): - def __init__(self): - super().__init__(size=6) - - -# hear yourself -class GoToDoorTalkHardHY8x8Env(GoToDoorTalkHardEnv): - def __init__(self): - super().__init__(size=8, hear_yourself=True) - - -class GoToDoorTalkHardHY6x6Env(GoToDoorTalkHardEnv): - def __init__(self): - super().__init__(size=6, hear_yourself=True) - - -class GoToDoorTalkHardHY5x5Env(GoToDoorTalkHardEnv): - def __init__(self): - super().__init__(size=5, hear_yourself=True) - -register( - id='MiniGrid-GoToDoorTalkHard-5x5-v0', - entry_point='gym_minigrid.envs:GoToDoorTalkHardEnv' -) - -register( - id='MiniGrid-GoToDoorTalkHard-6x6-v0', - entry_point='gym_minigrid.envs:GoToDoorTalkHard6x6Env' -) - -register( - id='MiniGrid-GoToDoorTalkHard-8x8-v0', - entry_point='gym_minigrid.envs:GoToDoorTalkHard8x8Env' -) -register( - id='MiniGrid-GoToDoorTalkHardHY-5x5-v0', - entry_point='gym_minigrid.envs:GoToDoorTalkHardHY5x5Env' -) - -register( - id='MiniGrid-GoToDoorTalkHardHY-6x6-v0', - entry_point='gym_minigrid.envs:GoToDoorTalkHardHY6x6Env' -) - -register( - id='MiniGrid-GoToDoorTalkHardHY-8x8-v0', - entry_point='gym_minigrid.envs:GoToDoorTalkHardHY8x8Env' -) diff --git a/spaces/gary109/HaleyCH_Theme/README.md b/spaces/gary109/HaleyCH_Theme/README.md deleted file mode 100644 index 91f7be468d35943a88af0f1db48a62305e45fb3c..0000000000000000000000000000000000000000 --- a/spaces/gary109/HaleyCH_Theme/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -tags: -- gradio-theme -- track-1 -title: HaleyCH_Theme -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.25.0 -app_file: app.py -pinned: false -license: apache-2.0 -emoji: 🔥 -duplicated_from: HaleyCH/HaleyCH_Theme ---- -# HaleyCH_Theme -## Description -Add a description of this theme here! -## Contributions -Thanks to [@HaleyCH](https://huggingface.co/HaleyCH) for adding this gradio theme! 
\ No newline at end of file diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/roiaware_pool3d.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/roiaware_pool3d.py deleted file mode 100644 index 291b0e5a9b692492c7d7e495ea639c46042e2f18..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/roiaware_pool3d.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch import nn as nn -from torch.autograd import Function - -import annotator.uniformer.mmcv as mmcv -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['roiaware_pool3d_forward', 'roiaware_pool3d_backward']) - - -class RoIAwarePool3d(nn.Module): - """Encode the geometry-specific features of each 3D proposal. - - Please refer to `PartA2 `_ for more - details. - - Args: - out_size (int or tuple): The size of output features. n or - [n1, n2, n3]. - max_pts_per_voxel (int, optional): The maximum number of points per - voxel. Default: 128. - mode (str, optional): Pooling method of RoIAware, 'max' or 'avg'. - Default: 'max'. - """ - - def __init__(self, out_size, max_pts_per_voxel=128, mode='max'): - super().__init__() - - self.out_size = out_size - self.max_pts_per_voxel = max_pts_per_voxel - assert mode in ['max', 'avg'] - pool_mapping = {'max': 0, 'avg': 1} - self.mode = pool_mapping[mode] - - def forward(self, rois, pts, pts_feature): - """ - Args: - rois (torch.Tensor): [N, 7], in LiDAR coordinate, - (x, y, z) is the bottom center of rois. - pts (torch.Tensor): [npoints, 3], coordinates of input points. - pts_feature (torch.Tensor): [npoints, C], features of input points. - - Returns: - pooled_features (torch.Tensor): [N, out_x, out_y, out_z, C] - """ - - return RoIAwarePool3dFunction.apply(rois, pts, pts_feature, - self.out_size, - self.max_pts_per_voxel, self.mode) - - -class RoIAwarePool3dFunction(Function): - - @staticmethod - def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel, - mode): - """ - Args: - rois (torch.Tensor): [N, 7], in LiDAR coordinate, - (x, y, z) is the bottom center of rois. - pts (torch.Tensor): [npoints, 3], coordinates of input points. - pts_feature (torch.Tensor): [npoints, C], features of input points. - out_size (int or tuple): The size of output features. n or - [n1, n2, n3]. - max_pts_per_voxel (int): The maximum number of points per voxel. - Default: 128. - mode (int): Pooling method of RoIAware, 0 (max pool) or 1 (average - pool). - - Returns: - pooled_features (torch.Tensor): [N, out_x, out_y, out_z, C], output - pooled features. 
- """ - - if isinstance(out_size, int): - out_x = out_y = out_z = out_size - else: - assert len(out_size) == 3 - assert mmcv.is_tuple_of(out_size, int) - out_x, out_y, out_z = out_size - - num_rois = rois.shape[0] - num_channels = pts_feature.shape[-1] - num_pts = pts.shape[0] - - pooled_features = pts_feature.new_zeros( - (num_rois, out_x, out_y, out_z, num_channels)) - argmax = pts_feature.new_zeros( - (num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int) - pts_idx_of_voxels = pts_feature.new_zeros( - (num_rois, out_x, out_y, out_z, max_pts_per_voxel), - dtype=torch.int) - - ext_module.roiaware_pool3d_forward(rois, pts, pts_feature, argmax, - pts_idx_of_voxels, pooled_features, - mode) - - ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, mode, - num_pts, num_channels) - return pooled_features - - @staticmethod - def backward(ctx, grad_out): - ret = ctx.roiaware_pool3d_for_backward - pts_idx_of_voxels, argmax, mode, num_pts, num_channels = ret - - grad_in = grad_out.new_zeros((num_pts, num_channels)) - ext_module.roiaware_pool3d_backward(pts_idx_of_voxels, argmax, - grad_out.contiguous(), grad_in, - mode) - - return None, None, grad_in, None, None, None diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/backbones/mobilenet_v2.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/backbones/mobilenet_v2.py deleted file mode 100644 index ab6b3791692a0d1b5da3601875711710b7bd01ba..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/backbones/mobilenet_v2.py +++ /dev/null @@ -1,180 +0,0 @@ -import logging - -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule, constant_init, kaiming_init -from annotator.uniformer.mmcv.runner import load_checkpoint -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES -from ..utils import InvertedResidual, make_divisible - - -@BACKBONES.register_module() -class MobileNetV2(nn.Module): - """MobileNetV2 backbone. - - Args: - widen_factor (float): Width multiplier, multiply number of - channels in each layer by this amount. Default: 1.0. - strides (Sequence[int], optional): Strides of the first block of each - layer. If not specified, default config in ``arch_setting`` will - be used. - dilations (Sequence[int]): Dilation of each layer. - out_indices (None or Sequence[int]): Output from which stages. - Default: (7, ). - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - """ - - # Parameters to build layers. 3 parameters are needed to construct a - # layer, from left to right: expand_ratio, channel, num_blocks. 
- arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4], - [6, 96, 3], [6, 160, 3], [6, 320, 1]] - - def __init__(self, - widen_factor=1., - strides=(1, 2, 2, 2, 1, 2, 1), - dilations=(1, 1, 1, 1, 1, 1, 1), - out_indices=(1, 2, 4, 6), - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - norm_eval=False, - with_cp=False): - super(MobileNetV2, self).__init__() - self.widen_factor = widen_factor - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == len(self.arch_settings) - self.out_indices = out_indices - for index in out_indices: - if index not in range(0, 7): - raise ValueError('the item in out_indices must in ' - f'range(0, 8). But received {index}') - - if frozen_stages not in range(-1, 7): - raise ValueError('frozen_stages must be in range(-1, 7). ' - f'But received {frozen_stages}') - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - - self.in_channels = make_divisible(32 * widen_factor, 8) - - self.conv1 = ConvModule( - in_channels=3, - out_channels=self.in_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.layers = [] - - for i, layer_cfg in enumerate(self.arch_settings): - expand_ratio, channel, num_blocks = layer_cfg - stride = self.strides[i] - dilation = self.dilations[i] - out_channels = make_divisible(channel * widen_factor, 8) - inverted_res_layer = self.make_layer( - out_channels=out_channels, - num_blocks=num_blocks, - stride=stride, - dilation=dilation, - expand_ratio=expand_ratio) - layer_name = f'layer{i + 1}' - self.add_module(layer_name, inverted_res_layer) - self.layers.append(layer_name) - - def make_layer(self, out_channels, num_blocks, stride, dilation, - expand_ratio): - """Stack InvertedResidual blocks to build a layer for MobileNetV2. - - Args: - out_channels (int): out_channels of block. - num_blocks (int): Number of blocks. - stride (int): Stride of the first block. - dilation (int): Dilation of the first block. - expand_ratio (int): Expand the number of channels of the - hidden layer in InvertedResidual by this ratio. 
- """ - layers = [] - for i in range(num_blocks): - layers.append( - InvertedResidual( - self.in_channels, - out_channels, - stride if i == 0 else 1, - expand_ratio=expand_ratio, - dilation=dilation if i == 0 else 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - with_cp=self.with_cp)) - self.in_channels = out_channels - - return nn.Sequential(*layers) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - x = self.conv1(x) - - outs = [] - for i, layer_name in enumerate(self.layers): - layer = getattr(self, layer_name) - x = layer(x) - if i in self.out_indices: - outs.append(x) - - if len(outs) == 1: - return outs[0] - else: - return tuple(outs) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for i in range(1, self.frozen_stages + 1): - layer = getattr(self, f'layer{i}') - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super(MobileNetV2, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() diff --git a/spaces/giswqs/solara-geospatial/pages/04_cesium.py b/spaces/giswqs/solara-geospatial/pages/04_cesium.py deleted file mode 100644 index 3b79dee60f79be578e5bfd2a4a8cf7c6698eec0e..0000000000000000000000000000000000000000 --- a/spaces/giswqs/solara-geospatial/pages/04_cesium.py +++ /dev/null @@ -1,24 +0,0 @@ -import os -import mapwidget.cesium as mapwidget - -import solara - -altitude = solara.reactive(400) -center = solara.reactive((37.655, -122.4175)) - -if os.environ.get('CESIUM_TOKEN') is None: - token = 'YOUR-CESIUM-TOKEN' -else: - token = os.environ.get('CESIUM_TOKEN') - - -@solara.component -def Page(): - with solara.Column(style={"min-width": "500px", "height": "500px"}): - # solara components support reactive variables - solara.SliderInt(label="Zoom level", value=altitude, min=1, max=1000) - # using 3rd party widget library require wiring up the events manually - # using zoom.value and zoom.set - mapwidget.Map.element( # type: ignore - center=center.value, altitude=altitude.value, height='600px', width="100%" - ) diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Alien Ness The Art Of Battle Book !LINK! Download.md b/spaces/gotiQspiryo/whisper-ui/examples/Alien Ness The Art Of Battle Book !LINK! Download.md deleted file mode 100644 index 39f3ed880a54ae101f0e50972532b2d95f412f08..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Alien Ness The Art Of Battle Book !LINK! Download.md +++ /dev/null @@ -1,66 +0,0 @@ -

Alien Ness The Art of Battle Book Download


Download Zip: https://urlgoal.com/2uyLCn



-
-Forming a battle -- Combat prep -- The first round -- Fighting for time and space -- To the killing blow.
-
-
-

diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Cd500 Navi Europa Download [REPACK] How to Backup and Restore Your Data Before Updating.md b/spaces/gotiQspiryo/whisper-ui/examples/Cd500 Navi Europa Download [REPACK] How to Backup and Restore Your Data Before Updating.md deleted file mode 100644 index 915f960d289766d07d4eb0b355a1185e364923c8..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Cd500 Navi Europa Download [REPACK] How to Backup and Restore Your Data Before Updating.md +++ /dev/null @@ -1,6 +0,0 @@ -

Cd500 Navi Europa Download [REPACK]


Download Zip: https://urlgoal.com/2uyMbQ



-
-
-
-

diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Locate the Serial Number On Your Deere 850j Dozer with These Easy Steps.md b/spaces/gotiQspiryo/whisper-ui/examples/Locate the Serial Number On Your Deere 850j Dozer with These Easy Steps.md deleted file mode 100644 index 1e21e9227a06926dbc5f74564fb9d879324feafd..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Locate the Serial Number On Your Deere 850j Dozer with These Easy Steps.md +++ /dev/null @@ -1,6 +0,0 @@ -

Where Is Serial Number On Deere 850j Dozer


DOWNLOAD: https://urlgoal.com/2uyNyS



-
-
-
-
-

diff --git a/spaces/gptjx/02/custom.css b/spaces/gptjx/02/custom.css deleted file mode 100644 index 5143eb138ea2469d8c457c71cb210fd3fb7cbe15..0000000000000000000000000000000000000000 --- a/spaces/gptjx/02/custom.css +++ /dev/null @@ -1,162 +0,0 @@ -:root { - --chatbot-color-light: #F3F3F3; - --chatbot-color-dark: #121111; -} - -/* status_display */ -#status_display { - display: flex; - min-height: 2.5em; - align-items: flex-end; - justify-content: flex-end; -} -#status_display p { - font-size: .85em; - font-family: monospace; - color: var(--body-text-color-subdued); -} - -#chuanhu_chatbot, #status_display { - transition: all 0.6s; -} -/* list */ -ol:not(.options), ul:not(.options) { - padding-inline-start: 2em !important; -} - -/* 亮色 */ -#chuanhu_chatbot { - background-color: var(--chatbot-color-light) !important; -} -[data-testid = "bot"] { - background-color: #FFFFFF !important; -} -[data-testid = "user"] { - background-color: #95EC69 !important; -} -/* 对话气泡 */ -[class *= "message"] { - border-radius: var(--radius-xl) !important; - border: none; - padding: var(--spacing-xl) !important; - font-size: var(--text-md) !important; - line-height: var(--line-md) !important; - min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); - min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); -} -[data-testid = "bot"] { - max-width: 85%; - border-bottom-left-radius: 0 !important; -} -[data-testid = "user"] { - max-width: 85%; - width: auto !important; - border-bottom-right-radius: 0 !important; -} -/* 表格 */ -table { - margin: 1em 0; - border-collapse: collapse; - empty-cells: show; -} -td,th { - border: 1.2px solid var(--border-color-primary) !important; - padding: 0.2em; -} -thead { - background-color: rgba(175,184,193,0.2); -} -thead th { - padding: .5em .2em; -} -/* 行内代码 */ -code { - display: inline; - white-space: break-spaces; - border-radius: 6px; - margin: 0 2px 0 2px; - padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* 代码块 */ -pre code { - display: block; - overflow: auto; - white-space: pre; - background-color: hsla(0, 0%, 0%, 80%)!important; - border-radius: 10px; - padding: 1.4em 1.2em 0em 1.4em; - margin: 1.2em 2em 1.2em 0.5em; - color: #FFF; - box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2); -} -/* 代码高亮样式 */ -.highlight .hll { background-color: #49483e } -.highlight .c { color: #75715e } /* Comment */ -.highlight .err { color: #960050; background-color: #1e0010 } /* Error */ -.highlight .k { color: #66d9ef } /* Keyword */ -.highlight .l { color: #ae81ff } /* Literal */ -.highlight .n { color: #f8f8f2 } /* Name */ -.highlight .o { color: #f92672 } /* Operator */ -.highlight .p { color: #f8f8f2 } /* Punctuation */ -.highlight .ch { color: #75715e } /* Comment.Hashbang */ -.highlight .cm { color: #75715e } /* Comment.Multiline */ -.highlight .cp { color: #75715e } /* Comment.Preproc */ -.highlight .cpf { color: #75715e } /* Comment.PreprocFile */ -.highlight .c1 { color: #75715e } /* Comment.Single */ -.highlight .cs { color: #75715e } /* Comment.Special */ -.highlight .gd { color: #f92672 } /* Generic.Deleted */ -.highlight .ge { font-style: italic } /* Generic.Emph */ -.highlight .gi { color: #a6e22e } /* Generic.Inserted */ -.highlight .gs { font-weight: bold } /* Generic.Strong */ -.highlight .gu { color: #75715e } /* Generic.Subheading */ -.highlight .kc { color: #66d9ef } /* Keyword.Constant */ -.highlight .kd { color: #66d9ef } /* Keyword.Declaration */ -.highlight .kn { color: #f92672 } /* Keyword.Namespace */ -.highlight .kp 
{ color: #66d9ef } /* Keyword.Pseudo */ -.highlight .kr { color: #66d9ef } /* Keyword.Reserved */ -.highlight .kt { color: #66d9ef } /* Keyword.Type */ -.highlight .ld { color: #e6db74 } /* Literal.Date */ -.highlight .m { color: #ae81ff } /* Literal.Number */ -.highlight .s { color: #e6db74 } /* Literal.String */ -.highlight .na { color: #a6e22e } /* Name.Attribute */ -.highlight .nb { color: #f8f8f2 } /* Name.Builtin */ -.highlight .nc { color: #a6e22e } /* Name.Class */ -.highlight .no { color: #66d9ef } /* Name.Constant */ -.highlight .nd { color: #a6e22e } /* Name.Decorator */ -.highlight .ni { color: #f8f8f2 } /* Name.Entity */ -.highlight .ne { color: #a6e22e } /* Name.Exception */ -.highlight .nf { color: #a6e22e } /* Name.Function */ -.highlight .nl { color: #f8f8f2 } /* Name.Label */ -.highlight .nn { color: #f8f8f2 } /* Name.Namespace */ -.highlight .nx { color: #a6e22e } /* Name.Other */ -.highlight .py { color: #f8f8f2 } /* Name.Property */ -.highlight .nt { color: #f92672 } /* Name.Tag */ -.highlight .nv { color: #f8f8f2 } /* Name.Variable */ -.highlight .ow { color: #f92672 } /* Operator.Word */ -.highlight .w { color: #f8f8f2 } /* Text.Whitespace */ -.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */ -.highlight .mf { color: #ae81ff } /* Literal.Number.Float */ -.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */ -.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */ -.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */ -.highlight .sa { color: #e6db74 } /* Literal.String.Affix */ -.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */ -.highlight .sc { color: #e6db74 } /* Literal.String.Char */ -.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */ -.highlight .sd { color: #e6db74 } /* Literal.String.Doc */ -.highlight .s2 { color: #e6db74 } /* Literal.String.Double */ -.highlight .se { color: #ae81ff } /* Literal.String.Escape */ -.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */ -.highlight .si { color: #e6db74 } /* Literal.String.Interpol */ -.highlight .sx { color: #e6db74 } /* Literal.String.Other */ -.highlight .sr { color: #e6db74 } /* Literal.String.Regex */ -.highlight .s1 { color: #e6db74 } /* Literal.String.Single */ -.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */ -.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */ -.highlight .fm { color: #a6e22e } /* Name.Function.Magic */ -.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */ -.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */ -.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */ -.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */ -.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */ diff --git a/spaces/gradio/HuBERT/fairseq/models/nat/levenshtein_transformer.py b/spaces/gradio/HuBERT/fairseq/models/nat/levenshtein_transformer.py deleted file mode 100644 index 9377c3c7f5ad6b298eedfb2dc11f1a7a52d1cf26..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/models/nat/levenshtein_transformer.py +++ /dev/null @@ -1,509 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq.iterative_refinement_generator import DecoderOut -from fairseq.models import register_model, register_model_architecture -from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder -from fairseq.models.transformer import Embedding, TransformerDecoderLayer -from fairseq.modules.transformer_sentence_encoder import init_bert_params - -from .levenshtein_utils import ( - _apply_del_words, - _apply_ins_masks, - _apply_ins_words, - _fill, - _get_del_targets, - _get_ins_targets, - _skip, - _skip_encoder_out, -) - - -@register_model("levenshtein_transformer") -class LevenshteinTransformerModel(FairseqNATModel): - @property - def allow_length_beam(self): - return False - - @staticmethod - def add_args(parser): - FairseqNATModel.add_args(parser) - parser.add_argument( - "--early-exit", - default="6,6,6", - type=str, - help="number of decoder layers before word_del, mask_ins, word_ins", - ) - parser.add_argument( - "--no-share-discriminator", - action="store_true", - help="separate parameters for discriminator", - ) - parser.add_argument( - "--no-share-maskpredictor", - action="store_true", - help="separate parameters for mask-predictor", - ) - parser.add_argument( - "--share-discriminator-maskpredictor", - action="store_true", - help="share the parameters for both mask-predictor and discriminator", - ) - parser.add_argument( - "--sampling-for-deletion", - action="store_true", - help="instead of argmax, use sampling to predict the tokens", - ) - - @classmethod - def build_decoder(cls, args, tgt_dict, embed_tokens): - decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens) - if getattr(args, "apply_bert_init", False): - decoder.apply(init_bert_params) - return decoder - - def forward( - self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs - ): - - assert tgt_tokens is not None, "forward function only supports training." 
- - # encoding - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - - # generate training labels for insertion - masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets( - prev_output_tokens, tgt_tokens, self.pad, self.unk - ) - mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction - mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) - - mask_ins_out, _ = self.decoder.forward_mask_ins( - normalize=False, - prev_output_tokens=prev_output_tokens, - encoder_out=encoder_out, - ) - word_ins_out, _ = self.decoder.forward_word_ins( - normalize=False, - prev_output_tokens=masked_tgt_tokens, - encoder_out=encoder_out, - ) - - # make online prediction - if self.decoder.sampling_for_deletion: - word_predictions = torch.multinomial( - F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1 - ).view(word_ins_out.size(0), -1) - else: - word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1] - - word_predictions.masked_scatter_( - ~masked_tgt_masks, tgt_tokens[~masked_tgt_masks] - ) - - # generate training labels for deletion - word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad) - word_del_out, _ = self.decoder.forward_word_del( - normalize=False, - prev_output_tokens=word_predictions, - encoder_out=encoder_out, - ) - word_del_masks = word_predictions.ne(self.pad) - - return { - "mask_ins": { - "out": mask_ins_out, - "tgt": mask_ins_targets, - "mask": mask_ins_masks, - "ls": 0.01, - }, - "word_ins": { - "out": word_ins_out, - "tgt": tgt_tokens, - "mask": masked_tgt_masks, - "ls": self.args.label_smoothing, - "nll_loss": True, - }, - "word_del": { - "out": word_del_out, - "tgt": word_del_targets, - "mask": word_del_masks, - }, - } - - def forward_decoder( - self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs - ): - - output_tokens = decoder_out.output_tokens - output_scores = decoder_out.output_scores - attn = decoder_out.attn - history = decoder_out.history - - bsz = output_tokens.size(0) - if max_ratio is None: - max_lens = torch.zeros_like(output_tokens).fill_(255) - else: - if not encoder_out["encoder_padding_mask"]: - max_src_len = encoder_out["encoder_out"].size(0) - src_lens = encoder_out["encoder_out"].new(bsz).fill_(max_src_len) - else: - src_lens = (~encoder_out["encoder_padding_mask"][0]).sum(1) - max_lens = (src_lens * max_ratio).clamp(min=10).long() - - # delete words - # do not delete tokens if it is - can_del_word = output_tokens.ne(self.pad).sum(1) > 2 - if can_del_word.sum() != 0: # we cannot delete, skip - word_del_score, word_del_attn = self.decoder.forward_word_del( - normalize=True, - prev_output_tokens=_skip(output_tokens, can_del_word), - encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word), - ) - word_del_pred = word_del_score.max(-1)[1].bool() - - _tokens, _scores, _attn = _apply_del_words( - output_tokens[can_del_word], - output_scores[can_del_word], - word_del_attn, - word_del_pred, - self.pad, - self.bos, - self.eos, - ) - output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) - output_scores = _fill(output_scores, can_del_word, _scores, 0) - attn = _fill(attn, can_del_word, _attn, 0.0) - - if history is not None: - history.append(output_tokens.clone()) - - # insert placeholders - can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens - if can_ins_mask.sum() != 0: - mask_ins_score, _ = self.decoder.forward_mask_ins( - normalize=True, - prev_output_tokens=_skip(output_tokens, can_ins_mask), - 
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask), - ) - if eos_penalty > 0.0: - mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty - mask_ins_pred = mask_ins_score.max(-1)[1] - mask_ins_pred = torch.min( - mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) - ) - - _tokens, _scores = _apply_ins_masks( - output_tokens[can_ins_mask], - output_scores[can_ins_mask], - mask_ins_pred, - self.pad, - self.unk, - self.eos, - ) - output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) - output_scores = _fill(output_scores, can_ins_mask, _scores, 0) - - if history is not None: - history.append(output_tokens.clone()) - - # insert words - can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 - if can_ins_word.sum() != 0: - word_ins_score, word_ins_attn = self.decoder.forward_word_ins( - normalize=True, - prev_output_tokens=_skip(output_tokens, can_ins_word), - encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word), - ) - word_ins_score, word_ins_pred = word_ins_score.max(-1) - _tokens, _scores = _apply_ins_words( - output_tokens[can_ins_word], - output_scores[can_ins_word], - word_ins_pred, - word_ins_score, - self.unk, - ) - - output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) - output_scores = _fill(output_scores, can_ins_word, _scores, 0) - attn = _fill(attn, can_ins_word, word_ins_attn, 0.0) - - if history is not None: - history.append(output_tokens.clone()) - - # delete some unnecessary paddings - cut_off = output_tokens.ne(self.pad).sum(1).max() - output_tokens = output_tokens[:, :cut_off] - output_scores = output_scores[:, :cut_off] - attn = None if attn is None else attn[:, :cut_off, :] - - return decoder_out._replace( - output_tokens=output_tokens, - output_scores=output_scores, - attn=attn, - history=history, - ) - - def initialize_output_tokens(self, encoder_out, src_tokens): - initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2) - initial_output_tokens[:, 0] = self.bos - initial_output_tokens[:, 1] = self.eos - - initial_output_scores = initial_output_tokens.new_zeros( - *initial_output_tokens.size() - ).type_as(encoder_out["encoder_out"][0]) - - return DecoderOut( - output_tokens=initial_output_tokens, - output_scores=initial_output_scores, - attn=None, - step=0, - max_step=0, - history=None, - ) - - -class LevenshteinTransformerDecoder(FairseqNATDecoder): - def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): - super().__init__( - args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn - ) - self.dictionary = dictionary - self.bos = dictionary.bos() - self.unk = dictionary.unk() - self.eos = dictionary.eos() - self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) - self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None) - self.embed_word_del = Embedding(2, self.output_embed_dim, None) - - # del_word, ins_mask, ins_word - self.early_exit = [int(i) for i in args.early_exit.split(",")] - assert len(self.early_exit) == 3 - - # copy layers for mask-predict/deletion - self.layers_msk = None - if getattr(args, "no_share_maskpredictor", False): - self.layers_msk = nn.ModuleList( - [ - TransformerDecoderLayer(args, no_encoder_attn) - for _ in range(self.early_exit[1]) - ] - ) - self.layers_del = None - if getattr(args, "no_share_discriminator", False): - self.layers_del = nn.ModuleList( - [ - TransformerDecoderLayer(args, no_encoder_attn) - for _ in range(self.early_exit[0]) - ] - ) - - if getattr(args, 
"share_discriminator_maskpredictor", False): - assert getattr( - args, "no_share_discriminator", False - ), "must set saperate discriminator" - self.layers_msk = self.layers_del - - def extract_features( - self, - prev_output_tokens, - encoder_out=None, - early_exit=None, - layers=None, - **unused - ): - """ - Similar to *forward* but only return features. - Inputs: - prev_output_tokens: Tensor(B, T) - encoder_out: a dictionary of hidden states and masks - - Returns: - tuple: - - the decoder's features of shape `(batch, tgt_len, embed_dim)` - - a dictionary with any model-specific outputs - the LevenshteinTransformer decoder has full-attention to all generated tokens - """ - # embed positions - positions = ( - self.embed_positions(prev_output_tokens) - if self.embed_positions is not None - else None - ) - - # embed tokens and positions - x = self.embed_scale * self.embed_tokens(prev_output_tokens) - if self.project_in_dim is not None: - x = self.project_in_dim(x) - - if positions is not None: - x += positions - x = self.dropout_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - attn = None - inner_states = [x] - - # decoder layers - decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) - layers = self.layers if layers is None else layers - early_exit = len(layers) if early_exit is None else early_exit - for _, layer in enumerate(layers[:early_exit]): - x, attn, _ = layer( - x, - encoder_out["encoder_out"][0] - if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) - else None, - encoder_out["encoder_padding_mask"][0] - if ( - encoder_out is not None - and len(encoder_out["encoder_padding_mask"]) > 0 - ) - else None, - self_attn_mask=None, - self_attn_padding_mask=decoder_padding_mask, - ) - inner_states.append(x) - - if self.layer_norm: - x = self.layer_norm(x) - - # T x B x C -> B x T x C - x = x.transpose(0, 1) - - if self.project_out_dim is not None: - x = self.project_out_dim(x) - - return x, {"attn": attn, "inner_states": inner_states} - - @ensemble_decoder - def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused): - features, extra = self.extract_features( - prev_output_tokens, - encoder_out=encoder_out, - early_exit=self.early_exit[1], - layers=self.layers_msk, - **unused - ) - features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) - decoder_out = F.linear(features_cat, self.embed_mask_ins.weight) - if normalize: - return F.log_softmax(decoder_out, -1), extra["attn"] - return decoder_out, extra["attn"] - - @ensemble_decoder - def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused): - features, extra = self.extract_features( - prev_output_tokens, - encoder_out=encoder_out, - early_exit=self.early_exit[2], - layers=self.layers, - **unused - ) - decoder_out = self.output_layer(features) - if normalize: - return F.log_softmax(decoder_out, -1), extra["attn"] - return decoder_out, extra["attn"] - - @ensemble_decoder - def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused): - features, extra = self.extract_features( - prev_output_tokens, - encoder_out=encoder_out, - early_exit=self.early_exit[0], - layers=self.layers_del, - **unused - ) - decoder_out = F.linear(features, self.embed_word_del.weight) - if normalize: - return F.log_softmax(decoder_out, -1), extra["attn"] - return decoder_out, extra["attn"] - - -@register_model_architecture("levenshtein_transformer", "levenshtein_transformer") -def levenshtein_base_architecture(args): - args.encoder_embed_path = 
getattr(args, "encoder_embed_path", None) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.dropout = getattr(args, "dropout", 0.1) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", False) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.apply_bert_init = getattr(args, "apply_bert_init", False) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - args.early_exit = getattr(args, "early_exit", "6,6,6") - args.no_share_discriminator = getattr(args, "no_share_discriminator", False) - args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False) - args.share_discriminator_maskpredictor = getattr( - args, "share_discriminator_maskpredictor", False - ) - args.no_share_last_layer = getattr(args, "no_share_last_layer", False) - - -@register_model_architecture( - "levenshtein_transformer", "levenshtein_transformer_wmt_en_de" -) -def levenshtein_transformer_wmt_en_de(args): - levenshtein_base_architecture(args) - - -# similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) -@register_model_architecture( - "levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big" -) -def levenshtein_transformer_vaswani_wmt_en_de_big(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) - args.dropout = getattr(args, "dropout", 0.3) - 
levenshtein_base_architecture(args) - - -# default parameters used in tensor2tensor implementation -@register_model_architecture( - "levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big" -) -def levenshtein_transformer_wmt_en_de_big_t2t(args): - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) - args.attention_dropout = getattr(args, "attention_dropout", 0.1) - args.activation_dropout = getattr(args, "activation_dropout", 0.1) - levenshtein_transformer_vaswani_wmt_en_de_big(args) diff --git a/spaces/gradio/HuBERT/fairseq/modules/linearized_convolution.py b/spaces/gradio/HuBERT/fairseq/modules/linearized_convolution.py deleted file mode 100644 index f7e156cb0c75cb375447859c8b6749311372c35e..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/modules/linearized_convolution.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn.functional as F -from fairseq import utils -from fairseq.incremental_decoding_utils import with_incremental_state - -from .conv_tbc import ConvTBC - -from typing import Dict, Optional -from torch import Tensor - -@with_incremental_state -class LinearizedConvolution(ConvTBC): - """An optimized version of nn.Conv1d. - - At training time, this module uses ConvTBC, which is an optimized version - of Conv1d. At inference time, it optimizes incremental generation (i.e., - one time step at a time) by replacing the convolutions with linear layers. - Note that the input order changes from training to inference. - """ - - def __init__(self, in_channels, out_channels, kernel_size, **kwargs): - super().__init__(in_channels, out_channels, kernel_size, **kwargs) - self._linearized_weight = None - self.register_backward_hook(self._clear_linearized_weight) - - def state_dict(self, destination=None, prefix="", keep_vars=False): - state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars) - # don't store redundant _linearized_weight in checkpoints - if prefix + "_linearized_weight" in state: - del state[prefix + "_linearized_weight"] - return state - - def upgrade_state_dict_named(self, state_dict, name): - prefix = name + "." if name != "" else "" - if prefix + "_linearized_weight" in state_dict: - del state_dict[prefix + "_linearized_weight"] - - @torch.jit.export - def forward(self, input, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None): - """ - Args: - incremental_state: Used to buffer signal; if not None, then input is - expected to contain a single frame. If the input order changes - between time steps, call reorder_incremental_state. 
- Input: - Time x Batch x Channel during training - Batch x Time x Channel during inference - """ - if incremental_state is None: - output = self.conv_tbc(input) - if self.kernel_size[0] > 1 and self.padding[0] > 0: - # remove future timesteps added by padding - output = output[: -self.padding[0], :, :] - return output - - # reshape weight - weight = self._get_linearized_weight() - kw = self.kernel_size[0] - - bsz = input.size(0) # input: bsz x len x dim - if kw > 1: - input = input.data - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is None: - input_buffer = input.new(bsz, kw, input.size(2)).zero_() - self._set_input_buffer(incremental_state, input_buffer) - else: - # shift buffer - input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone() - # append next input - input_buffer[:, -1, :] = input[:, -1, :] - input = input_buffer - with torch.no_grad(): - output = F.linear(input.view(bsz, -1), weight, self.bias) - return output.view(bsz, 1, -1) - - @torch.jit.unused - def reorder_incremental_state(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order): - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is not None: - input_buffer = input_buffer.index_select(0, new_order) - self._set_input_buffer(incremental_state, input_buffer) - - @torch.jit.unused - def _get_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]): - return utils.get_incremental_state(self, incremental_state, "input_buffer") - - @torch.jit.unused - def _set_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_buffer): - return utils.set_incremental_state( - self, incremental_state, "input_buffer", new_buffer - ) - - @torch.jit.unused - def _get_linearized_weight(self): - if self._linearized_weight is None: - kw = self.kernel_size[0] - weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous() - assert weight.size() == (self.out_channels, kw, self.in_channels) - return weight.view(self.out_channels, -1) - return self._linearized_weight - - @torch.jit.unused - def _clear_linearized_weight(self, *args): - self._linearized_weight = None diff --git a/spaces/gradio/HuBERT/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py b/spaces/gradio/HuBERT/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py deleted file mode 100644 index bfe2a0d381f28525f90ee120b31a69210338eb1b..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import math -from dataclasses import dataclass, field -from typing import List - -from omegaconf import II - -from fairseq.dataclass import FairseqDataclass -from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler - - -@dataclass -class TriangularLRScheduleConfig(FairseqDataclass): - max_lr: float = field( - default="???", metadata={"help": "max learning rate, must be more than cfg.lr"} - ) - lr_period_updates: float = field( - default=5000, - metadata={"help": "initial number of updates per period (cycle length)"}, - ) - lr_shrink: float = field( - default=0.1, metadata={"help": "shrink factor for annealing"} - ) - shrink_min: bool = field( - default=False, metadata={"help": "if set, also shrinks min lr"} - ) - lr: List[float] = II("optimization.lr") - - -@register_lr_scheduler("triangular", dataclass=TriangularLRScheduleConfig) -class TriangularLRSchedule(FairseqLRScheduler): - """Assign LR based on a triangular cyclical schedule. - - See https://arxiv.org/pdf/1506.01186.pdf for details. - """ - - def __init__(self, cfg: TriangularLRScheduleConfig, optimizer): - super().__init__(cfg, optimizer) - if len(cfg.lr) > 1: - raise ValueError( - "Cannot use a fixed learning rate schedule with triangular." - " Consider --lr-scheduler=fixed instead." - ) - - lr = cfg.lr[0] - - assert cfg.max_lr > lr, "max_lr must be more than lr" - self.min_lr = lr - self.max_lr = cfg.max_lr - self.stepsize = cfg.lr_period_updates // 2 - self.lr_shrink = cfg.lr_shrink - self.shrink_min = cfg.shrink_min - - # initial learning rate - self.lr = self.min_lr - self.optimizer.set_lr(self.lr) - - def step(self, epoch, val_loss=None): - """Update the learning rate at the end of the given epoch.""" - super().step(epoch, val_loss) - # we don't change the learning rate at epoch boundaries - return self.optimizer.get_lr() - - def step_update(self, num_updates): - """Update the learning rate after each update.""" - cycle = math.floor(num_updates / (2 * self.stepsize)) - - lr_shrink = self.lr_shrink ** cycle - max_lr = self.max_lr * lr_shrink - if self.shrink_min: - min_lr = self.min_lr * lr_shrink - else: - min_lr = self.min_lr - - x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1) - self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x)) - - self.optimizer.set_lr(self.lr) - return self.lr diff --git a/spaces/gradio/HuBERT/tests/test_fp16_optimizer.py b/spaces/gradio/HuBERT/tests/test_fp16_optimizer.py deleted file mode 100644 index ce4f1c055ce68b8e3933636fae66cca73c5e9d18..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/tests/test_fp16_optimizer.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
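[Editor's note] A standalone sketch (not fairseq code) of the triangle computed by step_update() in the scheduler above, under assumed settings min_lr=0.01, max_lr=0.1, lr_period_updates=1000 (so stepsize=500), lr_shrink=0.5, shrink_min=False:

import math

min_lr, max_lr, stepsize, lr_shrink, shrink_min = 0.01, 0.1, 500, 0.5, False

def triangular_lr(num_updates):
    cycle = math.floor(num_updates / (2 * stepsize))
    shrink = lr_shrink ** cycle                  # each completed cycle shrinks the peak
    hi = max_lr * shrink
    lo = min_lr * shrink if shrink_min else min_lr
    # x runs 1 -> 0 -> 1 across one cycle, so lr ramps lo -> hi -> lo
    x = abs(num_updates / stepsize - 2 * (cycle + 1) + 1)
    return lo + (hi - lo) * max(0, 1 - x)

for n in (0, 250, 500, 750, 1000, 1500):
    print(n, round(triangular_lr(n), 4))
# prints: 0 -> 0.01, 250 -> 0.055, 500 -> 0.1 (first peak), 750 -> 0.055,
# 1000 -> 0.01, 1500 -> 0.05 (second peak, lowered by lr_shrink)

Each cycle ramps linearly from min_lr up to the current peak and back down over 2 * stepsize updates, and lr_shrink lowers the peak geometrically per cycle, matching the cyclical-LR schedule of the paper linked in the class docstring.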
- -import argparse -import copy -import logging -import unittest - -import torch -from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer -from omegaconf import OmegaConf - - -@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") -class TestGradientScaling(unittest.TestCase): - def setUp(self): - self.x = torch.tensor([2.0]).cuda().half() - weight = 3.0 - bias = 5.0 - self.error = 1.0 - self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half() - self.loss_fn = torch.nn.L1Loss() - - self.model = torch.nn.Linear(1, 1) - self.model.weight.data = torch.tensor([[weight]]) - self.model.bias.data = torch.tensor([bias]) - self.model.cuda().half() - self.params = list(self.model.parameters()) - - self.cfg_dls = OmegaConf.create( - { - "optimization": { - "lr": [0.1], - }, - "optimizer": { - "_name": "adam", - "lr": [0.1], - "adam_betas": "(0.9, 0.999)", - "adam_eps": 1e-8, - "weight_decay": 0.0, - }, - "common": { - "fp16_init_scale": 1, - "fp16_scale_window": 1, - "fp16_scale_tolerance": 1, - "threshold_loss_scale": 1, - "min_loss_scale": 1e-4, - "tpu": False, - }, - } - ) - logging.disable(logging.CRITICAL) - - def tearDown(self): - logging.disable(logging.NOTSET) - - def run_iter(self, model, params, optimizer): - optimizer.zero_grad() - y = model(self.x) - loss = self.loss_fn(y, self.target) - optimizer.backward(loss) - self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16)) - - grad_norm = optimizer.clip_grad_norm(0) - self.assertAlmostEqual(grad_norm.item(), 2.2361, 4) - - optimizer.step() - self.assertEqual( - model.weight, - torch.tensor( - [[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True - ), - ) - self.assertEqual( - model.bias, - torch.tensor( - [5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True - ), - ) - self.assertEqual(optimizer.scaler.loss_scale, 2.0) - - def test_mixed_precision(self): - model = copy.deepcopy(self.model) - params = list(model.parameters()) - optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params) - - self.run_iter(model, params, optimizer) - self.assertTrue( - all( - torch.all( - fp32_params.eq( - torch.tensor( - [3.1000, 5.1000], device="cuda:0", requires_grad=True - ) - ) - ) - for fp32_params in optimizer.fp32_params.values() - ) - ) - - def test_memory_efficient(self): - model = copy.deepcopy(self.model) - params = list(model.parameters()) - optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params) - - self.run_iter(model, params, optimizer) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/gradio/sentiment_analysis/app.py b/spaces/gradio/sentiment_analysis/app.py deleted file mode 100644 index 3441f0dc9c8e598db19c448f2552350068fa9c87..0000000000000000000000000000000000000000 --- a/spaces/gradio/sentiment_analysis/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import gradio as gr -import nltk -from nltk.sentiment.vader import SentimentIntensityAnalyzer - -nltk.download("vader_lexicon") -sid = SentimentIntensityAnalyzer() - -def sentiment_analysis(text): - scores = sid.polarity_scores(text) - del scores["compound"] - return scores - -demo = gr.Interface( - fn=sentiment_analysis, - inputs=gr.Textbox(placeholder="Enter a positive or negative sentence here..."), - outputs="label", - interpretation="default", - examples=[["This is wonderful!"]]) - -demo.launch() \ No newline at end of file diff --git a/spaces/gradio/timeseries-forecasting-with-prophet/run.py 
b/spaces/gradio/timeseries-forecasting-with-prophet/run.py deleted file mode 100644 index 373a6de4bf8fa877ea2c6d21184113daf0141ce5..0000000000000000000000000000000000000000 --- a/spaces/gradio/timeseries-forecasting-with-prophet/run.py +++ /dev/null @@ -1,41 +0,0 @@ -import gradio as gr -import pypistats -from datetime import date -from dateutil.relativedelta import relativedelta -import pandas as pd -from prophet import Prophet -pd.options.plotting.backend = "plotly" - -def get_forecast(lib, time): - - data = pypistats.overall(lib, total=True, format="pandas") - data = data.groupby("category").get_group("with_mirrors").sort_values("date") - start_date = date.today() - relativedelta(months=int(time.split(" ")[0])) - df = data[(data['date'] > str(start_date))] - - df1 = df[['date','downloads']] - df1.columns = ['ds','y'] - - m = Prophet() - m.fit(df1) - future = m.make_future_dataframe(periods=90) - forecast = m.predict(future) - fig1 = m.plot(forecast) - return fig1 - -with gr.Blocks() as demo: - gr.Markdown( - """ - **Pypi Download Stats 📈 with Prophet Forecasting**: see live download stats for popular open-source libraries 🤗 along with a 3 month forecast using Prophet. The [ source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/timeseries-forecasting-with-prophet/blob/main/app.py). - """) - with gr.Row(): - lib = gr.Dropdown(["pandas", "scikit-learn", "torch", "prophet"], label="Library", value="pandas") - time = gr.Dropdown(["3 months", "6 months", "9 months", "12 months"], label="Downloads over the last...", value="12 months") - - plt = gr.Plot() - - lib.change(get_forecast, [lib, time], plt, queue=False) - time.change(get_forecast, [lib, time], plt, queue=False) - demo.load(get_forecast, [lib, time], plt, queue=False) - -demo.launch() \ No newline at end of file diff --git a/spaces/gsaivinay/Llama-2-13B-GGML-UI/types/data.ts b/spaces/gsaivinay/Llama-2-13B-GGML-UI/types/data.ts deleted file mode 100644 index d57323721fbbf2ead31fcc33334717d75de1f3f6..0000000000000000000000000000000000000000 --- a/spaces/gsaivinay/Llama-2-13B-GGML-UI/types/data.ts +++ /dev/null @@ -1,4 +0,0 @@ -export interface KeyValuePair { - key: string; - value: any; -} diff --git a/spaces/h2oai/wave-tour/examples/stat_small.py b/spaces/h2oai/wave-tour/examples/stat_small.py deleted file mode 100644 index d960f313dd5e27f0a36e19ff720ed35392376dc7..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/stat_small.py +++ /dev/null @@ -1,28 +0,0 @@ -# Stat / Small -# Create a stat card displaying a single value. -# #stat_card -# --- -import time - -from faker import Faker - -from synth import FakePercent -from h2o_wave import site, ui - -page = site['/demo'] - -fake = Faker() -f = FakePercent() -val, _ = f.next() -c = page.add('example', ui.small_stat_card( - box='1 1 1 1', - title=fake.cryptocurrency_name(), - value=f'${val:.2f}', -)) -page.save() - -while True: - time.sleep(1) - val, _ = f.next() - c.value = f'${val:.2f}' - page.save() diff --git a/spaces/hands012/gpt-academic/Dockerfile b/spaces/hands012/gpt-academic/Dockerfile deleted file mode 100644 index 19d988f6d7da77b6473076700c5831d4abb7e2b9..0000000000000000000000000000000000000000 --- a/spaces/hands012/gpt-academic/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM -# 如何构建: 先修改 `config.py`, 然后 docker build -t gpt-academic . 
-# 如何运行: docker run --rm -it --net=host gpt-academic -FROM python:3.11 - -RUN echo '[global]' > /etc/pip.conf && \ - echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \ - echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf - - -WORKDIR /gpt - -# 装载项目文件 -COPY . . - -# 安装依赖 -RUN pip3 install -r requirements.txt - - -# 可选步骤,用于预热模块 -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - -# 启动 -CMD ["python3", "-u", "main.py"] diff --git "a/spaces/hands012/gpt-academic/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" "b/spaces/hands012/gpt-academic/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" deleted file mode 100644 index 72ffe6b1a8f2a59a3c5c364e30dfb4949bd6a929..0000000000000000000000000000000000000000 --- "a/spaces/hands012/gpt-academic/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" +++ /dev/null @@ -1,67 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -fast_debug = False - - -def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, glob, os - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - - prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" - i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # 带超时倒计时 - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - if not fast_debug: time.sleep(2) - - all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) - i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt) # 带超时倒计时 - - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - - - -@CatchException -def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from 
update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/spaces/hdhzk/bingo/src/components/toaster.tsx b/spaces/hdhzk/bingo/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/hdhzk/bingo/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast' diff --git a/spaces/hdm1/mindtune/Dockerfile b/spaces/hdm1/mindtune/Dockerfile deleted file mode 100644 index 7ac2e8dc6efdf00f27bdb3b9615115fe7641af2d..0000000000000000000000000000000000000000 --- a/spaces/hdm1/mindtune/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ - -FROM tensorflow/tensorflow:latest - -WORKDIR /app - -COPY ./mindtune_model ./mindtune_model - -ENV MPLCONFIGDIR=/tmp/matplotlib - -RUN pip install gradio -RUN pip install matplotlib - -COPY /app/main.py ./main.py - -EXPOSE 7860 - -CMD ["python", "main.py"] diff --git a/spaces/hezhaoqia/vits-simple-api/bert_vits2/text/symbols.py b/spaces/hezhaoqia/vits-simple-api/bert_vits2/text/symbols.py deleted file mode 100644 index fd3d5dbe28b3b66de840f0dd2172c4c8cab5d697..0000000000000000000000000000000000000000 --- a/spaces/hezhaoqia/vits-simple-api/bert_vits2/text/symbols.py +++ /dev/null @@ -1,52 +0,0 @@ -punctuation = ['!', '?', '…', ",", ".", "'", '-'] -pu_symbols = punctuation + ["SP", "UNK"] -pad = '_' - -# chinese -zh_symbols = ['E', 'En', 'a', 'ai', 'an', 'ang', 'ao', 'b', 'c', 'ch', 'd', 'e', 'ei', 'en', 'eng', 'er', 'f', 'g', 'h', - 'i', 'i0', 'ia', 'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'ir', 'iu', 'j', 'k', 'l', 'm', 'n', - 'o', - 'ong', - 'ou', 'p', 'q', 'r', 's', 'sh', 't', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', - 'vn', - 'w', 'x', 'y', 'z', 'zh', - "AA", "EE", "OO"] -num_zh_tones = 6 - -# japanese -ja_symbols = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j', 'k', 'ky', - 'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z'] -num_ja_tones = 1 - -# English -en_symbols = ['aa', 'ae', 'ah', 'ao', 'aw', 'ay', 'b', 'ch', 'd', 'dh', 'eh', 'er', 'ey', 'f', 'g', 'hh', 'ih', 'iy', - 'jh', 'k', 'l', 'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's', - 'sh', 't', 'th', 'uh', 'uw', 'V', 'w', 'y', 'z', 'zh'] -num_en_tones = 4 - -# combine all symbols -normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols)) -symbols = [pad] + normal_symbols + pu_symbols -sil_phonemes_ids = [symbols.index(i) for i in pu_symbols] - -# combine all tones -num_tones = num_zh_tones + num_ja_tones + num_en_tones - -# language maps -language_id_map = { - 'ZH': 0, - "JA": 1, - "EN": 2 -} -num_languages = len(language_id_map.keys()) - -language_tone_start_map = { - 'ZH': 0, - "JA": num_zh_tones, - "EN": num_zh_tones + num_ja_tones -} - -if __name__ == '__main__': - a = set(zh_symbols) - b = set(en_symbols) - print(sorted(a & b)) diff --git 
a/spaces/hf4all/web-ui/_next/static/chunks/780.aecf08b05b0b9d76.js b/spaces/hf4all/web-ui/_next/static/chunks/780.aecf08b05b0b9d76.js deleted file mode 100644 index 54b69b4cb6454762d71ced8d20e2a5e11937716b..0000000000000000000000000000000000000000 --- a/spaces/hf4all/web-ui/_next/static/chunks/780.aecf08b05b0b9d76.js +++ /dev/null @@ -1,260 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[780],{33747:function(e,t,n){"use strict";n.d(t,{YF:function(){return p},x7:function(){return l}});var r=n(21828),o=n(41778),i=n(86006),a=n(8431);let l=e=>({name:"arrow",options:e,fn(t){let{element:n,padding:o}="function"==typeof e?e(t):e;if(n&&({}).hasOwnProperty.call(n,"current")){if(null!=n.current)return(0,r.x7)({element:n.current,padding:o}).fn(t)}else if(n)return(0,r.x7)({element:n,padding:o}).fn(t);return{}}});var s="undefined"!=typeof document?i.useLayoutEffect:i.useEffect;function c(e,t){let n,r,o;if(e===t)return!0;if(typeof e!=typeof t)return!1;if("function"==typeof e&&e.toString()===t.toString())return!0;if(e&&t&&"object"==typeof e){if(Array.isArray(e)){if((n=e.length)!=t.length)return!1;for(r=n;0!=r--;)if(!c(e[r],t[r]))return!1;return!0}if((n=(o=Object.keys(e)).length)!==Object.keys(t).length)return!1;for(r=n;0!=r--;)if(!({}).hasOwnProperty.call(t,o[r]))return!1;for(r=n;0!=r--;){let n=o[r];if(("_owner"!==n||!e.$$typeof)&&!c(e[n],t[n]))return!1}return!0}return e!=e&&t!=t}function u(e){if("undefined"==typeof window)return 1;let t=e.ownerDocument.defaultView||window;return t.devicePixelRatio||1}function f(e,t){let n=u(e);return Math.round(t*n)/n}function d(e){let t=i.useRef(e);return s(()=>{t.current=e}),t}function p(e){void 0===e&&(e={});let{placement:t="bottom",strategy:n="absolute",middleware:r=[],platform:l,elements:{reference:p,floating:h}={},transform:g=!0,whileElementsMounted:m,open:b}=e,[v,y]=i.useState({x:0,y:0,strategy:n,placement:t,middlewareData:{},isPositioned:!1}),[x,w]=i.useState(r);c(x,r)||w(r);let[E,S]=i.useState(null),[k,_]=i.useState(null),O=i.useCallback(e=>{e!=R.current&&(R.current=e,S(e))},[S]),C=i.useCallback(e=>{e!==T.current&&(T.current=e,_(e))},[_]),A=p||E,N=h||k,R=i.useRef(null),T=i.useRef(null),P=i.useRef(v),M=d(m),j=d(l),L=i.useCallback(()=>{if(!R.current||!T.current)return;let e={placement:t,strategy:n,middleware:x};j.current&&(e.platform=j.current),(0,o.oo)(R.current,T.current,e).then(e=>{let t={...e,isPositioned:!0};I.current&&!c(P.current,t)&&(P.current=t,a.flushSync(()=>{y(t)}))})},[x,t,n,j]);s(()=>{!1===b&&P.current.isPositioned&&(P.current.isPositioned=!1,y(e=>({...e,isPositioned:!1})))},[b]);let I=i.useRef(!1);s(()=>(I.current=!0,()=>{I.current=!1}),[]),s(()=>{if(A&&(R.current=A),N&&(T.current=N),A&&N){if(M.current)return M.current(A,N,L);L()}},[A,N,L,M]);let D=i.useMemo(()=>({reference:R,floating:T,setReference:O,setFloating:C}),[O,C]),F=i.useMemo(()=>({reference:A,floating:N}),[A,N]),B=i.useMemo(()=>{let e={position:n,left:0,top:0};if(!F.floating)return e;let t=f(F.floating,v.x),r=f(F.floating,v.y);return g?{...e,transform:"translate("+t+"px, "+r+"px)",...u(F.floating)>=1.5&&{willChange:"transform"}}:{position:n,left:t,top:r}},[n,g,F.floating,v.x,v.y]);return i.useMemo(()=>({...v,update:L,refs:D,elements:F,floatingStyles:B}),[v,L,D,F,B])}},52134:function(e,t,n){"use strict";let r;n.d(t,{wD:function(){return eg},vs:function(){return ev},bQ:function(){return eC},YF:function(){return eA},NI:function(){return eR},JA:function(){return ey},c0:function(){return eZ},qs:function(){return eq}});var 
o=n(41778),i=n(33747),a=n(86006),l=n.t(a,2),s=n(472),c='input:not([inert]),select:not([inert]),textarea:not([inert]),a[href]:not([inert]),button:not([inert]),[tabindex]:not(slot):not([inert]),audio[controls]:not([inert]),video[controls]:not([inert]),[contenteditable]:not([contenteditable="false"]):not([inert]),details>summary:first-of-type:not([inert]),details:not([inert])',u="undefined"==typeof Element,f=u?function(){}:Element.prototype.matches||Element.prototype.msMatchesSelector||Element.prototype.webkitMatchesSelector,d=!u&&Element.prototype.getRootNode?function(e){var t;return null==e?void 0:null===(t=e.getRootNode)||void 0===t?void 0:t.call(e)}:function(e){return null==e?void 0:e.ownerDocument},p=function e(t,n){void 0===n&&(n=!0);var r,o=null==t?void 0:null===(r=t.getAttribute)||void 0===r?void 0:r.call(t,"inert");return""===o||"true"===o||n&&t&&e(t.parentNode)},h=function(e){var t,n=null==e?void 0:null===(t=e.getAttribute)||void 0===t?void 0:t.call(e,"contenteditable");return""===n||"true"===n},g=function(e,t,n){if(p(e))return[];var r=Array.prototype.slice.apply(e.querySelectorAll(c));return t&&f.call(e,c)&&r.unshift(e),r=r.filter(n)},m=function e(t,n,r){for(var o=[],i=Array.from(t);i.length;){var a=i.shift();if(!p(a,!1)){if("SLOT"===a.tagName){var l=a.assignedElements(),s=e(l.length?l:a.children,!0,r);r.flatten?o.push.apply(o,s):o.push({scopeParent:a,candidates:s})}else{f.call(a,c)&&r.filter(a)&&(n||!t.includes(a))&&o.push(a);var u=a.shadowRoot||"function"==typeof r.getShadowRoot&&r.getShadowRoot(a),d=!p(u,!1)&&(!r.shadowRootFilter||r.shadowRootFilter(a));if(u&&d){var h=e(!0===u?a.children:u.children,!0,r);r.flatten?o.push.apply(o,h):o.push({scopeParent:a,candidates:h})}else i.unshift.apply(i,a.children)}}}return o},b=function(e){return!isNaN(parseInt(e.getAttribute("tabindex"),10))},v=function(e){if(!e)throw Error("No node provided");return e.tabIndex<0&&(/^(AUDIO|VIDEO|DETAILS)$/.test(e.tagName)||h(e))&&!b(e)?0:e.tabIndex},y=function(e,t){var n=v(e);return n<0&&t&&!b(e)?0:n},x=function(e,t){return e.tabIndex===t.tabIndex?e.documentOrder-t.documentOrder:e.tabIndex-t.tabIndex},w=function(e){return"INPUT"===e.tagName},E=function(e,t){for(var n=0;nsummary:first-of-type")?e.parentElement:e;if(f.call(o,"details:not([open]) *"))return!0;if(n&&"full"!==n&&"legacy-full"!==n){if("non-zero-area"===n)return _(e)}else{if("function"==typeof r){for(var i=e;e;){var a=e.parentElement,l=d(e);if(a&&!a.shadowRoot&&!0===r(a))return _(e);e=e.assignedSlot?e.assignedSlot:a||l===e.ownerDocument?a:l.host}e=i}if(k(e))return!e.getClientRects().length;if("legacy-full"!==n)return!0}return!1},C=function(e){if(/^(INPUT|BUTTON|SELECT|TEXTAREA)$/.test(e.tagName))for(var t=e.parentElement;t;){if("FIELDSET"===t.tagName&&t.disabled){for(var n=0;nv(t))&&(r=e,!((o=t).disabled||p(o)||w(o)&&"hidden"===o.type||O(o,r)||"DETAILS"===o.tagName&&Array.prototype.slice.apply(o.children).some(function(e){return"SUMMARY"===e.tagName})||C(o)))},N=function(e){var t=parseInt(e.getAttribute("tabindex"),10);return!!isNaN(t)||t>=0},R=function e(t){var n=[],r=[];return t.forEach(function(t,o){var i=!!t.scopeParent,a=i?t.scopeParent:t,l=y(a,i),s=i?e(t.candidates):a;0===l?i?n.push.apply(n,s):n.push(a):r.push({documentOrder:o,tabIndex:l,item:t,isScope:i,content:s})}),r.sort(x).reduce(function(e,t){return t.isScope?e.push.apply(e,t.content):e.push(t.content),e},[]).concat(n)},T=function(e,t){return 
R((t=t||{}).getShadowRoot?m([e],t.includeContainer,{filter:A.bind(null,t),flatten:!1,getShadowRoot:t.getShadowRoot,shadowRootFilter:N}):g(e,t.includeContainer,A.bind(null,t)))};function P(){return(P=Object.assign?Object.assign.bind():function(e){for(var t=1;t"floating-ui-"+L++,D=l["useId".toString()],F=D||function(){let[e,t]=a.useState(()=>j?I():void 0);return M(()=>{null==e&&t(I())},[]),a.useEffect(()=>{j||(j=!0)},[]),e},B=a.createContext(null),z=a.createContext(null),$=()=>{var e;return(null==(e=a.useContext(B))?void 0:e.id)||null},U=()=>a.useContext(z);function H(e){return(null==e?void 0:e.ownerDocument)||document}function Z(){let e=navigator.userAgentData;return null!=e&&e.platform?e.platform:navigator.platform}function q(e){return H(e).defaultView||window}function W(e){return!!e&&(e instanceof Element||e instanceof q(e).Element)}function V(e){return!!e&&(e instanceof HTMLElement||e instanceof q(e).HTMLElement)}function G(e){if(0===e.mozInputSource&&e.isTrusted)return!0;let t=/Android/i;return(t.test(Z())||t.test(function(){let e=navigator.userAgentData;return e&&Array.isArray(e.brands)?e.brands.map(e=>{let{brand:t,version:n}=e;return t+"/"+n}).join(" "):navigator.userAgent}()))&&e.pointerType?"click"===e.type&&1===e.buttons:0===e.detail&&!e.pointerType}function K(e){return 0===e.width&&0===e.height||1===e.width&&1===e.height&&0===e.pressure&&0===e.detail&&"mouse"!==e.pointerType||e.width<1&&e.height<1&&0===e.pressure&&0===e.detail}function Y(){return/apple/i.test(navigator.vendor)}function X(e,t){if(!e||!t)return!1;let n=t.getRootNode&&t.getRootNode();if(e.contains(t))return!0;if(n&&function(e){if("undefined"==typeof ShadowRoot)return!1;let t=q(e).ShadowRoot;return e instanceof t||e instanceof ShadowRoot}(n)){let n=t;for(;n;){if(e===n)return!0;n=n.parentNode||n.host}}return!1}function J(e){return"data-floating-ui-"+e}function Q(e){let t=(0,a.useRef)(e);return M(()=>{t.current=e}),t}function ee(e){let t=e.activeElement;for(;(null==(n=t)?void 0:null==(r=n.shadowRoot)?void 0:r.activeElement)!=null;){var n,r;t=t.shadowRoot.activeElement}return t}let et=0;function en(e,t){void 0===t&&(t={});let{preventScroll:n=!1,cancelPrevious:r=!0,sync:o=!1}=t;r&&cancelAnimationFrame(et);let i=()=>null==e?void 0:e.focus({preventScroll:n});o?i():et=requestAnimationFrame(i)}function er(e,t){let n=e.filter(e=>{var n;return e.parentId===t&&(null==(n=e.context)?void 0:n.open)}),r=n;for(;r.length;)r=e.filter(e=>{var t;return null==(t=r)?void 0:t.some(t=>{var n;return e.parentId===t.id&&(null==(n=e.context)?void 0:n.open)})}),n=n.concat(r);return n}function eo(e){return"composedPath"in e?e.composedPath()[0]:e.target}function ei(e){e.preventDefault(),e.stopPropagation()}let ea=()=>({getShadowRoot:!0,displayCheck:"function"==typeof ResizeObserver&&ResizeObserver.toString().includes("[native code]")?"full":"none"});function el(e,t){let n=T(e,ea());"prev"===t&&n.reverse();let r=n.indexOf(ee(H(e)));return n.slice(r+1)[0]}function es(e,t){let n=t||e.currentTarget,r=e.relatedTarget;return!r||!X(n,r)}let ec={border:0,clip:"rect(0 0 0 0)",height:"1px",margin:"-1px",overflow:"hidden",padding:0,position:"fixed",whiteSpace:"nowrap",width:"1px",top:0,left:0};function eu(e){"Tab"===e.key&&(e.target,clearTimeout(r))}let ef=a.forwardRef(function(e,t){let[n,r]=a.useState();M(()=>(Y()&&r("button"),document.addEventListener("keydown",eu),()=>{document.removeEventListener("keydown",eu)}),[]);let o={ref:t,tabIndex:0,role:n,"aria-hidden":!n||void 0,[J("focus-guard")]:"",style:ec};return 
a.createElement("span",P({},e,o))}),ed=a.createContext(null),ep=()=>a.useContext(ed),eh=a.forwardRef(function(e,t){return a.createElement("button",P({},e,{type:"button",ref:t,tabIndex:-1,style:ec}))});function eg(e){var t;let{context:n,children:r,disabled:o=!1,order:i=["content"],guards:l=!0,initialFocus:c=0,returnFocus:u=!0,modal:f=!0,visuallyHiddenDismiss:d=!1,closeOnFocusOut:p=!0}=e,{open:h,refs:g,nodeId:m,onOpenChange:b,events:v,dataRef:y,elements:{domReference:x,floating:w}}=n,E=!(0,s.J_)()||l,S=Q(i),k=Q(c),_=Q(u),O=U(),C=ep(),A="number"==typeof c&&c<0,N=a.useRef(null),R=a.useRef(null),P=a.useRef(!1),j=a.useRef(null),L=a.useRef(!1),I=null!=C,D=x&&"combobox"===x.getAttribute("role")&&V(t=x)&&t.matches("input:not([type='hidden']):not([disabled]),[contenteditable]:not([contenteditable='false']),textarea:not([disabled])"),F=a.useCallback(function(e){return void 0===e&&(e=w),e?T(e,ea()):[]},[w]),B=a.useCallback(e=>{let t=F(e);return S.current.map(e=>x&&"reference"===e?x:w&&"floating"===e?w:t).filter(Boolean).flat()},[x,w,S,F]);function z(e){return!o&&d&&f?a.createElement(eh,{ref:"start"===e?N:R,onClick:e=>b(!1,e.nativeEvent)},"string"==typeof d?d:"Dismiss"):null}a.useEffect(()=>{if(o||!f)return;function e(e){if("Tab"===e.key){X(w,ee(H(w)))&&0===F().length&&!D&&ei(e);let t=B(),n=eo(e);"reference"===S.current[0]&&n===x&&(ei(e),e.shiftKey?en(t[t.length-1]):en(t[1])),"floating"===S.current[1]&&n===w&&e.shiftKey&&(ei(e),en(t[0]))}}let t=H(w);return t.addEventListener("keydown",e),()=>{t.removeEventListener("keydown",e)}},[o,x,w,f,S,g,D,F,B]),a.useEffect(()=>{if(!o&&p&&w&&V(x))return x.addEventListener("focusout",t),x.addEventListener("pointerdown",e),f||w.addEventListener("focusout",t),()=>{x.removeEventListener("focusout",t),x.removeEventListener("pointerdown",e),f||w.removeEventListener("focusout",t)};function e(){L.current=!0,setTimeout(()=>{L.current=!1})}function t(e){let t=e.relatedTarget;queueMicrotask(()=>{let n=!(X(x,t)||X(w,t)||X(t,w)||X(null==C?void 0:C.portalNode,t)||null!=t&&t.hasAttribute(J("focus-guard"))||O&&(er(O.nodesRef.current,m).find(e=>{var n,r;return X(null==(n=e.context)?void 0:n.elements.floating,t)||X(null==(r=e.context)?void 0:r.elements.domReference,t)})||(function(e,t){var n;let r=[],o=null==(n=e.find(e=>e.id===t))?void 0:n.parentId;for(;o;){let t=e.find(e=>e.id===o);o=null==t?void 0:t.parentId,t&&(r=r.concat(t))}return r})(O.nodesRef.current,m).find(e=>{var n,r;return(null==(n=e.context)?void 0:n.elements.floating)===t||(null==(r=e.context)?void 0:r.elements.domReference)===t})));t&&n&&!L.current&&t!==j.current&&(P.current=!0,b(!1,e))})}},[o,x,w,f,m,O,C,b,p]),a.useEffect(()=>{var e;if(o)return;let t=Array.from((null==C?void 0:null==(e=C.portalNode)?void 0:e.querySelectorAll("["+J("portal")+"]"))||[]);if(w&&f){let e=[w,...t,N.current,R.current].filter(e=>null!=e),n=E?s.Ry:s.cJ,r=n(S.current.includes("reference")||D?e.concat(x||[]):e,void 0,J("inert"));return()=>{r()}}},[o,x,w,f,S,C,D,E]),M(()=>{if(o||!w)return;let e=H(w),t=ee(e);queueMicrotask(()=>{let e=B(w),n=k.current,r=("number"==typeof n?e[n]:n.current)||w,o=X(w,t);A||o||!h||en(r,{preventScroll:r===w})})},[o,h,w,A,B,k]),M(()=>{if(o||!w)return;let e=!1,t=H(w),n=ee(t),r=y.current;function i(t){if("escapeKey"===t.type&&g.domReference.current&&(j.current=g.domReference.current),["referencePress","escapeKey"].includes(t.type))return;let n=t.data.returnFocus;"object"==typeof n?(P.current=!1,e=n.preventScroll):P.current=!n}return j.current=n,v.on("dismiss",i),()=>{v.off("dismiss",i);let 
n=ee(t),o=X(w,n)||O&&er(O.nodesRef.current,m).some(e=>{var t;return X(null==(t=e.context)?void 0:t.elements.floating,n)})||r.openEvent&&["click","mousedown"].includes(r.openEvent.type);o&&g.domReference.current&&(j.current=g.domReference.current),_.current&&V(j.current)&&!P.current&&en(j.current,{cancelPrevious:!1,preventScroll:e})}},[o,w,_,y,g,v,O,m]),M(()=>{if(!o&&C)return C.setFocusManagerState({...n,modal:f,closeOnFocusOut:p,open:h}),()=>{C.setFocusManagerState(null)}},[o,C,f,h,p,n]),M(()=>{if(!o&&w&&"function"==typeof MutationObserver){let e=()=>{let e=w.getAttribute("tabindex");S.current.includes("floating")||ee(H(w))!==g.domReference.current&&0===F().length?"0"!==e&&w.setAttribute("tabindex","0"):"-1"!==e&&w.setAttribute("tabindex","-1")};e();let t=new MutationObserver(e);return t.observe(w,{childList:!0,subtree:!0,attributes:!0}),()=>{t.disconnect()}}},[o,w,g,S,F]);let $=!o&&E&&!D&&(I||f);return a.createElement(a.Fragment,null,$&&a.createElement(ef,{"data-type":"inside",ref:null==C?void 0:C.beforeInsideRef,onFocus:e=>{if(f){let e=B();en("reference"===i[0]?e[0]:e[e.length-1])}else if(null!=C&&C.preserveTabOrder&&C.portalNode){if(P.current=!1,es(e,C.portalNode)){let e=el(document.body,"next")||x;null==e||e.focus()}else{var t;null==(t=C.beforeOutsideRef.current)||t.focus()}}}}),!D&&z("start"),r,z("end"),$&&a.createElement(ef,{"data-type":"inside",ref:null==C?void 0:C.afterInsideRef,onFocus:e=>{if(f)en(B()[0]);else if(null!=C&&C.preserveTabOrder&&C.portalNode){if(p&&(P.current=!0),es(e,C.portalNode)){let e=el(document.body,"prev")||x;null==e||e.focus()}else{var t;null==(t=C.afterOutsideRef.current)||t.focus()}}}}))}function em(e,t){let n=e.compareDocumentPosition(t);return n&Node.DOCUMENT_POSITION_FOLLOWING||n&Node.DOCUMENT_POSITION_CONTAINED_BY?-1:n&Node.DOCUMENT_POSITION_PRECEDING||n&Node.DOCUMENT_POSITION_CONTAINS?1:0}let eb=a.createContext({register:()=>{},unregister:()=>{},map:new Map,elementsRef:{current:[]}});function ev(e){let{children:t,elementsRef:n,labelsRef:r}=e,[o,i]=a.useState(()=>new Map),l=a.useCallback(e=>{i(t=>new Map(t).set(e,null))},[]),s=a.useCallback(e=>{i(t=>{let n=new Map(t);return n.delete(e),n})},[]);return M(()=>{let e=new Map(o),t=Array.from(e.keys()).sort(em);t.forEach((t,n)=>{e.set(t,n)}),!function(e,t){if(e.size!==t.size)return!1;for(let[n,r]of e.entries())if(r!==t.get(n))return!1;return!0}(o,e)&&i(e)},[o]),a.createElement(eb.Provider,{value:a.useMemo(()=>({register:l,unregister:s,map:o,elementsRef:n,labelsRef:r}),[l,s,o,n,r])},t)}function ey(e){let{label:t}=void 0===e?{}:e,[n,r]=a.useState(null),o=a.useRef(null),{register:i,unregister:l,map:s,elementsRef:c,labelsRef:u}=a.useContext(eb),f=a.useCallback(e=>{if(o.current=e,null!==n&&(c.current[n]=e,u)){var r;let o=void 0!==t;u.current[n]=o?t:null!=(r=null==e?void 0:e.textContent)?r:null}},[n,c,u,t]);return M(()=>{let e=o.current;if(e)return i(e),()=>{l(e)}},[i,l]),M(()=>{let e=o.current?s.get(o.current):null;null!=e&&r(e)},[s]),a.useMemo(()=>({ref:f,index:null==n?-1:n}),[n,f])}let ex=l["useInsertionEffect".toString()],ew=ex||(e=>e());function eE(e){let t=a.useRef(()=>{});return ew(()=>{t.current=e}),a.useCallback(function(){for(var e=arguments.length,n=Array(e),r=0;r{var t,n;return{escapeKeyBubbles:"boolean"==typeof e?e:null!=(t=null==e?void 0:e.escapeKey)&&t,outsidePressBubbles:"boolean"==typeof e?e:null==(n=null==e?void 0:e.outsidePress)||n}};function eC(e,t){void 
0===t&&(t={});let{open:n,onOpenChange:r,events:i,nodeId:l,elements:{reference:s,domReference:c,floating:u},dataRef:f}=e,{enabled:d=!0,escapeKey:p=!0,outsidePress:h=!0,outsidePressEvent:g="pointerdown",referencePress:m=!1,referencePressEvent:b="pointerdown",ancestorScroll:v=!1,bubbles:y}=t,x=U(),w=null!=$(),E=eE("function"==typeof h?h:()=>!1),S="function"==typeof h?E:h,k=a.useRef(!1),{escapeKeyBubbles:_,outsidePressBubbles:O}=eO(y),C=eE(e=>{if(!n||!d||!p||"Escape"!==e.key)return;let t=x?er(x.nodesRef.current,l):[];if(!_&&(e.stopPropagation(),t.length>0)){let e=!0;if(t.forEach(t=>{var n;if(null!=(n=t.context)&&n.open&&!t.context.dataRef.current.__escapeKeyBubbles){e=!1;return}}),!e)return}i.emit("dismiss",{type:"escapeKey",data:{returnFocus:{preventScroll:!1}}}),r(!1,"nativeEvent"in e?e.nativeEvent:e)}),A=eE(e=>{let t=k.current;if(k.current=!1,t||"function"==typeof S&&!S(e))return;let n=eo(e);if(V(n)&&u){let t=n.clientWidth>0&&n.scrollWidth>n.clientWidth,r=n.clientHeight>0&&n.scrollHeight>n.clientHeight,o=r&&e.offsetX>n.clientWidth;if(r){let t="rtl"===q(u).getComputedStyle(n).direction;t&&(o=e.offsetX<=n.offsetWidth-n.clientWidth)}if(o||t&&e.offsetY>n.clientHeight)return}let o=x&&er(x.nodesRef.current,l).some(t=>{var n;return eS(e,null==(n=t.context)?void 0:n.elements.floating)});if(eS(e,u)||eS(e,c)||o)return;let a=x?er(x.nodesRef.current,l):[];if(a.length>0){let e=!0;if(a.forEach(t=>{var n;if(null!=(n=t.context)&&n.open&&!t.context.dataRef.current.__outsidePressBubbles){e=!1;return}}),!e)return}i.emit("dismiss",{type:"outsidePress",data:{returnFocus:w?{preventScroll:!0}:G(e)||K(e)}}),r(!1,e)});return a.useEffect(()=>{if(!n||!d)return;function e(e){r(!1,e)}f.current.__escapeKeyBubbles=_,f.current.__outsidePressBubbles=O;let t=H(u);p&&t.addEventListener("keydown",C),S&&t.addEventListener(g,A);let i=[];return v&&(W(c)&&(i=(0,o.Kx)(c)),W(u)&&(i=i.concat((0,o.Kx)(u))),!W(s)&&s&&s.contextElement&&(i=i.concat((0,o.Kx)(s.contextElement)))),(i=i.filter(e=>{var n;return e!==(null==(n=t.defaultView)?void 0:n.visualViewport)})).forEach(t=>{t.addEventListener("scroll",e,{passive:!0})}),()=>{p&&t.removeEventListener("keydown",C),S&&t.removeEventListener(g,A),i.forEach(t=>{t.removeEventListener("scroll",e)})}},[f,u,c,s,p,S,g,n,r,v,d,_,O,C,A]),a.useEffect(()=>{k.current=!1},[S,g]),a.useMemo(()=>d?{reference:{onKeyDown:C,[ek[b]]:e=>{m&&(i.emit("dismiss",{type:"referencePress",data:{returnFocus:!1}}),r(!1,e.nativeEvent))}},floating:{onKeyDown:C,[e_[g]]:()=>{k.current=!0}}}:{},[d,i,m,g,b,r,C])}function eA(e){var t;void 0===e&&(e={});let{open:n=!1,onOpenChange:r,nodeId:o}=e,[l,s]=a.useState(null),c=(null==(t=e.elements)?void 0:t.reference)||l,u=(0,i.YF)(e),f=U(),d=eE((e,t)=>{e&&(h.current.openEvent=t),null==r||r(e,t)}),p=a.useRef(null),h=a.useRef({}),g=a.useState(()=>(function(){let e=new Map;return{emit(t,n){var r;null==(r=e.get(t))||r.forEach(e=>e(n))},on(t,n){e.set(t,[...e.get(t)||[],n])},off(t,n){var r;e.set(t,(null==(r=e.get(t))?void 0:r.filter(e=>e!==n))||[])}}})())[0],m=F(),b=a.useCallback(e=>{let 
t=W(e)?{getBoundingClientRect:()=>e.getBoundingClientRect(),contextElement:e}:e;u.refs.setReference(t)},[u.refs]),v=a.useCallback(e=>{(W(e)||null===e)&&(p.current=e,s(e)),(W(u.refs.reference.current)||null===u.refs.reference.current||null!==e&&!W(e))&&u.refs.setReference(e)},[u.refs]),y=a.useMemo(()=>({...u.refs,setReference:v,setPositionReference:b,domReference:p}),[u.refs,v,b]),x=a.useMemo(()=>({...u.elements,domReference:c}),[u.elements,c]),w=a.useMemo(()=>({...u,refs:y,elements:x,dataRef:h,nodeId:o,floatingId:m,events:g,open:n,onOpenChange:d}),[u,o,m,g,n,d,y,x]);return M(()=>{let e=null==f?void 0:f.nodesRef.current.find(e=>e.id===o);e&&(e.context=w)}),a.useMemo(()=>({...u,context:w,refs:y,elements:x}),[u,y,x,w])}function eN(e,t,n){let r=new Map;return{..."floating"===n&&{tabIndex:-1},...e,...t.map(e=>e?e[n]:null).concat(e).reduce((e,t)=>(t&&Object.entries(t).forEach(t=>{let[n,o]=t;if(0===n.indexOf("on")){if(r.has(n)||r.set(n,[]),"function"==typeof o){var i;null==(i=r.get(n))||i.push(o),e[n]=function(){for(var e,t=arguments.length,o=Array(t),i=0;ie(...o)).find(e=>void 0!==e)}}}else e[n]=o}),e),{})}}function eR(e){void 0===e&&(e=[]);let t=e,n=a.useCallback(t=>eN(t,e,"reference"),t),r=a.useCallback(t=>eN(t,e,"floating"),t),o=a.useCallback(t=>eN(t,e,"item"),e.map(e=>null==e?void 0:e.item));return a.useMemo(()=>({getReferenceProps:n,getFloatingProps:r,getItemProps:o}),[n,r,o])}let eT=!1,eP="ArrowUp",eM="ArrowDown",ej="ArrowLeft",eL="ArrowRight";function eI(e,t,n){return Math.floor(e/t)!==n}function eD(e,t){return t<0||t>=e.current.length}function eF(e,t){let{startingIndex:n=-1,decrement:r=!1,disabledIndices:o,amount:i=1}=void 0===t?{}:t,a=e.current,l=n;do{var s,c;l+=r?-i:i}while(l>=0&&l<=a.length-1&&(o?o.includes(l):null==a[l]||(null==(s=a[l])?void 0:s.hasAttribute("disabled"))||(null==(c=a[l])?void 0:c.getAttribute("aria-disabled"))==="true"));return l}function eB(e,t,n){switch(e){case"vertical":return t;case"horizontal":return n;default:return t||n}}function ez(e,t){return eB(t,e===eP||e===eM,e===ej||e===eL)}function e$(e,t,n){return eB(t,e===eM,n?e===ej:e===eL)||"Enter"===e||" "==e||""===e}function eU(e,t){return eF(e,{disabledIndices:t})}function eH(e,t){return eF(e,{decrement:!0,startingIndex:e.current.length,disabledIndices:t})}function eZ(e,t){let{open:n,onOpenChange:r,refs:o,elements:{domReference:i,floating:l}}=e,{listRef:s,activeIndex:c,onNavigate:u=()=>{},enabled:f=!0,selectedIndex:d=null,allowEscape:p=!1,loop:h=!1,nested:g=!1,rtl:m=!1,virtual:b=!1,focusItemOnOpen:v="auto",focusItemOnHover:y=!0,openOnArrowKeyDown:x=!0,disabledIndices:w,orientation:E="vertical",cols:S=1,scrollItemIntoView:k=!0}=t,_=$(),O=U(),C=eE(u),A=a.useRef(v),N=a.useRef(null!=d?d:-1),R=a.useRef(null),T=a.useRef(!0),P=a.useRef(C),j=a.useRef(!!l),L=a.useRef(!1),I=a.useRef(!1),D=Q(w),F=Q(n),B=Q(k),[z,q]=a.useState(),W=eE(function(e,t,n){void 0===n&&(n=!1);let r=e.current[t.current];r&&(b?q(r.id):en(r,{preventScroll:!0,sync:!!(Z().toLowerCase().startsWith("mac")&&!navigator.maxTouchPoints&&Y())&&(eT||L.current)}),requestAnimationFrame(()=>{let e=B.current,t=e&&r&&(n||!T.current);t&&(null==r.scrollIntoView||r.scrollIntoView("boolean"==typeof e?{block:"nearest",inline:"nearest"}:e))}))});M(()=>{document.createElement("div").focus({get preventScroll(){return 
eT=!0,!1}})},[]),M(()=>{f&&(n&&l?A.current&&null!=d&&(I.current=!0,C(d)):j.current&&(N.current=-1,P.current(null)))},[f,n,l,d,C]),M(()=>{if(f&&n&&l){if(null==c){if(L.current=!1,null==d&&(j.current&&(N.current=-1,W(s,N)),!j.current&&A.current&&(null!=R.current||!0===A.current&&null==R.current))){let e=0,t=()=>{if(null==s.current[0]){if(e<2){let n=e?requestAnimationFrame:queueMicrotask;n(t)}e++}else N.current=null==R.current||e$(R.current,E,m)||g?eU(s,D.current):eH(s,D.current),R.current=null,C(N.current)};t()}}else eD(s,c)||(N.current=c,W(s,N,I.current),I.current=!1)}},[f,n,l,c,d,g,s,E,m,C,W,D]),M(()=>{if(f&&j.current&&!l&&O){var e,t;let n=O.nodesRef.current,r=null==(e=n.find(e=>e.id===_))?void 0:null==(t=e.context)?void 0:t.elements.floating,o=ee(H(l)),i=n.some(e=>e.context&&X(e.context.elements.floating,o));r&&!i&&r.focus({preventScroll:!0})}},[f,l,O,_]),M(()=>{P.current=C,j.current=!!l}),M(()=>{n||(R.current=null)},[n]);let J=null!=c,et=a.useMemo(()=>{function e(e){if(!n)return;let t=s.current.indexOf(e);-1!==t&&C(t)}let t={onFocus(t){let{currentTarget:n}=t;e(n)},onClick:e=>{let{currentTarget:t}=e;return t.focus({preventScroll:!0})},...y&&{onMouseMove(t){let{currentTarget:n}=t;e(n)},onPointerLeave(e){let{pointerType:t}=e;T.current&&"touch"!==t&&(N.current=-1,W(s,N),C(null),b||en(o.floating.current,{preventScroll:!0}))}}};return t},[n,o,W,y,s,C,b]);return a.useMemo(()=>{if(!f)return{};let e=D.current;function t(t){var a;if(T.current=!1,L.current=!0,!F.current&&t.currentTarget===o.floating.current)return;if(g&&(a=t.key,eB(E,m?a===eL:a===ej,a===eP))){ei(t),r(!1,t.nativeEvent),V(i)&&i.focus();return}let l=N.current,c=eU(s,e),u=eH(s,e);if("Home"===t.key&&(ei(t),N.current=c,C(N.current)),"End"===t.key&&(ei(t),N.current=u,C(N.current)),S>1){let n=N.current;if(t.key===eP){if(ei(t),-1===n)N.current=u;else if(N.current=eF(s,{startingIndex:n,amount:S,decrement:!0,disabledIndices:e}),h&&(n-Se?r:r-S}eD(s,N.current)&&(N.current=n),C(N.current)}if(t.key===eM&&(ei(t),-1===n?N.current=c:(N.current=eF(s,{startingIndex:n,amount:S,disabledIndices:e}),h&&n+S>u&&(N.current=eF(s,{startingIndex:n%S-S,amount:S,disabledIndices:e}))),eD(s,N.current)&&(N.current=n),C(N.current)),"both"===E){let r=Math.floor(n/S);t.key===eL&&(ei(t),n%S!=S-1?(N.current=eF(s,{startingIndex:n,disabledIndices:e}),h&&eI(N.current,S,r)&&(N.current=eF(s,{startingIndex:n-n%S-1,disabledIndices:e}))):h&&(N.current=eF(s,{startingIndex:n-n%S-1,disabledIndices:e})),eI(N.current,S,r)&&(N.current=n)),t.key===ej&&(ei(t),n%S!=0?(N.current=eF(s,{startingIndex:n,disabledIndices:e,decrement:!0}),h&&eI(N.current,S,r)&&(N.current=eF(s,{startingIndex:n+(S-n%S),decrement:!0,disabledIndices:e}))):h&&(N.current=eF(s,{startingIndex:n+(S-n%S),decrement:!0,disabledIndices:e})),eI(N.current,S,r)&&(N.current=n));let o=Math.floor(u/S)===r;eD(s,N.current)&&(h&&o?N.current=t.key===ej?u:eF(s,{startingIndex:n-n%S-1,disabledIndices:e}):N.current=n),C(N.current);return}}if(ez(t.key,E)){if(ei(t),n&&!b&&ee(t.currentTarget.ownerDocument)===t.currentTarget){N.current=e$(t.key,E,m)?c:u,C(N.current);return}e$(t.key,E,m)?h?N.current=l>=u?p&&l!==s.current.length?-1:c:eF(s,{startingIndex:l,disabledIndices:e}):N.current=Math.min(u,eF(s,{startingIndex:l,disabledIndices:e})):h?N.current=l<=c?p&&-1!==l?s.current.length:u:eF(s,{startingIndex:l,decrement:!0,disabledIndices:e}):N.current=Math.max(c,eF(s,{startingIndex:l,decrement:!0,disabledIndices:e})),eD(s,N.current)?C(null):C(N.current)}}function a(e){"auto"===v&&G(e.nativeEvent)&&(A.current=!0)}let 
l=b&&n&&J&&{"aria-activedescendant":z};return{reference:{...l,onKeyDown(o){var i;T.current=!1;let a=0===o.key.indexOf("Arrow");if(b&&n)return t(o);if(!n&&!x&&a)return;let l=a||"Enter"===o.key||""===o.key.trim(),c=ez(o.key,E),u=(i=o.key,eB(E,m?i===ej:i===eL,i===eM));if(l&&(R.current=g&&c?null:o.key),g){u&&(ei(o),n?(N.current=eU(s,e),C(N.current)):r(!0,o.nativeEvent));return}c&&(null!=d&&(N.current=d),ei(o),!n&&x?r(!0,o.nativeEvent):t(o),n&&C(N.current))},onFocus(){n&&C(null)},onPointerDown:function(e){A.current=v,"auto"===v&&K(e.nativeEvent)&&(A.current=!0)},onMouseDown:a,onClick:a},floating:{"aria-orientation":"both"===E?void 0:E,...l,onKeyDown:t,onPointerMove(){T.current=!0}},item:et}},[i,o,z,D,F,s,f,E,m,b,n,J,g,d,x,p,S,h,v,C,r,et])}function eq(e,t){void 0===t&&(t={});let{open:n,floatingId:r}=e,{enabled:o=!0,role:i="dialog"}=t,l=F();return a.useMemo(()=>{let e={id:r,role:i};return o?"tooltip"===i?{reference:{"aria-describedby":n?r:void 0},floating:e}:{reference:{"aria-expanded":n?"true":"false","aria-haspopup":"alertdialog"===i?"dialog":i,"aria-controls":n?r:void 0,..."listbox"===i&&{role:"combobox"},..."menu"===i&&{id:l}},floating:{...e,..."menu"===i&&{"aria-labelledby":l}}}:{}},[o,i,n,r,l])}},29872:function(e,t,n){"use strict";var r,o=Object.assign||function(e){for(var t=1;t=0)&&Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["fill","width","height","style"]);return i.default.createElement("svg",o({viewBox:"0 0 24 24",style:o({fill:void 0===t?"currentColor":t,width:void 0===n?24:n,height:void 0===r?24:r},l)},s),i.default.createElement("path",{d:"M21,7L9,19L3.5,13.5L4.91,12.09L9,16.17L19.59,5.59L21,7Z"}))}},42684:function(e,t,n){"use strict";var r,o=Object.assign||function(e){for(var t=1;t=0)&&Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["fill","width","height","style"]);return i.default.createElement("svg",o({viewBox:"0 0 24 24",style:o({fill:void 0===t?"currentColor":t,width:void 0===n?24:n,height:void 0===r?24:r},l)},s),i.default.createElement("path",{d:"M12,18.17L8.83,15L7.42,16.41L12,21L16.59,16.41L15.17,15M12,5.83L15.17,9L16.58,7.59L12,3L7.41,7.59L8.83,9L12,5.83Z"}))}},16329:function(e,t,n){!function(e,t,n){"use strict";var r=function(e){if(e&&e.__esModule)return e;var t=Object.create(null);return e&&Object.keys(e).forEach(function(n){if("default"!==n){var r=Object.getOwnPropertyDescriptor(e,n);Object.defineProperty(t,n,r.get?r:{enumerable:!0,get:function(){return e[n]}})}}),t.default=e,Object.freeze(t)}(t);function o(){return(o=Object.assign?Object.assign.bind():function(e){for(var t=1;t{this.listeners.add(e);let t=this.options?.onSubscribe?.(e,this);return()=>{this.listeners.delete(e),t?.()}};setState=e=>{let t=this.state;this.state=this.options?.updateFn?this.options.updateFn(t)(e):e(t),this.state!==t&&(this.options?.onUpdate?.(this.state,t),this.queue.push(()=>{this.listeners.forEach(e=>e(this.state,t))}),this.#e())};#e=()=>{this.batching||(this.queue.forEach(e=>e()),this.queue=[])};batch=e=>{this.batching=!0,e(),this.batching=!1,this.#e()}}function l(e,t){if(Object.is(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;let n=Object.keys(e);if(n.length!==Object.keys(t).length)return!1;for(let r=0;r(e.preventDefault(),e.returnValue=""),f=()=>{removeEventListener(c,u,{capture:!0})};function d(e){let t=e.getLocation(),n=()=>{},r=new Set,o=[],i=[],a=()=>{if(o.length)o[0]?.(a,()=>{o=[],f()});else{for(;i.length;)i.shift()?.();s()}},l=e=>{i.push(e),a()},s=()=>{t=e.getLocation(),r.forEach(e=>e())};return{get 
location(){return t},listen:t=>(0===r.size&&(n=e.listener(s)),r.add(t),()=>{r.delete(t),0===r.size&&n()}),push:(t,n)=>{l(()=>{e.pushState(t,n)})},replace:(t,n)=>{l(()=>{e.replaceState(t,n)})},go:t=>{l(()=>{e.go(t)})},back:()=>{l(()=>{e.back()})},forward:()=>{l(()=>{e.forward()})},createHref:t=>e.createHref(t),block:e=>(o.push(e),1===o.length&&addEventListener(c,u,{capture:!0}),()=>{(o=o.filter(t=>t!==e)).length||f()})}}function p(e){let t=e?.getHref??(()=>`${window.location.pathname}${window.location.hash}${window.location.search}`),n=e?.createHref??(e=>e);return d({getLocation:()=>g(t(),history.state),listener:e=>(window.addEventListener(s,e),()=>{window.removeEventListener(s,e)}),pushState:(e,t)=>{window.history.pushState({...t,key:m()},"",n(e))},replaceState:(e,t)=>{window.history.replaceState({...t,key:m()},"",n(e))},back:()=>window.history.back(),forward:()=>window.history.forward(),go:e=>window.history.go(e),createHref:e=>n(e)})}function h(e={initialEntries:["/"]}){let t=e.initialEntries,n=e.initialIndex??t.length-1,r={};return d({getLocation:()=>g(t[n],r),listener:()=>()=>{},pushState:(e,o)=>{r={...o,key:m()},t.push(e),n++},replaceState:(e,o)=>{r={...o,key:m()},t[n]=e},back:()=>{n--},forward:()=>{n=Math.min(n+1,t.length-1)},go:e=>window.history.go(e),createHref:e=>e})}function g(e,t){let n=e.indexOf("#"),r=e.indexOf("?");return{href:e,pathname:e.substring(0,n>0?r>0?Math.min(n,r):n:r>0?r:e.length),hash:n>-1?e.substring(n,r):"",search:r>-1?e.substring(r):"",state:t}}function m(){return(Math.random()+1).toString(36).substring(7)}function b(e){return e[e.length-1]}function v(e,t){return"function"==typeof e?e(t):e}function y(e,t){return t.reduce((t,n)=>(t[n]=e[n],t),{})}function x(e,t){if(e===t)return e;let n=Array.isArray(e)&&Array.isArray(t);if(n||w(e)&&w(t)){let r=n?e.length:Object.keys(e).length,o=n?t:Object.keys(t),i=o.length,a=n?[]:{},l=0;for(let r=0;r!S(e[n],t[n])):!(!Array.isArray(e)||!Array.isArray(t))&&e.length===t.length&&e.every((e,n)=>S(e,t[n])))}function k(e){return _(e.filter(Boolean).join("/"))}function _(e){return e.replace(/\/{2,}/g,"/")}function O(e){return"/"===e?e:e.replace(/^\/{1,}/,"")}function C(e){return"/"===e?e:e.replace(/\/{1,}$/,"")}function A(e){return C(O(e))}function N(e,t,n){t=t.replace(RegExp(`^${e}`),"/"),n=n.replace(RegExp(`^${e}`),"/");let r=R(t),o=R(n);return o.forEach((e,t)=>{if("/"===e.value)t?t===o.length-1&&r.push(e):r=[e];else if(".."===e.value)r.length>1&&"/"===b(r)?.value&&r.pop(),r.pop();else{if("."===e.value)return;r.push(e)}}),_(k([e,...r.map(e=>e.value)]))}function R(e){if(!e)return[];let t=[];if("/"===(e=_(e)).slice(0,1)&&(e=e.substring(1),t.push({type:"pathname",value:"/"})),!e)return t;let n=e.split("/").filter(Boolean);return t.push(...n.map(e=>"$"===e||"*"===e?{type:"wildcard",value:e}:"$"===e.charAt(0)?{type:"param",value:e}:{type:"pathname",value:e})),"/"===e.slice(-1)&&(e=e.substring(1),t.push({type:"pathname",value:"/"})),t}function T(e,t,n){return k(R(e).map(e=>["$","*"].includes(e.value)&&!n?"":"param"===e.type?t[e.value.substring(1)]??"":e.value))}function P(e,t,n){let r=M(e,t,n);if(!n.to||r)return r??{}}function M(e,t,n){if(!t.startsWith(e))return;let r=R(t="/"!=e?t.substring(e.length):t),o=R(`${n.to??"$"}`);"/"===b(r)?.value&&r.pop();let i={};return(()=>{for(let e=0;ee.value)),!0);if("pathname"===a.type){if("/"===a.value&&!t?.value)return!0;if(t){if(n.caseSensitive){if(a.value!==t.value)return!1}else 
if(a.value.toLowerCase()!==t.value.toLowerCase())return!1}}if(!t)return!1;if("param"===a.type){if("/"===t?.value)return!1;"$"!==t.value.charAt(0)&&(i[a.value.substring(1)]=t.value)}}if(l&&!s)return!!n.fuzzy}return!0})()?i:void 0}function j(e,t){var n,r,o,i="";for(n in e)if(void 0!==(o=e[n])){if(Array.isArray(o))for(r=0;r{this.originalIndex=e.originalIndex,this.router=e.router;let t=this.options,n=!t?.path&&!t?.id;this.parentRoute=this.options?.getParentRoute?.(),n?this.path=D:i(this.parentRoute);let r=n?D:t.path;r&&"/"!==r&&(r=A(r));let o=t?.id||r,a=n?D:k([this.parentRoute.id===D?"":this.parentRoute.id,o]);r===D&&(r="/"),a!==D&&(a=k(["/",a]));let l=a===D?"/":C(k([this.parentRoute.fullPath,r]));this.path=r,this.id=a,this.fullPath=l};addChildren=e=>(this.children=e,this)}class B extends F{constructor(e){super(e)}static withRouterContext=()=>e=>new B(e)}let z=U(JSON.parse),$=H(JSON.stringify);function U(e){return t=>{"?"===t.substring(0,1)&&(t=t.substring(1));let n=I(t);for(let t in n){let r=n[t];if("string"==typeof r)try{n[t]=e(r)}catch(e){}}return n}}function H(e){return t=>{Object.keys(t={...t}).forEach(n=>{let r=t[n];if(void 0===r||void 0===r)delete t[n];else if(r&&"object"==typeof r&&null!==r)try{t[n]=e(r)}catch(e){}});let n=j(t).toString();return n?`?${n}`:""}}let Z=async({router:e,routeMatch:t})=>{let n=e.buildNext({to:".",search:e=>({...e??{},__data:{matchId:t.id}})}),r=await fetch(n.href,{method:"GET",signal:t.abortController.signal});if(r.ok)return r.json();throw Error("Failed to fetch match data")};class q{#t;startedLoadingAt=Date.now();resolveNavigation=()=>{};constructor(e){this.options={defaultPreloadDelay:50,context:void 0,...e,stringifySearch:e?.stringifySearch??$,parseSearch:e?.parseSearch??z,fetchServerDataFn:e?.fetchServerDataFn??Z},this.__store=new a(V(),{onUpdate:e=>{this.state=e}}),this.state=this.__store.state,this.basepath="",this.update(e),this.options.Router?.(this);let t=this.buildNext({hash:!0,fromCurrent:!0,search:!0,state:!0});this.state.latestLocation.href!==t.href&&this.#n({...t,replace:!0})}reset=()=>{this.__store.setState(e=>Object.assign(e,V()))};mount=()=>(W||this.state.currentMatches.length||this.safeLoad(),()=>{});update=e=>{if(Object.assign(this.options,e),!this.history||this.options.history&&this.options.history!==this.history){this.#t&&this.#t(),this.history=this.options.history??(W?h():p());let e=this.#r();this.__store.setState(t=>({...t,latestLocation:e,currentLocation:e})),this.#t=this.history.listen(()=>{this.safeLoad({next:this.#r(this.state.latestLocation)})})}let{basepath:t,routeTree:n}=this.options;return this.basepath=`/${A(t??"")??""}`,n&&(this.routesById={},this.routeTree=this.#o(n)),this};buildNext=e=>{let t=this.#i(e),n=this.matchRoutes(t.pathname);return this.#i({...e,__matches:n})};cancelMatches=()=>{[...this.state.currentMatches,...this.state.pendingMatches||[]].forEach(e=>{e.cancel()})};safeLoad=e=>{this.load(e).catch(e=>{console.warn(e),i(!1)})};load=async e=>{let t,n=Date.now(),r=n;if(this.startedLoadingAt=r,this.cancelMatches(),this.__store.batch(()=>{e?.next&&this.__store.setState(t=>({...t,latestLocation:e.next})),t=this.matchRoutes(this.state.latestLocation.pathname,{strictParseParams:!0}),this.__store.setState(e=>({...e,status:"pending",pendingMatches:t,pendingLocation:this.state.latestLocation}))}),await this.loadMatches(t,this.state.pendingLocation),this.startedLoadingAt!==r)return this.navigationPromise;let o=this.state.currentMatches,i=[],a=[];o.forEach(e=>{t.find(t=>t.id===e.id)?a.push(e):i.push(e)});let 
l=t.filter(e=>!o.find(t=>t.id===e.id));n=Date.now(),i.forEach(e=>{e.__onExit?.({params:e.params,search:e.state.routeSearch}),"error"===e.state.status&&this.__store.setState(e=>({...e,status:"idle",error:void 0}))}),a.forEach(e=>{e.route.options.onTransition?.({params:e.params,search:e.state.routeSearch})}),l.forEach(e=>{e.__onExit=e.route.options.onLoaded?.({params:e.params,search:e.state.search})});let s=this.state.currentLocation;this.__store.setState(e=>({...e,status:"idle",currentLocation:this.state.latestLocation,currentMatches:t,pendingLocation:void 0,pendingMatches:void 0})),t.forEach(e=>{e.__commit()}),s.href!==this.state.currentLocation.href&&this.options.onRouteChange?.(),this.resolveNavigation()};getRoute=e=>{let t=this.routesById[e];return i(t),t};loadRoute=async(e=this.state.latestLocation)=>{let t=this.buildNext(e),n=this.matchRoutes(t.pathname,{strictParseParams:!0});return await this.loadMatches(n,t),n};preloadRoute=async(e=this.state.latestLocation)=>{let t=this.buildNext(e),n=this.matchRoutes(t.pathname,{strictParseParams:!0});return await this.loadMatches(n,t,{preload:!0}),n};matchRoutes=(e,t)=>{let n=[];if(!this.routeTree)return n;let r=[...this.state.currentMatches,...this.state.pendingMatches??[]],o=async i=>{let a=b(n)?.params??{},l=this.options.filterRoutes?.(i)??i,s=[],c=(n,r)=>(r.some(r=>{let o=r.children;if(!r.path&&o?.length)return c([...s,r],o);let i=!("/"===r.path&&!o?.length),l=P(this.basepath,e,{to:r.fullPath,fuzzy:i,caseSensitive:r.options.caseSensitive??this.options.caseSensitive});if(l){let e;try{e=r.options.parseParams?.(l)??l}catch(e){if(t?.strictParseParams)throw e}a={...a,...e}}return l&&(s=[...n,r]),!!s.length}),!!s.length);if(c([],l),!s.length)return;s.forEach(e=>{let t=T(e.path,a),o=T(e.id,a,!0),i=r.find(e=>e.id===o)||new Y(this,e,{id:o,params:a,pathname:k([this.basepath,t])});n.push(i)});let u=b(s).children;u?.length&&o(u)};return o([this.routeTree]),n};loadMatches=async(e,t,n)=>{let r;try{await Promise.all(e.map(async(e,t)=>{try{await e.route.options.beforeLoad?.({router:this,match:e})}catch(o){if(G(o))throw o;r=r??t;let n=e.route.options.onBeforeLoadError??e.route.options.onError;try{n?.(o)}catch(t){if(G(t))throw t;return void e.__store.setState(e=>({...e,error:t,status:"error",updatedAt:Date.now()}))}e.__store.setState(e=>({...e,error:o,status:"error",updatedAt:Date.now()}))}}))}catch(e){if(G(e))return void(n?.preload||this.navigate(e));throw e}let o=e.slice(0,r),i=o.map(async(e,r)=>{let i=o[r-1];e.__load({preload:n?.preload,location:t,parentMatch:i}),await e.__loadPromise,i&&await i.__loadPromise});await Promise.all(i)};reload=()=>{this.navigate({fromCurrent:!0,replace:!0,search:!0})};resolvePath=(e,t)=>N(this.basepath,e,_(t));navigate=async({from:e,to:t="",search:n,hash:r,replace:o,params:a})=>{let l;let s=String(t),c=void 0===e?e:String(e);try{new URL(`${s}`),l=!0}catch(e){}return i(!l),this.#n({from:c,to:s,search:n,hash:r,replace:o,params:a})};matchRoute=(e,t)=>{e={...e,to:e.to?this.resolvePath(e.from??"",e.to):void 0};let n=this.buildNext(e),r=t?.pending?this.state.pendingLocation:this.state.currentLocation;if(!r)return!1;let o=P(this.basepath,r.pathname,{...t,to:n.pathname});return!!o&&(t?.includeSearch??1?!!S(r.search,n.search)&&o:o)};buildLink=({from:e,to:t=".",search:n,params:r,hash:o,target:i,replace:a,activeOptions:l,preload:s,preloadDelay:c,disabled:u})=>{try{return new URL(`${t}`),{type:"external",href:t}}catch(e){}let f={from:e,to:t,search:n,params:r,hash:o,replace:a},d=this.buildNext(f);s=s??this.options.defaultPreload;let 
p=c??this.options.defaultPreloadDelay??0,h=this.state.currentLocation.pathname.split("/"),g=d.pathname.split("/").every((e,t)=>e===h[t]),m=l?.exact?this.state.currentLocation.pathname===d.pathname:g,b=!l?.includeHash||this.state.currentLocation.hash===d.hash,v=!(l?.includeSearch??1)||S(this.state.currentLocation.search,d.search);return{type:"internal",next:d,handleFocus:e=>{s&&this.preloadRoute(f).catch(e=>{console.warn(e),console.warn("Error preloading route! ☝️")})},handleClick:e=>{u||e.metaKey||e.altKey||e.ctrlKey||e.shiftKey||e.defaultPrevented||i&&"_self"!==i||0!==e.button||(e.preventDefault(),this.#n(f))},handleEnter:e=>{let t=e.target||{};if(s){if(t.preloadTimeout)return;t.preloadTimeout=setTimeout(()=>{t.preloadTimeout=null,this.preloadRoute(f).catch(e=>{console.warn(e),console.warn("Error preloading route! ☝️")})},p)}},handleLeave:e=>{let t=e.target||{};t.preloadTimeout&&(clearTimeout(t.preloadTimeout),t.preloadTimeout=null)},handleTouchStart:e=>{this.preloadRoute(f).catch(e=>{console.warn(e),console.warn("Error preloading route! ☝️")})},isActive:m&&b&&v,disabled:u}};dehydrate=()=>({state:{...y(this.state,["latestLocation","currentLocation","status","lastUpdated"]),currentMatches:this.state.currentMatches.map(e=>({id:e.id,state:{status:e.state.status}}))}});hydrate=e=>{this.__store.setState(t=>{let n=this.matchRoutes(e.state.latestLocation.pathname,{strictParseParams:!0});return n.forEach((t,n)=>{let r=e.state.currentMatches[n];i(r&&r.id===t.id),t.__store.setState(e=>({...e,...r.state}))}),{...t,...e.state,currentMatches:n}})};#o=e=>{let t=(e,n)=>{e.forEach((e,n)=>{e.init({originalIndex:n,router:this}),i(!this.routesById[e.id],String(e.id)),this.routesById[e.id]=e;let r=e.children;r?.length&&(t(r),e.children=r.map((e,t)=>{let n=R(O(_(e.path??"/")));for(;n.length>1&&"/"===n[0]?.value;)n.shift();let r=0;return n.forEach((e,t)=>{let n=1;for(;t--;)n*=.001;"pathname"===e.type&&"/"!==e.value?r+=1*n:"param"===e.type?r+=2*n:"wildcard"===e.type&&(r+=3*n)}),{child:e,parsed:n,index:t,score:r}}).sort((e,t)=>e.score!==t.score?e.score-t.score:e.index-t.index).map(e=>e.child))})};t([e]);let n=(e,t)=>{e.forEach(e=>{e.isRoot?i(!t):i(!t||e.parentRoute===t,(e.path,e.parentRoute?.id,t?.id)),e.children&&n(e.children,e)})};return n([e],void 0),e};#r=e=>{let{pathname:t,search:n,hash:r,state:o}=this.history.location,i=this.options.parseSearch(n);return{pathname:t,searchStr:n,search:x(e?.search,i),hash:r.split("#").reverse()[0]??"",href:`${t}${n}${r}`,state:o,key:o?.key||"__init__"}};#i=(e={})=>{e.fromCurrent=e.fromCurrent??""===e.to;let t=e.fromCurrent?this.state.latestLocation.pathname:e.from??this.state.latestLocation.pathname,n=N(this.basepath??"/",t,`${e.to??""}`),r={...b(this.matchRoutes(this.state.latestLocation.pathname,{strictParseParams:!0}))?.params},o=!0===(e.params??!0)?r:v(e.params,r);o&&e.__matches?.map(e=>e.route.options.stringifyParams).filter(Boolean).forEach(e=>{o={...o,...e(o)}}),n=T(n,o??{});let i=e.__matches?.map(e=>e.route.options.preSearchFilters??[]).flat().filter(Boolean)??[],a=e.__matches?.map(e=>e.route.options.postSearchFilters??[]).flat().filter(Boolean)??[],l=i?.length?i?.reduce((e,t)=>t(e),this.state.latestLocation.search):this.state.latestLocation.search,s=!0===e.search?l:e.search?v(e.search,l)??{}:i?.length?l:{},c=a?.length?a.reduce((e,t)=>t(e),s):s,u=x(this.state.latestLocation.search,c),f=this.options.stringifySearch(u),d=!0===e.hash?this.state.latestLocation.hash:v(e.hash,this.state.latestLocation.hash);return 
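/*
 * buildLink above wires hover-intent preloading: mouseenter arms a timer
 * (stored as preloadTimeout on the event target) that calls preloadRoute
 * after the configured delay, and mouseleave disarms it, so only deliberate
 * hovers trigger a preload. The same debounce pattern in isolation, with
 * illustrative names (el/preload/delayMs are assumptions):
 *
 *   function hoverPreload(el, preload, delayMs) {
 *     el.addEventListener('mouseenter', () => {
 *       if (el.preloadTimeout) return;
 *       el.preloadTimeout = setTimeout(() => {
 *         el.preloadTimeout = null;
 *         preload();
 *       }, delayMs);
 *     });
 *     el.addEventListener('mouseleave', () => {
 *       if (el.preloadTimeout) {
 *         clearTimeout(el.preloadTimeout);
 *         el.preloadTimeout = null;
 *       }
 *     });
 *   }
 */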
d=d?`#${d}`:"",{pathname:n,search:u,searchStr:f,state:!0===e.state?this.state.latestLocation.state:v(e.state,this.state.latestLocation.state),hash:d,href:this.history.createHref(`${n}${f}${d}`),key:e.key}};#n=async e=>{let t=this.buildNext(e),n=""+Date.now()+Math.random();this.navigateTimeout&&clearTimeout(this.navigateTimeout);let r="replace";e.replace||(r="push"),this.state.latestLocation.href!==t.href||t.key||(r="replace");let o=`${t.pathname}${t.searchStr}${t.hash?`${t.hash}`:""}`;return this.history["push"===r?"push":"replace"](o,{id:n,...t.state}),this.navigationPromise=new Promise(e=>{let t=this.resolveNavigation;this.resolveNavigation=()=>{t(),e()}})}}let W="undefined"==typeof window||!window.document.createElement;function V(){return{status:"idle",latestLocation:null,currentLocation:null,currentMatches:[],lastUpdated:Date.now()}}function G(e){return!!e?.isRedirect}let K=["component","errorComponent","pendingComponent"];class Y{abortController=new AbortController;constructor(e,t,n){Object.assign(this,{route:t,router:e,id:n.id,pathname:n.pathname,params:n.params,__store:new a({updatedAt:0,routeSearch:{},search:{},status:"idle"},{onUpdate:e=>{this.state=e}})}),this.state=this.__store.state,K.map(async e=>{let t=this.route.options[e];"function"!=typeof this[e]&&(this[e]=t)}),"idle"!==this.state.status||this.#a()||this.__store.setState(e=>({...e,status:"success"}))}#a=()=>!(!this.route.options.onLoad&&!K.some(e=>this.route.options[e]?.preload));__commit=()=>{let{routeSearch:e,search:t,context:n,routeContext:r}=this.#l({location:this.router.state.currentLocation});this.context=n,this.routeContext=r,this.__store.setState(n=>({...n,routeSearch:x(n.routeSearch,e),search:x(n.search,t)}))};cancel=()=>{this.abortController?.abort()};#s=e=>{let t=this.parentMatch?this.parentMatch.#s(e):{search:e.location.search,routeSearch:e.location.search};try{let e=("object"==typeof this.route.options.validateSearch?this.route.options.validateSearch.parse:this.route.options.validateSearch)?.(t.search)??{};return{routeSearch:e,search:{...t.search,...e}}}catch(t){if(G(t))throw t;(this.route.options.onValidateSearchError??this.route.options.onError)?.(t);let e=Error("Invalid search params found",{cause:t});throw e.code="INVALID_SEARCH_PARAMS",e}};#l=e=>{let{search:t,routeSearch:n}=this.#s(e);try{let e=this.route.options.getContext?.({parentContext:this.parentMatch?.routeContext??{},context:this.parentMatch?.context??this.router?.options.context??{},params:this.params,search:t})||{};return{routeSearch:n,search:t,context:{...this.parentMatch?.context??this.router?.options.context,...e},routeContext:e}}catch(e){throw this.route.options.onError?.(e),e}};__load=async e=>{let t;this.parentMatch=e.parentMatch;try{t=this.#l(e)}catch(t){return G(t)?void(e?.preload||this.router.navigate(t)):void this.__store.setState(e=>({...e,status:"error",error:t}))}let{routeSearch:n,search:r,context:o,routeContext:i}=t;if("pending"!==this.state.status)return this.__loadPromise=Promise.resolve().then(async()=>{let t;let a=""+Date.now()+Math.random();this.#c=a,"idle"===this.state.status&&this.__store.setState(e=>({...e,status:"pending"}));let l=(async()=>{await Promise.all(K.map(async e=>{let t=this.route.options[e];this[e]?.preload&&(this[e]=await this.router.options.loadComponent(t))}))})(),s=Promise.resolve().then(()=>{if(this.route.options.onLoad)return this.route.options.onLoad({params:this.params,routeSearch:n,search:r,signal:this.abortController.signal,preload:!!e?.preload,routeContext:i,context:o})});try{if(await 
Promise.all([l,s]),t=a!==this.#c?this.__loadPromise:void 0)return await t;this.__store.setState(e=>({...e,error:void 0,status:"success",updatedAt:Date.now()}))}catch(n){if(G(n))return void(e?.preload||this.router.navigate(n));let t=this.route.options.onLoadError??this.route.options.onError;try{t?.(n)}catch(t){return G(t)?void(e?.preload||this.router.navigate(t)):void this.__store.setState(e=>({...e,error:t,status:"error",updatedAt:Date.now()}))}this.__store.setState(e=>({...e,error:n,status:"error",updatedAt:Date.now()}))}finally{delete this.__loadPromise}}),this.__loadPromise};#c=""}/** - * react-store - * - * Copyright (c) TanStack - * - * This source code is licensed under the MIT license found in the - * LICENSE.md file in the root directory of this source tree. - * - * @license MIT - */function X(e,t=e=>e,r){return n.useSyncExternalStoreWithSelector(e.subscribe,()=>e.state,()=>e.state,t,r?l:void 0)}function J(e){let t=en(),{type:n,children:o,target:i,activeProps:a=()=>({className:"active"}),inactiveProps:l=()=>({}),activeOptions:s,disabled:c,hash:u,search:f,params:d,to:p=".",preload:h,preloadDelay:g,replace:m,style:b,className:y,onClick:x,onFocus:w,onMouseEnter:E,onMouseLeave:S,onTouchStart:k,..._}=e,O=t.buildLink(e);if("external"===O.type){let{href:e}=O;return{href:e}}let{handleClick:C,handleFocus:A,handleEnter:N,handleLeave:R,handleTouchStart:T,isActive:P,next:M}=O,j=e=>t=>{t.persist&&t.persist(),e.filter(Boolean).forEach(e=>{t.defaultPrevented||e(t)})},L=P?v(a,{})??{}:{},I=P?{}:v(l,{})??{};return{...L,...I,..._,href:c?void 0:M.href,onClick:j([x,e=>{r.startTransition?r.startTransition(()=>{C(e)}):C(e)}]),onFocus:j([w,A]),onMouseEnter:j([E,N]),onMouseLeave:j([S,R]),onTouchStart:j([k,T]),target:i,style:{...b,...L.style,...I.style},className:[y,L.className,I.className].filter(Boolean).join(" ")||void 0,...c?{role:"link","aria-disabled":!0}:void 0,"data-status":P?"active":void 0}}let Q=r.forwardRef((e,t)=>{let n=J(e);return r.createElement("a",o({ref:t},n,{children:"function"==typeof e.children?e.children({isActive:"active"===n["data-status"]}):e.children}))}),ee=r.createContext(null),et=r.createContext(null);function en(){let e=r.useContext(et);return X(e.router.__store),e.router}function er(e,t){let n=en();return X(n.__store,e,t),n}function eo(){return r.useContext(ee)}function ei(e){let t=en(),n=eo()[0],r=e?.from?t.state.currentMatches.find(t=>t.route.id===e?.from):n;return i(r,e?.from&&e.from),(e?.strict??1)&&i(n.route.id==r?.route.id,(r?.route.id,n.route.id,r?.route.id,r?.route.id)),X(r.__store,t=>e?.track?.(r)??r,e?.shallow),r}function ea(){let e=en();return r.useCallback(t=>{let{pending:n,caseSensitive:r,...o}=t;return e.matchRoute(o,{pending:n,caseSensitive:r})},[])}function el(){let e=eo().slice(1),t=e[0];return t?r.createElement(es,{matches:e,match:t}):null}function es({matches:e,match:t}){let n=en();X(t.__store,e=>[e.status,e.error],!0);let o=r.useCallback(()=>null,[]),i=t.pendingComponent??n.options.defaultPendingComponent??o,a=t.errorComponent??n.options.defaultErrorComponent,l=t.route.options.wrapInSuspense??1?r.Suspense:eu,s=a?ef:eu;return r.createElement(ee.Provider,{value:e},r.createElement(l,{fallback:r.createElement(i,null)},r.createElement(s,{key:t.route.id,errorComponent:a,onCatch:()=>{t.id}},r.createElement(ec,{match:t}))))}function ec(e){let t=en();if("error"===e.match.state.status)throw e.match.state.error;if("success"===e.match.state.status)return r.createElement(e.match.component??t.options.defaultComponent??el);if("pending"===e.match.state.status)throw 
e.match.__loadPromise;i(!1)}function eu(e){return r.createElement(r.Fragment,null,e.children)}class ef extends r.Component{state={error:!1,info:void 0};componentDidCatch(e,t){this.props.onCatch(e,t),console.error(e),this.setState({error:e,info:t})}render(){return r.createElement(ed,o({},this.props,{errorState:this.state,reset:()=>this.setState({})}))}}function ed(e){let[t,n]=r.useState(e.errorState),o=en(),i=e.errorComponent??ep,a=r.useRef("");return r.useEffect(()=>{t&&o.state.currentLocation.key!==a.current&&n({}),a.current=o.state.currentLocation.key},[t,o.state.currentLocation.key]),r.useEffect(()=>{e.errorState.error&&n(e.errorState)},[e.errorState.error]),e.errorState.error&&t.error?r.createElement(i,t):e.children}function ep({error:e}){return r.createElement("div",{style:{padding:".5rem",maxWidth:"100%"}},r.createElement("strong",{style:{fontSize:"1.2rem"}},"Something went wrong!"),r.createElement("div",{style:{height:".5rem"}}),r.createElement("div",null,r.createElement("pre",{style:{fontSize:".7em",border:"1px solid red",borderRadius:".25rem",padding:".5rem",color:"red",overflow:"auto"}},e.message?r.createElement("code",null,e.message):null)))}function eh(e,t=!0){let n=er();r.useEffect(()=>{if(!t)return;let r=n.history.block((t,n)=>{window.confirm(e)?(r(),t()):n()});return r})}e.Block=function({message:e,condition:t,children:n}){return eh(e,t),n??null},e.ErrorComponent=ep,e.Link=Q,e.MatchRoute=function(e){let t=ea()(e);return t?"function"==typeof e.children?e.children(t):t?e.children:null:null},e.Navigate=function(e){let t=en();return r.useLayoutEffect(()=>{t.navigate(e)},[]),null},e.Outlet=el,e.ReactRouter=class extends q{constructor(e){super({...e,loadComponent:async e=>(e.preload&&await e.preload(),e)})}},e.RootRoute=B,e.Route=F,e.RouteMatch=Y,e.Router=q,e.RouterProvider=function({router:e,...t}){e.update(t);let n=X(e.__store,e=>e.currentMatches);return r.useEffect(e.mount,[e]),r.createElement(et.Provider,{value:{router:e}},r.createElement(ee.Provider,{value:[void 0,...n]},r.createElement(ef,{errorComponent:ep,onCatch:()=>{}},r.createElement(el,null))))},e.cleanPath=_,e.createBrowserHistory=p,e.createHashHistory=function(){return p({getHref:()=>window.location.hash.substring(1),createHref:e=>`#${e}`})},e.createMemoryHistory=h,e.decode=I,e.defaultFetchServerDataFn=Z,e.defaultParseSearch=z,e.defaultStringifySearch=$,e.encode=j,e.functionalUpdate=v,e.interpolatePath=T,e.invariant=i,e.isPlainObject=w,e.isRedirect=G,e.joinPaths=k,e.last=b,e.lazy=function(e){let t=r.lazy(e);return t.preload=async()=>{await e()},t},e.matchByPath=M,e.matchPathname=P,e.matchesContext=ee,e.parsePathname=R,e.parseSearchWith=U,e.partialDeepEqual=S,e.pick=y,e.redirect=function(e){return e.isRedirect=!0,e},e.replaceEqualDeep=x,e.resolvePath=N,e.rootRouteId=D,e.routerContext=et,e.stringifySearchWith=H,e.trimPath=A,e.trimPathLeft=O,e.trimPathRight=C,e.useBlocker=eh,e.useLinkProps=J,e.useMatch=ei,e.useMatchRoute=ea,e.useMatches=eo,e.useNavigate=function(e){let t=en();return r.useCallback(n=>t.navigate({...e,...n}),[])},e.useParams=function(e){let t=en();return X(t.__store,t=>{let n=b(t.currentMatches)?.params;return e?.track?.(n)??n},!0),b(t.state.currentMatches)?.params},e.useRoute=function(e){let t=en().getRoute(e);return i(t),t},e.useRouter=er,e.useRouterContext=en,e.useSearch=function(e){let{track:t,...n}=e,r=ei(n);return 
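/*
 * The export table above (Link, Router, RouterProvider, useSearch, useParams,
 * parseSearchWith, stringifySearchWith, ...) identifies this module as a
 * minified build of TanStack's router for React. Its default search-param
 * codec, built earlier in the bundle as parseSearchWith(JSON.parse) and
 * stringifySearchWith(JSON.stringify), JSON-encodes each query value and
 * keeps the raw string whenever JSON.parse throws. Roughly:
 *
 *   // parse('?page=2&tag=%22new%22')  -> { page: 2, tag: 'new' }
 *   // parse('?q=hello')               -> { q: 'hello' }  (not valid JSON, kept as-is)
 *   // stringify({ filter: { a: 1 } }) -> '?filter=%7B%22a%22%3A1%7D'  (approximate encoding)
 */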
X(r.__store,t=>e?.track?.(t.search)??t.search,!0),r.state.search},e.useStore=X,e.warning=function(e,t){},Object.defineProperty(e,"__esModule",{value:!0})}(t,n(86006),n(97737))},472:function(e,t,n){"use strict";n.d(t,{J_:function(){return d},Ry:function(){return u},cJ:function(){return p}});var r=function(e){return"undefined"==typeof document?null:(Array.isArray(e)?e[0]:e).ownerDocument.body},o=new WeakMap,i=new WeakMap,a={},l=0,s=function(e){return e&&(e.host||s(e.parentNode))},c=function(e,t,n,r){var c=(Array.isArray(e)?e:[e]).map(function(e){if(t.contains(e))return e;var n=s(e);return n&&t.contains(n)?n:(console.error("aria-hidden",e,"in not contained inside",t,". Doing nothing"),null)}).filter(function(e){return!!e});a[n]||(a[n]=new WeakMap);var u=a[n],f=[],d=new Set,p=new Set(c),h=function(e){!e||d.has(e)||(d.add(e),h(e.parentNode))};c.forEach(h);var g=function(e){!e||p.has(e)||Array.prototype.forEach.call(e.children,function(e){if(d.has(e))g(e);else{var t=e.getAttribute(r),a=null!==t&&"false"!==t,l=(o.get(e)||0)+1,s=(u.get(e)||0)+1;o.set(e,l),u.set(e,s),f.push(e),1===l&&a&&i.set(e,!0),1===s&&e.setAttribute(n,"true"),a||e.setAttribute(r,"true")}})};return g(t),d.clear(),l++,function(){f.forEach(function(e){var t=o.get(e)-1,a=u.get(e)-1;o.set(e,t),u.set(e,a),t||(i.has(e)||e.removeAttribute(r),i.delete(e)),a||e.removeAttribute(n)}),--l||(o=new WeakMap,o=new WeakMap,i=new WeakMap,a={})}},u=function(e,t,n){void 0===n&&(n="data-aria-hidden");var o=Array.from(Array.isArray(e)?e:[e]),i=t||r(e);return i?(o.push.apply(o,Array.from(i.querySelectorAll("[aria-live]"))),c(o,i,n,"aria-hidden")):function(){return null}},f=function(e,t,n){void 0===n&&(n="data-inert-ed");var o=t||r(e);return o?c(e,o,n,"inert"):function(){return null}},d=function(){return"undefined"!=typeof HTMLElement&&HTMLElement.prototype.hasOwnProperty("inert")},p=function(e,t,n){return void 0===n&&(n="data-suppressed"),(d()?f:u)(e,t,n)}},8683:function(e,t){var n;/*! - Copyright (c) 2018 Jed Watson. 
- Licensed under the MIT License (MIT), see - http://jedwatson.github.io/classnames -*/!function(){"use strict";var r={}.hasOwnProperty;function o(){for(var e=[],t=0;t=0;)(c=e(r,o,i,a,p+1,s+1))>h&&(p===l?c*=1:t.test(r.charAt(p-1))?(c*=.9,(f=r.slice(l,p-1).match(n))&&l>0&&(c*=Math.pow(.999,f.length))):t.test(r.slice(l,p-1))?(c*=0,l>0&&(c*=Math.pow(.999,p-l))):(c*=.3,l>0&&(c*=Math.pow(.999,p-l))),r.charAt(p)!==o.charAt(s)&&(c*=.9999)),c<.1&&i.charAt(p-1)===a.charAt(s+1)&&i.charAt(p-1)!==a.charAt(s)&&.1*(u=e(r,o,i,a,p+1,s+2))>c&&(c=.1*u),c>h&&(h=c),p=i.indexOf(d,p+1);return h}(e,r,e.toLowerCase(),r.toLowerCase(),0,0)}},27652:function(e,t,n){"use strict";var r=n(49494),o={"text/plain":"Text","text/html":"Url",default:"Text"};e.exports=function(e,t){var n,i,a,l,s,c,u,f,d=!1;t||(t={}),a=t.debug||!1;try{if(s=r(),c=document.createRange(),u=document.getSelection(),(f=document.createElement("span")).textContent=e,f.ariaHidden="true",f.style.all="unset",f.style.position="fixed",f.style.top=0,f.style.clip="rect(0, 0, 0, 0)",f.style.whiteSpace="pre",f.style.webkitUserSelect="text",f.style.MozUserSelect="text",f.style.msUserSelect="text",f.style.userSelect="text",f.addEventListener("copy",function(n){if(n.stopPropagation(),t.format){if(n.preventDefault(),void 0===n.clipboardData){a&&console.warn("unable to use e.clipboardData"),a&&console.warn("trying IE specific stuff"),window.clipboardData.clearData();var r=o[t.format]||o.default;window.clipboardData.setData(r,e)}else n.clipboardData.clearData(),n.clipboardData.setData(t.format,e)}t.onCopy&&(n.preventDefault(),t.onCopy(n.clipboardData))}),document.body.appendChild(f),c.selectNodeContents(f),u.addRange(c),!document.execCommand("copy"))throw Error("copy command was unsuccessful");d=!0}catch(r){a&&console.error("unable to copy using execCommand: ",r),a&&console.warn("trying IE specific stuff");try{window.clipboardData.setData(t.format||"text",e),t.onCopy&&t.onCopy(window.clipboardData),d=!0}catch(r){a&&console.error("unable to copy using clipboardData: ",r),a&&console.error("falling back to prompt"),n="message"in t?t.message:"Copy to clipboard: #{key}, Enter",i=(/mac os x/i.test(navigator.userAgent)?"⌘":"Ctrl")+"+C",l=n.replace(/#{\s*key\s*}/g,i),window.prompt(l,e)}}finally{u&&("function"==typeof u.removeRange?u.removeRange(c):u.removeAllRanges()),f&&document.body.removeChild(f),s()}return d}},19867:function(e,t,n){var r=n(34142);e.exports=r},44433:function(e,t,n){var r=n(7);e.exports=r},64519:function(e,t,n){var r=n(65050);e.exports=r},71008:function(e,t,n){var r=n(97434);e.exports=r},77685:function(e,t,n){var r=n(94531);e.exports=r},85344:function(e,t,n){var r=n(2608);e.exports=r},10986:function(e,t,n){var r=n(15587);n(47708),n(20551),n(87118),e.exports=r},30073:function(e,t,n){var r=n(51036);e.exports=r},51486:function(e,t,n){var r=n(43948);e.exports=r},30810:function(e,t,n){n(14560),n(99298);var r=n(1131);e.exports=r.Array.from},11750:function(e,t,n){n(11815);var r=n(1131);e.exports=r.Array.isArray},24378:function(e,t,n){n(23902);var r=n(12018);e.exports=r("Array").concat},29900:function(e,t,n){n(92642);var r=n(12018);e.exports=r("Array").filter},79107:function(e,t,n){n(56931);var r=n(12018);e.exports=r("Array").forEach},1753:function(e,t,n){n(9266);var r=n(12018);e.exports=r("Array").indexOf},65785:function(e,t,n){n(91343);var r=n(12018);e.exports=r("Array").push},68403:function(e,t,n){n(77920);var r=n(12018);e.exports=r("Array").slice},28285:function(e,t,n){n(48174);var r=n(12018);e.exports=r("Array").splice},13217:function(e,t,n){n(78944);var 
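/*
 * Module 27652 above is the classic copy-to-clipboard technique: append an
 * off-screen span holding the text, select it with a Range, run
 * document.execCommand('copy'), then fall back to IE's window.clipboardData
 * and finally to window.prompt. The core path, stripped of the fallbacks:
 *
 *   function copyText(text) {
 *     const span = document.createElement('span');
 *     span.textContent = text;
 *     span.style.whiteSpace = 'pre';
 *     span.style.position = 'fixed';
 *     span.style.clip = 'rect(0, 0, 0, 0)';
 *     document.body.appendChild(span);
 *     const range = document.createRange();
 *     range.selectNodeContents(span);
 *     const selection = document.getSelection();
 *     selection.removeAllRanges();
 *     selection.addRange(range);
 *     const ok = document.execCommand('copy');  // deprecated, but still the widest fallback
 *     selection.removeAllRanges();
 *     document.body.removeChild(span);
 *     return ok;
 *   }
 */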
r=n(1131);e.exports=r.Date.now},90642:function(e,t,n){n(78312),n(14560);var r=n(89329);e.exports=r},80197:function(e,t,n){var r=n(49477),o=n(24378),i=Array.prototype;e.exports=function(e){var t=e.concat;return e===i||r(i,e)&&t===i.concat?o:t}},65874:function(e,t,n){var r=n(49477),o=n(29900),i=Array.prototype;e.exports=function(e){var t=e.filter;return e===i||r(i,e)&&t===i.filter?o:t}},45774:function(e,t,n){var r=n(49477),o=n(1753),i=Array.prototype;e.exports=function(e){var t=e.indexOf;return e===i||r(i,e)&&t===i.indexOf?o:t}},21151:function(e,t,n){var r=n(49477),o=n(65785),i=Array.prototype;e.exports=function(e){var t=e.push;return e===i||r(i,e)&&t===i.push?o:t}},58616:function(e,t,n){var r=n(49477),o=n(68403),i=Array.prototype;e.exports=function(e){var t=e.slice;return e===i||r(i,e)&&t===i.slice?o:t}},8231:function(e,t,n){var r=n(49477),o=n(28285),i=Array.prototype;e.exports=function(e){var t=e.splice;return e===i||r(i,e)&&t===i.splice?o:t}},36347:function(e,t,n){n(86461);var r=n(1131);e.exports=r.Math.sign},22030:function(e,t,n){n(58857);var r=n(1131).Object,o=e.exports=function(e,t){return r.defineProperties(e,t)};r.defineProperties.sham&&(o.sham=!0)},73304:function(e,t,n){n(86819);var r=n(1131).Object,o=e.exports=function(e,t,n){return r.defineProperty(e,t,n)};r.defineProperty.sham&&(o.sham=!0)},8768:function(e,t,n){n(51005);var r=n(1131).Object,o=e.exports=function(e,t){return r.getOwnPropertyDescriptor(e,t)};r.getOwnPropertyDescriptor.sham&&(o.sham=!0)},18312:function(e,t,n){n(72269);var r=n(1131);e.exports=r.Object.getOwnPropertyDescriptors},84715:function(e,t,n){n(60613);var r=n(1131);e.exports=r.Object.getOwnPropertySymbols},23197:function(e,t,n){n(46568);var r=n(1131);e.exports=r.Object.keys},26643:function(e,t,n){n(23902),n(8094),n(60613),n(83292),n(37375),n(46014),n(74639),n(66612),n(81790),n(49092),n(46176),n(15821),n(72926),n(77517),n(2978),n(22828),n(74598),n(7681),n(2675),n(50186);var r=n(1131);e.exports=r.Symbol},93872:function(e,t,n){n(78312),n(8094),n(14560),n(66612);var r=n(38197);e.exports=r.f("iterator")},20610:function(e,t,n){n(33345),n(2978);var r=n(38197);e.exports=r.f("toPrimitive")},35413:function(e,t,n){var r=n(19867);e.exports=r},82685:function(e,t,n){var r=n(44433);e.exports=r},83161:function(e,t,n){var r=n(64519);e.exports=r},99889:function(e,t,n){var r=n(71008);e.exports=r},24245:function(e,t,n){var r=n(77685);e.exports=r},23094:function(e,t,n){var r=n(85344);e.exports=r},3708:function(e,t,n){var r=n(10986);n(64473),n(43555),n(15894),n(47051),n(95359),n(26782),n(68316),n(64869),n(99244),n(95284),e.exports=r},22992:function(e,t,n){var r=n(30073);e.exports=r},22663:function(e,t,n){var r=n(51486);e.exports=r},21846:function(e,t,n){var r=n(75628),o=n(99525),i=TypeError;e.exports=function(e){if(r(e))return e;throw i(o(e)+" is not a function")}},77722:function(e,t,n){var r=n(75628),o=String,i=TypeError;e.exports=function(e){if("object"==typeof e||r(e))return e;throw i("Can't set "+o(e)+" as a prototype")}},74418:function(e){e.exports=function(){}},4152:function(e,t,n){var r=n(90545),o=String,i=TypeError;e.exports=function(e){if(r(e))return e;throw i(o(e)+" is not an object")}},34069:function(e,t,n){"use strict";var r=n(48390).forEach,o=n(84992)("forEach");e.exports=o?[].forEach:function(e){return r(this,e,arguments.length>1?arguments[1]:void 0)}},97525:function(e,t,n){"use strict";var r=n(87477),o=n(24871),i=n(97826),a=n(24180),l=n(51094),s=n(72521),c=n(42149),u=n(14965),f=n(73080),d=n(89329),p=Array;e.exports=function(e){var 
t,n,h,g,m,b,v=i(e),y=s(this),x=arguments.length,w=x>1?arguments[1]:void 0,E=void 0!==w;E&&(w=r(w,x>2?arguments[2]:void 0));var S=d(v),k=0;if(S&&!(this===p&&l(S)))for(m=(g=f(v,S)).next,n=y?new this:[];!(h=o(m,g)).done;k++)b=E?a(g,w,[h.value,k],!0):h.value,u(n,k,b);else for(t=c(v),n=y?new this(t):p(t);t>k;k++)b=E?w(v[k],k):v[k],u(n,k,b);return n.length=k,n}},34597:function(e,t,n){var r=n(69146),o=n(55950),i=n(42149),a=function(e){return function(t,n,a){var l,s=r(t),c=i(s),u=o(a,c);if(e&&n!=n){for(;c>u;)if((l=s[u++])!=l)return!0}else for(;c>u;u++)if((e||u in s)&&s[u]===n)return e||u||0;return!e&&-1}};e.exports={includes:a(!0),indexOf:a(!1)}},48390:function(e,t,n){var r=n(87477),o=n(60254),i=n(72911),a=n(97826),l=n(42149),s=n(62128),c=o([].push),u=function(e){var t=1==e,n=2==e,o=3==e,u=4==e,f=6==e,d=7==e,p=5==e||f;return function(h,g,m,b){for(var v,y,x=a(h),w=i(x),E=r(g,m),S=l(w),k=0,_=b||s,O=t?_(h,S):n||d?_(h,0):void 0;S>k;k++)if((p||k in w)&&(y=E(v=w[k],k,x),e)){if(t)O[k]=y;else if(y)switch(e){case 3:return!0;case 5:return v;case 6:return k;case 2:c(O,v)}else switch(e){case 4:return!1;case 7:c(O,v)}}return f?-1:o||u?u:O}};e.exports={forEach:u(0),map:u(1),filter:u(2),some:u(3),every:u(4),find:u(5),findIndex:u(6),filterReject:u(7)}},55893:function(e,t,n){var r=n(29720),o=n(45216),i=n(34750),a=o("species");e.exports=function(e){return i>=51||!r(function(){var t=[];return(t.constructor={})[a]=function(){return{foo:1}},1!==t[e](Boolean).foo})}},84992:function(e,t,n){"use strict";var r=n(29720);e.exports=function(e,t){var n=[][e];return!!n&&r(function(){n.call(null,t||function(){return 1},1)})}},83695:function(e,t,n){"use strict";var r=n(83383),o=n(4063),i=TypeError,a=Object.getOwnPropertyDescriptor,l=r&&!function(){if(void 0!==this)return!0;try{Object.defineProperty([],"length",{writable:!1}).length=1}catch(e){return e instanceof TypeError}}();e.exports=l?function(e,t){if(o(e)&&!a(e,"length").writable)throw i("Cannot set read only .length");return e.length=t}:function(e,t){return e.length=t}},89086:function(e,t,n){var r=n(55950),o=n(42149),i=n(14965),a=Array,l=Math.max;e.exports=function(e,t,n){for(var s=o(e),c=r(t,s),u=r(void 0===n?s:n,s),f=a(l(u-c,0)),d=0;c9007199254740991)throw t("Maximum allowed index exceeded");return e}},68166:function(e){e.exports={CSSRuleList:0,CSSStyleDeclaration:0,CSSValueList:0,ClientRectList:0,DOMRectList:0,DOMStringList:0,DOMTokenList:1,DataTransferItemList:0,FileList:0,HTMLAllCollection:0,HTMLCollection:0,HTMLFormElement:0,HTMLSelectElement:0,MediaList:0,MimeTypeArray:0,NamedNodeMap:0,NodeList:1,PaintRequestList:0,Plugin:0,PluginArray:0,SVGLengthList:0,SVGNumberList:0,SVGPathSegList:0,SVGPointList:0,SVGStringList:0,SVGTransformList:0,SourceBufferList:0,StyleSheetList:0,TextTrackCueList:0,TextTrackList:0,TouchList:0}},53207:function(e){e.exports="function"==typeof Bun&&Bun&&"string"==typeof Bun.version},47362:function(e){e.exports="undefined"!=typeof navigator&&String(navigator.userAgent)||""},34750:function(e,t,n){var r,o,i=n(32604),a=n(47362),l=i.process,s=i.Deno,c=l&&l.versions||s&&s.version,u=c&&c.v8;u&&(o=(r=u.split("."))[0]>0&&r[0]<4?1:+(r[0]+r[1])),!o&&a&&(!(r=a.match(/Edge\/(\d+)/))||r[1]>=74)&&(r=a.match(/Chrome\/(\d+)/))&&(o=+r[1]),e.exports=o},12018:function(e,t,n){var r=n(1131);e.exports=function(e){return r[e+"Prototype"]}},59528:function(e){e.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},67001:function(e,t,n){"use strict";var 
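/*
 * Module 97525 above is core-js's Array.from internal: when the source
 * exposes an iterator it walks it, otherwise it indexes 0..length-1,
 * applying the optional mapFn either way. The two branches in outline
 * (a simplified sketch, not the polyfill's exact code):
 *
 *   function arrayFrom(source, mapFn) {
 *     const out = [];
 *     if (source[Symbol.iterator]) {
 *       let k = 0;
 *       for (const value of source) out.push(mapFn ? mapFn(value, k++) : value);
 *     } else {
 *       // clamp length like the spec: ToLength, capped at 2**53 - 1
 *       const len = Math.min(Math.max(Math.trunc(source.length) || 0, 0), 2 ** 53 - 1);
 *       for (let k = 0; k < len; k++) out.push(mapFn ? mapFn(source[k], k) : source[k]);
 *     }
 *     return out;
 *   }
 */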
r=n(32604),o=n(62863),i=n(31793),a=n(75628),l=n(6052).f,s=n(4817),c=n(1131),u=n(87477),f=n(7172),d=n(2177),p=function(e){var t=function(n,r,i){if(this instanceof t){switch(arguments.length){case 0:return new e;case 1:return new e(n);case 2:return new e(n,r)}return new e(n,r,i)}return o(e,this,arguments)};return t.prototype=e.prototype,t};e.exports=function(e,t){var n,o,h,g,m,b,v,y,x,w=e.target,E=e.global,S=e.stat,k=e.proto,_=E?r:S?r[w]:(r[w]||{}).prototype,O=E?c:c[w]||f(c,w,{})[w],C=O.prototype;for(g in t)o=!(n=s(E?g:w+(S?".":"#")+g,e.forced))&&_&&d(_,g),b=O[g],o&&(v=e.dontCallGetSet?(x=l(_,g))&&x.value:_[g]),m=o&&v?v:t[g],(!o||typeof b!=typeof m)&&(y=e.bind&&o?u(m,r):e.wrap&&o?p(m):k&&a(m)?i(m):m,(e.sham||m&&m.sham||b&&b.sham)&&f(y,"sham",!0),f(O,g,y),k&&(d(c,h=w+"Prototype")||f(c,h,{}),f(c[h],g,m),e.real&&C&&(n||!C[g])&&f(C,g,m)))}},29720:function(e){e.exports=function(e){try{return!!e()}catch(e){return!0}}},62863:function(e,t,n){var r=n(46391),o=Function.prototype,i=o.apply,a=o.call;e.exports="object"==typeof Reflect&&Reflect.apply||(r?a.bind(i):function(){return a.apply(i,arguments)})},87477:function(e,t,n){var r=n(31793),o=n(21846),i=n(46391),a=r(r.bind);e.exports=function(e,t){return o(e),void 0===t?e:i?a(e,t):function(){return e.apply(t,arguments)}}},46391:function(e,t,n){var r=n(29720);e.exports=!r(function(){var e=(function(){}).bind();return"function"!=typeof e||e.hasOwnProperty("prototype")})},24871:function(e,t,n){var r=n(46391),o=Function.prototype.call;e.exports=r?o.bind(o):function(){return o.apply(o,arguments)}},79752:function(e,t,n){var r=n(83383),o=n(2177),i=Function.prototype,a=r&&Object.getOwnPropertyDescriptor,l=o(i,"name"),s=l&&(!r||r&&a(i,"name").configurable);e.exports={EXISTS:l,PROPER:l&&"something"===(function(){}).name,CONFIGURABLE:s}},70145:function(e,t,n){var r=n(60254),o=n(21846);e.exports=function(e,t,n){try{return r(o(Object.getOwnPropertyDescriptor(e,t)[n]))}catch(e){}}},31793:function(e,t,n){var r=n(79307),o=n(60254);e.exports=function(e){if("Function"===r(e))return o(e)}},60254:function(e,t,n){var r=n(46391),o=Function.prototype,i=o.call,a=r&&o.bind.bind(i,i);e.exports=r?a:function(e){return function(){return i.apply(e,arguments)}}},80875:function(e,t,n){var r=n(1131),o=n(32604),i=n(75628),a=function(e){return i(e)?e:void 0};e.exports=function(e,t){return arguments.length<2?a(r[e])||a(o[e]):r[e]&&r[e][t]||o[e]&&o[e][t]}},89329:function(e,t,n){var r=n(95980),o=n(61024),i=n(45139),a=n(76577),l=n(45216)("iterator");e.exports=function(e){if(!i(e))return o(e,l)||o(e,"@@iterator")||a[r(e)]}},73080:function(e,t,n){var r=n(24871),o=n(21846),i=n(4152),a=n(99525),l=n(89329),s=TypeError;e.exports=function(e,t){var n=arguments.length<2?l(e):t;if(o(n))return i(r(n,e));throw s(a(e)+" is not iterable")}},96438:function(e,t,n){var r=n(60254),o=n(4063),i=n(75628),a=n(79307),l=n(9755),s=r([].push);e.exports=function(e){if(i(e))return e;if(o(e)){for(var t=e.length,n=[],r=0;r0?n:t)(r)}},38051:function(e,t,n){var r,o=n(4152),i=n(57685),a=n(59528),l=n(72291),s=n(25681),c=n(25053),u=n(99502),f="prototype",d="script",p=u("IE_PROTO"),h=function(){},g=function(e){return"<"+d+">"+e+""},m=function(e){e.write(g("")),e.close();var t=e.parentWindow.Object;return e=null,t},b=function(){var e,t=c("iframe");return t.style.display="none",s.appendChild(t),t.src=String("java"+d+":"),(e=t.contentWindow.document).open(),e.write(g("document.F=Object")),e.close(),e.F},v=function(){try{r=new ActiveXObject("htmlfile")}catch(e){}v="undefined"!=typeof 
document?document.domain&&r?m(r):b():m(r);for(var e=a.length;e--;)delete v[f][a[e]];return v()};l[p]=!0,e.exports=Object.create||function(e,t){var n;return null!==e?(h[f]=o(e),n=new h,h[f]=null,n[p]=e):n=v(),void 0===t?n:i.f(n,t)}},57685:function(e,t,n){var r=n(83383),o=n(19594),i=n(1237),a=n(4152),l=n(69146),s=n(14844);t.f=r&&!o?Object.defineProperties:function(e,t){a(e);for(var n,r=l(t),o=s(t),c=o.length,u=0;c>u;)i.f(e,n=o[u++],r[n]);return e}},1237:function(e,t,n){var r=n(83383),o=n(24343),i=n(19594),a=n(4152),l=n(24581),s=TypeError,c=Object.defineProperty,u=Object.getOwnPropertyDescriptor,f="enumerable",d="configurable",p="writable";t.f=r?i?function(e,t,n){if(a(e),t=l(t),a(n),"function"==typeof e&&"prototype"===t&&"value"in n&&p in n&&!n[p]){var r=u(e,t);r&&r[p]&&(e[t]=n.value,n={configurable:d in n?n[d]:r[d],enumerable:f in n?n[f]:r[f],writable:!1})}return c(e,t,n)}:c:function(e,t,n){if(a(e),t=l(t),a(n),o)try{return c(e,t,n)}catch(e){}if("get"in n||"set"in n)throw s("Accessors not supported");return"value"in n&&(e[t]=n.value),e}},6052:function(e,t,n){var r=n(83383),o=n(24871),i=n(59954),a=n(88863),l=n(69146),s=n(24581),c=n(2177),u=n(24343),f=Object.getOwnPropertyDescriptor;t.f=r?f:function(e,t){if(e=l(e),t=s(t),u)try{return f(e,t)}catch(e){}if(c(e,t))return a(!o(i.f,e,t),e[t])}},85801:function(e,t,n){var r=n(79307),o=n(69146),i=n(62627).f,a=n(89086),l="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[],s=function(e){try{return i(e)}catch(e){return a(l)}};e.exports.f=function(e){return l&&"Window"==r(e)?s(e):i(o(e))}},62627:function(e,t,n){var r=n(74052),o=n(59528).concat("length","prototype");t.f=Object.getOwnPropertyNames||function(e){return r(e,o)}},86846:function(e,t){t.f=Object.getOwnPropertySymbols},45983:function(e,t,n){var r=n(2177),o=n(75628),i=n(97826),a=n(99502),l=n(27632),s=a("IE_PROTO"),c=Object,u=c.prototype;e.exports=l?c.getPrototypeOf:function(e){var t=i(e);if(r(t,s))return t[s];var n=t.constructor;return o(n)&&t instanceof n?n.prototype:t instanceof c?u:null}},49477:function(e,t,n){var r=n(60254);e.exports=r({}.isPrototypeOf)},74052:function(e,t,n){var r=n(60254),o=n(2177),i=n(69146),a=n(34597).indexOf,l=n(72291),s=r([].push);e.exports=function(e,t){var n,r=i(e),c=0,u=[];for(n in r)!o(l,n)&&o(r,n)&&s(u,n);for(;t.length>c;)o(r,n=t[c++])&&(~a(u,n)||s(u,n));return u}},14844:function(e,t,n){var r=n(74052),o=n(59528);e.exports=Object.keys||function(e){return r(e,o)}},59954:function(e,t){"use strict";var n={}.propertyIsEnumerable,r=Object.getOwnPropertyDescriptor,o=r&&!n.call({1:2},1);t.f=o?function(e){var t=r(this,e);return!!t&&t.enumerable}:n},23122:function(e,t,n){var r=n(70145),o=n(4152),i=n(77722);e.exports=Object.setPrototypeOf||("__proto__"in{}?function(){var e,t=!1,n={};try{(e=r(Object.prototype,"__proto__","set"))(n,[]),t=n instanceof Array}catch(e){}return function(n,r){return o(n),i(r),t?e(n,r):n.__proto__=r,n}}():void 0)},67018:function(e,t,n){"use strict";var r=n(51134),o=n(95980);e.exports=r?({}).toString:function(){return"[object "+o(this)+"]"}},64156:function(e,t,n){var r=n(24871),o=n(75628),i=n(90545),a=TypeError;e.exports=function(e,t){var n,l;if("string"===t&&o(n=e.toString)&&!i(l=r(n,e))||o(n=e.valueOf)&&!i(l=r(n,e))||"string"!==t&&o(n=e.toString)&&!i(l=r(n,e)))return l;throw a("Can't convert object to primitive value")}},57259:function(e,t,n){var r=n(80875),o=n(60254),i=n(62627),a=n(86846),l=n(4152),s=o([].concat);e.exports=r("Reflect","ownKeys")||function(e){var t=i.f(l(e)),n=a.f;return 
n?s(t,n(e)):t}},1131:function(e){e.exports={}},65896:function(e,t,n){var r=n(45139),o=TypeError;e.exports=function(e){if(r(e))throw o("Can't call method on "+e);return e}},61432:function(e,t,n){"use strict";var r,o=n(32604),i=n(62863),a=n(75628),l=n(53207),s=n(47362),c=n(95236),u=n(69248),f=o.Function,d=/MSIE .\./.test(s)||l&&((r=o.Bun.version.split(".")).length<3||0==r[0]&&(r[1]<3||3==r[1]&&0==r[2]));e.exports=function(e,t){var n=t?2:1;return d?function(r,o){var l=u(arguments.length,1)>n,s=a(r)?r:f(r),d=l?c(arguments,n):[],p=l?function(){i(s,this,d)}:s;return t?e(p,o):e(p)}:e}},795:function(e,t,n){var r=n(51134),o=n(1237).f,i=n(7172),a=n(2177),l=n(67018),s=n(45216)("toStringTag");e.exports=function(e,t,n,c){if(e){var u=n?e:e.prototype;a(u,s)||o(u,s,{configurable:!0,value:t}),c&&!r&&i(u,"toString",l)}}},99502:function(e,t,n){var r=n(28818),o=n(45357),i=r("keys");e.exports=function(e){return i[e]||(i[e]=o(e))}},59090:function(e,t,n){var r=n(32604),o=n(99827),i="__core-js_shared__",a=r[i]||o(i,{});e.exports=a},28818:function(e,t,n){var r=n(60411),o=n(59090);(e.exports=function(e,t){return o[e]||(o[e]=void 0!==t?t:{})})("versions",[]).push({version:"3.31.1",mode:r?"pure":"global",copyright:"\xa9 2014-2023 Denis Pushkarev (zloirock.ru)",license:"https://github.com/zloirock/core-js/blob/v3.31.1/LICENSE",source:"https://github.com/zloirock/core-js"})},66905:function(e,t,n){var r=n(60254),o=n(54354),i=n(9755),a=n(65896),l=r("".charAt),s=r("".charCodeAt),c=r("".slice),u=function(e){return function(t,n){var r,u,f=i(a(t)),d=o(n),p=f.length;return d<0||d>=p?e?"":void 0:(r=s(f,d))<55296||r>56319||d+1===p||(u=s(f,d+1))<56320||u>57343?e?l(f,d):r:e?c(f,d,d+2):(r-55296<<10)+(u-56320)+65536}};e.exports={codeAt:u(!1),charAt:u(!0)}},42112:function(e,t,n){var r=n(34750),o=n(29720),i=n(32604).String;e.exports=!!Object.getOwnPropertySymbols&&!o(function(){var e=Symbol();return!i(e)||!(Object(e) instanceof Symbol)||!Symbol.sham&&r&&r<41})},71607:function(e,t,n){var r=n(24871),o=n(80875),i=n(45216),a=n(14423);e.exports=function(){var e=o("Symbol"),t=e&&e.prototype,n=t&&t.valueOf,l=i("toPrimitive");t&&!t[l]&&a(t,l,function(e){return r(n,this)},{arity:1})}},96889:function(e,t,n){var r=n(80875),o=n(60254),i=r("Symbol"),a=i.keyFor,l=o(i.prototype.valueOf);e.exports=i.isRegisteredSymbol||function(e){try{return void 0!==a(l(e))}catch(e){return!1}}},88822:function(e,t,n){for(var r=n(28818),o=n(80875),i=n(60254),a=n(42617),l=n(45216),s=o("Symbol"),c=s.isWellKnownSymbol,u=o("Object","getOwnPropertyNames"),f=i(s.prototype.valueOf),d=r("wks"),p=0,h=u(s),g=h.length;p0?o(r(e),9007199254740991):0}},97826:function(e,t,n){var r=n(65896),o=Object;e.exports=function(e){return o(r(e))}},477:function(e,t,n){var r=n(24871),o=n(90545),i=n(42617),a=n(61024),l=n(64156),s=n(45216),c=TypeError,u=s("toPrimitive");e.exports=function(e,t){if(!o(e)||i(e))return e;var n,s=a(e,u);if(s){if(void 0===t&&(t="default"),!o(n=r(s,e,t))||i(n))return n;throw c("Can't convert object to primitive value")}return void 0===t&&(t="number"),l(e,t)}},24581:function(e,t,n){var r=n(477),o=n(42617);e.exports=function(e){var t=r(e,"string");return o(t)?t:t+""}},51134:function(e,t,n){var r=n(45216)("toStringTag"),o={};o[r]="z",e.exports="[object z]"===String(o)},9755:function(e,t,n){var r=n(95980),o=String;e.exports=function(e){if("Symbol"===r(e))throw TypeError("Cannot convert a Symbol value to a string");return o(e)}},99525:function(e){var t=String;e.exports=function(e){try{return t(e)}catch(e){return"Object"}}},45357:function(e,t,n){var 
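/*
 * Module 477 above is core-js's ToPrimitive: if the value is already
 * primitive it is returned as-is, otherwise its Symbol.toPrimitive method is
 * tried, otherwise OrdinaryToPrimitive runs (valueOf then toString for hint
 * "number"/"default", the reverse for hint "string"). In simplified outline:
 *
 *   const isPrimitive = v => v === null || (typeof v !== 'object' && typeof v !== 'function');
 *   function toPrimitive(input, hint = 'default') {
 *     if (isPrimitive(input)) return input;
 *     const exotic = input[Symbol.toPrimitive];
 *     if (exotic) {
 *       const result = exotic.call(input, hint);
 *       if (isPrimitive(result)) return result;
 *       throw new TypeError("Cannot convert object to primitive value");
 *     }
 *     const order = hint === 'string' ? ['toString', 'valueOf'] : ['valueOf', 'toString'];
 *     for (const name of order) {
 *       const method = input[name];
 *       if (typeof method === 'function') {
 *         const result = method.call(input);
 *         if (isPrimitive(result)) return result;
 *       }
 *     }
 *     throw new TypeError("Cannot convert object to primitive value");
 *   }
 */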
r=n(60254),o=0,i=Math.random(),a=r(1..toString);e.exports=function(e){return"Symbol("+(void 0===e?"":e)+")_"+a(++o+i,36)}},58371:function(e,t,n){var r=n(42112);e.exports=r&&!Symbol.sham&&"symbol"==typeof Symbol.iterator},19594:function(e,t,n){var r=n(83383),o=n(29720);e.exports=r&&o(function(){return 42!=Object.defineProperty(function(){},"prototype",{value:42,writable:!1}).prototype})},69248:function(e){var t=TypeError;e.exports=function(e,n){if(e=51||!o(function(){var e=[];return e[g]=!1,e.concat()[0]!==e}),b=function(e){if(!a(e))return!1;var t=e[g];return void 0!==t?!!t:i(e)};r({target:"Array",proto:!0,arity:1,forced:!m||!d("concat")},{concat:function(e){var t,n,r,o,i,a=l(this),d=f(a,0),p=0;for(t=-1,r=arguments.length;t1?arguments[1]:void 0)}})},56931:function(e,t,n){"use strict";var r=n(67001),o=n(34069);r({target:"Array",proto:!0,forced:[].forEach!=o},{forEach:o})},99298:function(e,t,n){var r=n(67001),o=n(97525);r({target:"Array",stat:!0,forced:!n(81985)(function(e){Array.from(e)})},{from:o})},9266:function(e,t,n){"use strict";var r=n(67001),o=n(31793),i=n(34597).indexOf,a=n(84992),l=o([].indexOf),s=!!l&&1/l([1],1,-0)<0;r({target:"Array",proto:!0,forced:s||!a("indexOf")},{indexOf:function(e){var t=arguments.length>1?arguments[1]:void 0;return s?l(this,e,t)||0:i(this,e,t)}})},11815:function(e,t,n){n(67001)({target:"Array",stat:!0},{isArray:n(4063)})},78312:function(e,t,n){"use strict";var r=n(69146),o=n(74418),i=n(76577),a=n(64535),l=n(1237).f,s=n(25149),c=n(96398),u=n(60411),f=n(83383),d="Array Iterator",p=a.set,h=a.getterFor(d);e.exports=s(Array,"Array",function(e,t){p(this,{type:d,target:r(e),index:0,kind:t})},function(){var e=h(this),t=e.target,n=e.kind,r=e.index++;return!t||r>=t.length?(e.target=void 0,c(void 0,!0)):"keys"==n?c(r,!1):"values"==n?c(t[r],!1):c([r,t[r]],!1)},"values");var g=i.Arguments=i.Array;if(o("keys"),o("values"),o("entries"),!u&&f&&"values"!==g.name)try{l(g,"name",{value:"values"})}catch(e){}},91343:function(e,t,n){"use strict";var r=n(67001),o=n(97826),i=n(42149),a=n(83695),l=n(6439);r({target:"Array",proto:!0,arity:1,forced:n(29720)(function(){return 4294967297!==[].push.call({length:4294967296},1)})||!function(){try{Object.defineProperty([],"length",{writable:!1}).push()}catch(e){return e instanceof TypeError}}()},{push:function(e){var t=o(this),n=i(t),r=arguments.length;l(n+r);for(var s=0;sx-r+n;m--)d(y,m-1)}else if(n>r)for(m=x-r;m>w;m--)b=m+r-1,v=m+n-1,b in y?y[v]=y[b]:d(y,v);for(m=0;mf;)void 0!==(n=o(r,t=c[f++]))&&s(u,t,n);return u}})},27112:function(e,t,n){var r=n(67001),o=n(42112),i=n(29720),a=n(86846),l=n(97826);r({target:"Object",stat:!0,forced:!o||i(function(){a.f(1)})},{getOwnPropertySymbols:function(e){var t=a.f;return t?t(l(e)):[]}})},46568:function(e,t,n){var r=n(67001),o=n(97826),i=n(14844);r({target:"Object",stat:!0,forced:n(29720)(function(){i(1)})},{keys:function(e){return i(o(e))}})},8094:function(){},50186:function(){},14560:function(e,t,n){"use strict";var r=n(66905).charAt,o=n(9755),i=n(64535),a=n(25149),l=n(96398),s="String Iterator",c=i.set,u=i.getterFor(s);a(String,"String",function(e){c(this,{type:s,string:o(e),index:0})},function(){var e,t=u(this),n=t.string,o=t.index;return o>=n.length?l(void 0,!0):(e=r(n,o),t.index+=e.length,l(e,!1))})},83292:function(e,t,n){n(28547)("asyncIterator")},27892:function(e,t,n){"use strict";var 
r=n(67001),o=n(32604),i=n(24871),a=n(60254),l=n(60411),s=n(83383),c=n(42112),u=n(29720),f=n(2177),d=n(49477),p=n(4152),h=n(69146),g=n(24581),m=n(9755),b=n(88863),v=n(38051),y=n(14844),x=n(62627),w=n(85801),E=n(86846),S=n(6052),k=n(1237),_=n(57685),O=n(59954),C=n(14423),A=n(70866),N=n(28818),R=n(99502),T=n(72291),P=n(45357),M=n(45216),j=n(38197),L=n(28547),I=n(71607),D=n(795),F=n(64535),B=n(48390).forEach,z=R("hidden"),$="Symbol",U="prototype",H=F.set,Z=F.getterFor($),q=Object[U],W=o.Symbol,V=W&&W[U],G=o.TypeError,K=o.QObject,Y=S.f,X=k.f,J=w.f,Q=O.f,ee=a([].push),et=N("symbols"),en=N("op-symbols"),er=N("wks"),eo=!K||!K[U]||!K[U].findChild,ei=s&&u(function(){return 7!=v(X({},"a",{get:function(){return X(this,"a",{value:7}).a}})).a})?function(e,t,n){var r=Y(q,t);r&&delete q[t],X(e,t,n),r&&e!==q&&X(q,t,r)}:X,ea=function(e,t){var n=et[e]=v(V);return H(n,{type:$,tag:e,description:t}),s||(n.description=t),n},el=function(e,t,n){e===q&&el(en,t,n),p(e);var r=g(t);return(p(n),f(et,r))?(n.enumerable?(f(e,z)&&e[z][r]&&(e[z][r]=!1),n=v(n,{enumerable:b(0,!1)})):(f(e,z)||X(e,z,b(1,{})),e[z][r]=!0),ei(e,r,n)):X(e,r,n)},es=function(e,t){p(e);var n=h(t),r=y(n).concat(ed(n));return B(r,function(t){(!s||i(ec,n,t))&&el(e,t,n[t])}),e},ec=function(e){var t=g(e),n=i(Q,this,t);return(!(this===q&&f(et,t))||!!f(en,t))&&(!(n||!f(this,t)||!f(et,t)||f(this,z)&&this[z][t])||n)},eu=function(e,t){var n=h(e),r=g(t);if(!(n===q&&f(et,r))||f(en,r)){var o=Y(n,r);return o&&f(et,r)&&!(f(n,z)&&n[z][r])&&(o.enumerable=!0),o}},ef=function(e){var t=J(h(e)),n=[];return B(t,function(e){f(et,e)||f(T,e)||ee(n,e)}),n},ed=function(e){var t=e===q,n=J(t?en:h(e)),r=[];return B(n,function(e){f(et,e)&&(!t||f(q,e))&&ee(r,et[e])}),r};c||(C(V=(W=function(){if(d(V,this))throw G("Symbol is not a constructor");var e=arguments.length&&void 0!==arguments[0]?m(arguments[0]):void 0,t=P(e),n=function(e){this===q&&i(n,en,e),f(this,z)&&f(this[z],t)&&(this[z][t]=!1),ei(this,t,b(1,e))};return s&&eo&&ei(q,t,{configurable:!0,set:n}),ea(t,e)})[U],"toString",function(){return Z(this).tag}),C(W,"withoutSetter",function(e){return ea(P(e),e)}),O.f=ec,k.f=el,_.f=es,S.f=eu,x.f=w.f=ef,E.f=ed,j.f=function(e){return ea(M(e),e)},s&&(A(V,"description",{configurable:!0,get:function(){return Z(this).description}}),l||C(q,"propertyIsEnumerable",ec,{unsafe:!0}))),r({global:!0,constructor:!0,wrap:!0,forced:!c,sham:!c},{Symbol:W}),B(y(er),function(e){L(e)}),r({target:$,stat:!0,forced:!c},{useSetter:function(){eo=!0},useSimple:function(){eo=!1}}),r({target:"Object",stat:!0,forced:!c,sham:!s},{create:function(e,t){return void 0===t?v(e):es(v(e),t)},defineProperty:el,defineProperties:es,getOwnPropertyDescriptor:eu}),r({target:"Object",stat:!0,forced:!c},{getOwnPropertyNames:ef}),I(),D(W,$),T[z]=!0},37375:function(){},89367:function(e,t,n){var r=n(67001),o=n(80875),i=n(2177),a=n(9755),l=n(28818),s=n(34601),c=l("string-to-symbol-registry"),u=l("symbol-to-string-registry");r({target:"Symbol",stat:!0,forced:!s},{for:function(e){var t=a(e);if(i(c,t))return c[t];var n=o("Symbol")(t);return c[t]=n,u[n]=t,n}})},46014:function(e,t,n){n(28547)("hasInstance")},74639:function(e,t,n){n(28547)("isConcatSpreadable")},66612:function(e,t,n){n(28547)("iterator")},60613:function(e,t,n){n(27892),n(89367),n(71574),n(32148),n(27112)},71574:function(e,t,n){var r=n(67001),o=n(2177),i=n(42617),a=n(99525),l=n(28818),s=n(34601),c=l("symbol-to-string-registry");r({target:"Symbol",stat:!0,forced:!s},{keyFor:function(e){if(!i(e))throw TypeError(a(e)+" is not a symbol");if(o(c,e))return 
c[e]}})},49092:function(e,t,n){n(28547)("matchAll")},81790:function(e,t,n){n(28547)("match")},46176:function(e,t,n){n(28547)("replace")},15821:function(e,t,n){n(28547)("search")},72926:function(e,t,n){n(28547)("species")},77517:function(e,t,n){n(28547)("split")},2978:function(e,t,n){var r=n(28547),o=n(71607);r("toPrimitive"),o()},22828:function(e,t,n){var r=n(80875),o=n(28547),i=n(795);o("toStringTag"),i(r("Symbol"),"Symbol")},74598:function(e,t,n){n(28547)("unscopables")},47708:function(e,t,n){var r=n(45216),o=n(1237).f,i=r("metadata"),a=Function.prototype;void 0===a[i]&&o(a,i,{value:null})},64473:function(e,t,n){n(28547)("asyncDispose")},20551:function(e,t,n){n(28547)("dispose")},43555:function(e,t,n){n(67001)({target:"Symbol",stat:!0},{isRegisteredSymbol:n(96889)})},26782:function(e,t,n){n(67001)({target:"Symbol",stat:!0,name:"isRegisteredSymbol"},{isRegistered:n(96889)})},15894:function(e,t,n){n(67001)({target:"Symbol",stat:!0,forced:!0},{isWellKnownSymbol:n(88822)})},68316:function(e,t,n){n(67001)({target:"Symbol",stat:!0,name:"isWellKnownSymbol",forced:!0},{isWellKnown:n(88822)})},47051:function(e,t,n){n(28547)("matcher")},64869:function(e,t,n){n(28547)("metadataKey")},87118:function(e,t,n){n(28547)("metadata")},95359:function(e,t,n){n(28547)("observable")},99244:function(e,t,n){n(28547)("patternMatch")},95284:function(e,t,n){n(28547)("replaceAll")},4583:function(e,t,n){n(78312);var r=n(68166),o=n(32604),i=n(95980),a=n(7172),l=n(76577),s=n(45216)("toStringTag");for(var c in r){var u=o[c],f=u&&u.prototype;f&&i(f)!==s&&a(f,s,c),l[c]=l.Array}},23929:function(e,t,n){var r=n(67001),o=n(32604),i=n(61432)(o.setInterval,!0);r({global:!0,bind:!0,forced:o.setInterval!==i},{setInterval:i})},31768:function(e,t,n){var r=n(67001),o=n(32604),i=n(61432)(o.setTimeout,!0);r({global:!0,bind:!0,forced:o.setTimeout!==i},{setTimeout:i})},16078:function(e,t,n){n(23929),n(31768)},34142:function(e,t,n){var r=n(30810);e.exports=r},7:function(e,t,n){var r=n(11750);e.exports=r},40981:function(e,t,n){var r=n(79107);e.exports=r},84699:function(e,t,n){var r=n(13217);e.exports=r},65050:function(e,t,n){var r=n(90642);n(4583),e.exports=r},69194:function(e,t,n){var r=n(80197);e.exports=r},59960:function(e,t,n){var r=n(65874);e.exports=r},20792:function(e,t,n){n(4583);var r=n(95980),o=n(2177),i=n(49477),a=n(40981),l=Array.prototype,s={DOMTokenList:!0,NodeList:!0};e.exports=function(e){var t=e.forEach;return e===l||i(l,e)&&t===l.forEach||o(s,r(e))?a:t}},45956:function(e,t,n){var r=n(45774);e.exports=r},97434:function(e,t,n){var r=n(21151);e.exports=r},94531:function(e,t,n){var r=n(58616);e.exports=r},16474:function(e,t,n){var r=n(8231);e.exports=r},43631:function(e,t,n){var r=n(36347);e.exports=r},25166:function(e,t,n){var r=n(22030);e.exports=r},2608:function(e,t,n){var r=n(73304);e.exports=r},13782:function(e,t,n){var r=n(8768);e.exports=r},28436:function(e,t,n){var r=n(18312);e.exports=r},58542:function(e,t,n){var r=n(84715);e.exports=r},20736:function(e,t,n){var r=n(23197);e.exports=r},66013:function(e,t,n){n(16078);var r=n(1131);e.exports=r.setInterval},51126:function(e,t,n){n(16078);var r=n(1131);e.exports=r.setTimeout},15587:function(e,t,n){var r=n(26643);n(4583),e.exports=r},51036:function(e,t,n){var r=n(93872);n(4583),e.exports=r},43948:function(e,t,n){var r=n(20610);e.exports=r},5370:function(e,t,n){var r=n(65170),o=n(72386);e.exports=function(e){if(r(e))return e;throw TypeError(o(e)+" is not a function")}},88507:function(e,t,n){"use strict";var r=n(46159).charAt;e.exports=function(e,t,n){return 
t+(n?r(e,t).length:1)}},24601:function(e,t,n){var r=n(86157);e.exports=function(e){if(r(e))return e;throw TypeError(String(e)+" is not an object")}},55122:function(e,t,n){var r=n(83798),o=n(38791),i=n(93584),a=function(e){return function(t,n,a){var l,s=r(t),c=i(s),u=o(a,c);if(e&&n!=n){for(;c>u;)if((l=s[u++])!=l)return!0}else for(;c>u;u++)if((e||u in s)&&s[u]===n)return e||u||0;return!e&&-1}};e.exports={includes:a(!0),indexOf:a(!1)}},51746:function(e){var t={}.toString;e.exports=function(e){return t.call(e).slice(8,-1)}},63658:function(e,t,n){var r=n(38823),o=n(65170),i=n(51746),a=n(26739)("toStringTag"),l="Arguments"==i(function(){return arguments}()),s=function(e,t){try{return e[t]}catch(e){}};e.exports=r?i:function(e){var t,n,r;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(n=s(t=Object(e),a))?n:l?i(t):"Object"==(r=i(t))&&o(t.callee)?"Arguments":r}},48565:function(e,t,n){var r=n(36984),o=n(15105),i=n(47604),a=n(69128);e.exports=function(e,t){for(var n=o(t),l=a.f,s=i.f,c=0;c=74)&&(r=a.match(/Chrome\/(\d+)/))&&(o=r[1]),e.exports=o&&+o},41780:function(e){e.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},54271:function(e,t,n){var r=n(92727),o=n(47604).f,i=n(30430),a=n(81980),l=n(4039),s=n(48565),c=n(95160);e.exports=function(e,t){var n,u,f,d,p,h=e.target,g=e.global,m=e.stat;if(n=g?r:m?r[h]||l(h,{}):(r[h]||{}).prototype)for(u in t){if(d=t[u],f=e.noTargetGet?(p=o(n,u))&&p.value:n[u],!c(g?u:h+(m?".":"#")+u,e.forced)&&void 0!==f){if(typeof d==typeof f)continue;s(d,f)}(e.sham||f&&f.sham)&&i(d,"sham",!0),a(n,u,d,e)}}},61531:function(e){e.exports=function(e){try{return!!e()}catch(e){return!0}}},49069:function(e,t,n){"use strict";n(8914);var r=n(81980),o=n(88467),i=n(61531),a=n(26739),l=n(30430),s=a("species"),c=RegExp.prototype;e.exports=function(e,t,n,u){var f=a(e),d=!i(function(){var t={};return t[f]=function(){return 7},7!=""[e](t)}),p=d&&!i(function(){var t=!1,n=/a/;return"split"===e&&((n={}).constructor={},n.constructor[s]=function(){return n},n.flags="",n[f]=/./[f]),n.exec=function(){return t=!0,null},n[f](""),!t});if(!d||!p||n){var h=/./[f],g=t(f,""[e],function(e,t,n,r,i){var a=t.exec;return a===o||a===c.exec?d&&!i?{done:!0,value:h.call(t,n,r)}:{done:!0,value:e.call(n,t,r)}:{done:!1}});r(String.prototype,e,g[0]),r(c,f,g[1])}u&&l(c[f],"sham",!0)}},15112:function(e,t,n){var r=n(56667),o=n(36984),i=Function.prototype,a=r&&Object.getOwnPropertyDescriptor,l=o(i,"name"),s=l&&(!r||r&&a(i,"name").configurable);e.exports={EXISTS:l,PROPER:l&&"something"===(function(){}).name,CONFIGURABLE:s}},99604:function(e,t,n){var r=n(92727),o=n(65170);e.exports=function(e,t){var n;return arguments.length<2?o(n=r[e])?n:void 0:r[e]&&r[e][t]}},92567:function(e,t,n){var r=n(5370);e.exports=function(e,t){var n=e[t];return null==n?void 0:r(n)}},9562:function(e,t,n){var r=n(47322),o=Math.floor,i="".replace,a=/\$([$&'`]|\d{1,2}|<[^>]*>)/g,l=/\$([$&'`]|\d{1,2})/g;e.exports=function(e,t,n,s,c,u){var f=n+e.length,d=s.length,p=l;return void 0!==c&&(c=r(c),p=a),i.call(u,p,function(r,i){var a;switch(i.charAt(0)){case"$":return"$";case"&":return e;case"`":return t.slice(0,n);case"'":return t.slice(f);case"<":a=c[i.slice(1,-1)];break;default:var l=+i;if(0===l)return r;if(l>d){var u=o(l/10);if(0===u)return r;if(u<=d)return void 0===s[u-1]?i.charAt(1):s[u-1]+i.charAt(1);return r}a=s[l-1]}return void 0===a?"":a})}},92727:function(e,t,n){var r=function(e){return e&&e.Math==Math&&e};e.exports=r("object"==typeof 
globalThis&&globalThis)||r("object"==typeof window&&window)||r("object"==typeof self&&self)||r("object"==typeof n.g&&n.g)||function(){return this}()||Function("return this")()},36984:function(e,t,n){var r=n(47322),o={}.hasOwnProperty;e.exports=Object.hasOwn||function(e,t){return o.call(r(e),t)}},90090:function(e){e.exports={}},66294:function(e,t,n){var r=n(99604);e.exports=r("document","documentElement")},50066:function(e,t,n){var r=n(56667),o=n(61531),i=n(88506);e.exports=!r&&!o(function(){return 7!=Object.defineProperty(i("div"),"a",{get:function(){return 7}}).a})},29554:function(e,t,n){var r=n(61531),o=n(51746),i="".split;e.exports=r(function(){return!Object("z").propertyIsEnumerable(0)})?function(e){return"String"==o(e)?i.call(e,""):Object(e)}:Object},12319:function(e,t,n){var r=n(65170),o=n(41679),i=Function.toString;r(o.inspectSource)||(o.inspectSource=function(e){return i.call(e)}),e.exports=o.inspectSource},32784:function(e,t,n){var r,o,i,a=n(74073),l=n(92727),s=n(86157),c=n(30430),u=n(36984),f=n(41679),d=n(28182),p=n(90090),h="Object already initialized",g=l.WeakMap;if(a||f.state){var m=f.state||(f.state=new g),b=m.get,v=m.has,y=m.set;r=function(e,t){if(v.call(m,e))throw TypeError(h);return t.facade=e,y.call(m,e,t),t},o=function(e){return b.call(m,e)||{}},i=function(e){return v.call(m,e)}}else{var x=d("state");p[x]=!0,r=function(e,t){if(u(e,x))throw TypeError(h);return t.facade=e,c(e,x,t),t},o=function(e){return u(e,x)?e[x]:{}},i=function(e){return u(e,x)}}e.exports={set:r,get:o,has:i,enforce:function(e){return i(e)?o(e):r(e,{})},getterFor:function(e){return function(t){var n;if(!s(t)||(n=o(t)).type!==e)throw TypeError("Incompatible receiver, "+e+" required");return n}}}},65170:function(e){e.exports=function(e){return"function"==typeof e}},95160:function(e,t,n){var r=n(61531),o=n(65170),i=/#|\.prototype\./,a=function(e,t){var n=s[l(e)];return n==u||n!=c&&(o(t)?r(t):!!t)},l=a.normalize=function(e){return String(e).replace(i,".").toLowerCase()},s=a.data={},c=a.NATIVE="N",u=a.POLYFILL="P";e.exports=a},86157:function(e,t,n){var r=n(65170);e.exports=function(e){return"object"==typeof e?null!==e:r(e)}},38277:function(e){e.exports=!1},66290:function(e,t,n){var r=n(65170),o=n(99604),i=n(78451);e.exports=i?function(e){return"symbol"==typeof e}:function(e){var t=o("Symbol");return r(t)&&Object(e) instanceof t}},93584:function(e,t,n){var r=n(44446);e.exports=function(e){return r(e.length)}},26200:function(e,t,n){var r=n(28583),o=n(61531);e.exports=!!Object.getOwnPropertySymbols&&!o(function(){var e=Symbol();return!String(e)||!(Object(e) instanceof Symbol)||!Symbol.sham&&r&&r<41})},74073:function(e,t,n){var r=n(92727),o=n(65170),i=n(12319),a=r.WeakMap;e.exports=o(a)&&/native code/.test(i(a))},65581:function(e,t,n){var r,o=n(24601),i=n(28587),a=n(41780),l=n(90090),s=n(66294),c=n(88506),u=n(28182),f="prototype",d="script",p=u("IE_PROTO"),h=function(){},g=function(e){return"<"+d+">"+e+""},m=function(e){e.write(g("")),e.close();var t=e.parentWindow.Object;return e=null,t},b=function(){var e,t=c("iframe");return t.style.display="none",s.appendChild(t),t.src=String("java"+d+":"),(e=t.contentWindow.document).open(),e.write(g("document.F=Object")),e.close(),e.F},v=function(){try{r=new ActiveXObject("htmlfile")}catch(e){}v="undefined"!=typeof document?document.domain&&r?m(r):b():m(r);for(var e=a.length;e--;)delete v[f][a[e]];return v()};l[p]=!0,e.exports=Object.create||function(e,t){var n;return null!==e?(h[f]=o(e),n=new h,h[f]=null,n[p]=e):n=v(),void 0===t?n:i(n,t)}},28587:function(e,t,n){var 
r=n(56667),o=n(69128),i=n(24601),a=n(63835);e.exports=r?Object.defineProperties:function(e,t){i(e);for(var n,r=a(t),l=r.length,s=0;l>s;)o.f(e,n=r[s++],t[n]);return e}},69128:function(e,t,n){var r=n(56667),o=n(50066),i=n(24601),a=n(87892),l=Object.defineProperty;t.f=r?l:function(e,t,n){if(i(e),t=a(t),i(n),o)try{return l(e,t,n)}catch(e){}if("get"in n||"set"in n)throw TypeError("Accessors not supported");return"value"in n&&(e[t]=n.value),e}},47604:function(e,t,n){var r=n(56667),o=n(66681),i=n(49173),a=n(83798),l=n(87892),s=n(36984),c=n(50066),u=Object.getOwnPropertyDescriptor;t.f=r?u:function(e,t){if(e=a(e),t=l(t),c)try{return u(e,t)}catch(e){}if(s(e,t))return i(!o.f.call(e,t),e[t])}},93572:function(e,t,n){var r=n(87535),o=n(41780).concat("length","prototype");t.f=Object.getOwnPropertyNames||function(e){return r(e,o)}},8831:function(e,t){t.f=Object.getOwnPropertySymbols},87535:function(e,t,n){var r=n(36984),o=n(83798),i=n(55122).indexOf,a=n(90090);e.exports=function(e,t){var n,l=o(e),s=0,c=[];for(n in l)!r(a,n)&&r(l,n)&&c.push(n);for(;t.length>s;)r(l,n=t[s++])&&(~i(c,n)||c.push(n));return c}},63835:function(e,t,n){var r=n(87535),o=n(41780);e.exports=Object.keys||function(e){return r(e,o)}},66681:function(e,t){"use strict";var n={}.propertyIsEnumerable,r=Object.getOwnPropertyDescriptor,o=r&&!n.call({1:2},1);t.f=o?function(e){var t=r(this,e);return!!t&&t.enumerable}:n},3211:function(e,t,n){"use strict";var r=n(38823),o=n(63658);e.exports=r?({}).toString:function(){return"[object "+o(this)+"]"}},87450:function(e,t,n){var r=n(65170),o=n(86157);e.exports=function(e,t){var n,i;if("string"===t&&r(n=e.toString)&&!o(i=n.call(e))||r(n=e.valueOf)&&!o(i=n.call(e))||"string"!==t&&r(n=e.toString)&&!o(i=n.call(e)))return i;throw TypeError("Can't convert object to primitive value")}},15105:function(e,t,n){var r=n(99604),o=n(93572),i=n(8831),a=n(24601);e.exports=r("Reflect","ownKeys")||function(e){var t=o.f(a(e)),n=i.f;return n?t.concat(n(e)):t}},81980:function(e,t,n){var r=n(92727),o=n(65170),i=n(36984),a=n(30430),l=n(4039),s=n(12319),c=n(32784),u=n(15112).CONFIGURABLE,f=c.get,d=c.enforce,p=String(String).split("String");(e.exports=function(e,t,n,s){var c,f=!!s&&!!s.unsafe,h=!!s&&!!s.enumerable,g=!!s&&!!s.noTargetGet,m=s&&void 0!==s.name?s.name:t;if(o(n)&&("Symbol("===String(m).slice(0,7)&&(m="["+String(m).replace(/^Symbol\(([^)]*)\)/,"$1")+"]"),(!i(n,"name")||u&&n.name!==m)&&a(n,"name",m),(c=d(n)).source||(c.source=p.join("string"==typeof m?m:""))),e===r){h?e[t]=n:l(t,n);return}f?!g&&e[t]&&(h=!0):delete e[t],h?e[t]=n:a(e,t,n)})(Function.prototype,"toString",function(){return o(this)&&f(this).source||s(this)})},49583:function(e,t,n){var r=n(24601),o=n(65170),i=n(51746),a=n(88467);e.exports=function(e,t){var n=e.exec;if(o(n)){var l=n.call(e,t);return null!==l&&r(l),l}if("RegExp"===i(e))return a.call(e,t);throw TypeError("RegExp#exec called on incompatible receiver")}},88467:function(e,t,n){"use strict";var r,o,i=n(93542),a=n(54181),l=n(51591),s=n(25396),c=n(65581),u=n(32784).get,f=n(80155),d=n(4023),p=RegExp.prototype.exec,h=s("native-string-replace",String.prototype.replace),g=p,m=(r=/a/,o=/b*/g,p.call(r,"a"),p.call(o,"a"),0!==r.lastIndex||0!==o.lastIndex),b=l.UNSUPPORTED_Y||l.BROKEN_CARET,v=void 0!==/()??/.exec("")[1];(m||v||b||f||d)&&(g=function(e){var t,n,r,o,l,s,f,d=u(this),y=i(e),x=d.raw;if(x)return x.lastIndex=this.lastIndex,t=g.call(x,y),this.lastIndex=x.lastIndex,t;var 
w=d.groups,E=b&&this.sticky,S=a.call(this),k=this.source,_=0,O=y;if(E&&(-1===(S=S.replace("y","")).indexOf("g")&&(S+="g"),O=y.slice(this.lastIndex),this.lastIndex>0&&(!this.multiline||this.multiline&&"\n"!==y.charAt(this.lastIndex-1))&&(k="(?: "+k+")",O=" "+O,_++),n=RegExp("^(?:"+k+")",S)),v&&(n=RegExp("^"+k+"$(?!\\s)",S)),m&&(r=this.lastIndex),o=p.call(E?n:this,O),E?o?(o.input=o.input.slice(_),o[0]=o[0].slice(_),o.index=this.lastIndex,this.lastIndex+=o[0].length):this.lastIndex=0:m&&o&&(this.lastIndex=this.global?o.index+o[0].length:r),v&&o&&o.length>1&&h.call(o[0],n,function(){for(l=1;lb)","g");return"b"!==e.exec("b").groups.a||"bc"!=="b".replace(e,"$c")})},96884:function(e){e.exports=function(e){if(void 0==e)throw TypeError("Can't call method on "+e);return e}},4039:function(e,t,n){var r=n(92727);e.exports=function(e,t){try{Object.defineProperty(r,e,{value:t,configurable:!0,writable:!0})}catch(n){r[e]=t}return t}},28182:function(e,t,n){var r=n(25396),o=n(68176),i=r("keys");e.exports=function(e){return i[e]||(i[e]=o(e))}},41679:function(e,t,n){var r=n(92727),o=n(4039),i="__core-js_shared__",a=r[i]||o(i,{});e.exports=a},25396:function(e,t,n){var r=n(38277),o=n(41679);(e.exports=function(e,t){return o[e]||(o[e]=void 0!==t?t:{})})("versions",[]).push({version:"3.18.3",mode:r?"pure":"global",copyright:"\xa9 2021 Denis Pushkarev (zloirock.ru)"})},46159:function(e,t,n){var r=n(48946),o=n(93542),i=n(96884),a=function(e){return function(t,n){var a,l,s=o(i(t)),c=r(n),u=s.length;return c<0||c>=u?e?"":void 0:(a=s.charCodeAt(c))<55296||a>56319||c+1===u||(l=s.charCodeAt(c+1))<56320||l>57343?e?s.charAt(c):a:e?s.slice(c,c+2):(a-55296<<10)+(l-56320)+65536}};e.exports={codeAt:a(!1),charAt:a(!0)}},38791:function(e,t,n){var r=n(48946),o=Math.max,i=Math.min;e.exports=function(e,t){var n=r(e);return n<0?o(n+t,0):i(n,t)}},83798:function(e,t,n){var r=n(29554),o=n(96884);e.exports=function(e){return r(o(e))}},48946:function(e){var t=Math.ceil,n=Math.floor;e.exports=function(e){var r=+e;return r!=r||0===r?0:(r>0?n:t)(r)}},44446:function(e,t,n){var r=n(48946),o=Math.min;e.exports=function(e){return e>0?o(r(e),9007199254740991):0}},47322:function(e,t,n){var r=n(96884);e.exports=function(e){return Object(r(e))}},67256:function(e,t,n){var r=n(86157),o=n(66290),i=n(92567),a=n(87450),l=n(26739)("toPrimitive");e.exports=function(e,t){if(!r(e)||o(e))return e;var n,s=i(e,l);if(s){if(void 0===t&&(t="default"),!r(n=s.call(e,t))||o(n))return n;throw TypeError("Can't convert object to primitive value")}return void 0===t&&(t="number"),a(e,t)}},87892:function(e,t,n){var r=n(67256),o=n(66290);e.exports=function(e){var t=r(e,"string");return o(t)?t:String(t)}},38823:function(e,t,n){var r=n(26739)("toStringTag"),o={};o[r]="z",e.exports="[object z]"===String(o)},93542:function(e,t,n){var r=n(63658);e.exports=function(e){if("Symbol"===r(e))throw TypeError("Cannot convert a Symbol value to a string");return String(e)}},72386:function(e){e.exports=function(e){try{return String(e)}catch(e){return"Object"}}},68176:function(e){var t=0,n=Math.random();e.exports=function(e){return"Symbol("+String(void 0===e?"":e)+")_"+(++t+n).toString(36)}},78451:function(e,t,n){var r=n(26200);e.exports=r&&!Symbol.sham&&"symbol"==typeof Symbol.iterator},26739:function(e,t,n){var r=n(92727),o=n(25396),i=n(36984),a=n(68176),l=n(26200),s=n(78451),c=o("wks"),u=r.Symbol,f=s?u:u&&u.withoutSetter||a;e.exports=function(e){return i(c,e)&&(l||"string"==typeof c[e])||(l&&i(u,e)?c[e]=u[e]:c[e]=f("Symbol."+e)),c[e]}},74009:function(e,t,n){var 
r=n(81980),o=Date.prototype,i="Invalid Date",a="toString",l=o[a],s=o.getTime;String(new Date(NaN))!=i&&r(o,a,function(){var e=s.call(this);return e==e?l.call(this):i})},95761:function(e,t,n){var r=n(56667),o=n(15112).EXISTS,i=n(69128).f,a=Function.prototype,l=a.toString,s=/^\s*function ([^ (]*)/;r&&!o&&i(a,"name",{configurable:!0,get:function(){try{return l.call(this).match(s)[1]}catch(e){return""}}})},93935:function(e,t,n){var r=n(38823),o=n(81980),i=n(3211);r||o(Object.prototype,"toString",i,{unsafe:!0})},8914:function(e,t,n){"use strict";var r=n(54271),o=n(88467);r({target:"RegExp",proto:!0,forced:/./.exec!==o},{exec:o})},8016:function(e,t,n){"use strict";var r=n(15112).PROPER,o=n(81980),i=n(24601),a=n(93542),l=n(61531),s=n(54181),c="toString",u=RegExp.prototype,f=u[c],d=l(function(){return"/a/b"!=f.call({source:"a",flags:"b"})}),p=r&&f.name!=c;(d||p)&&o(RegExp.prototype,c,function(){var e=i(this),t=a(e.source),n=e.flags;return"/"+t+"/"+a(void 0===n&&e instanceof RegExp&&!("flags"in u)?s.call(e):n)},{unsafe:!0})},45684:function(e,t,n){"use strict";var r=n(49069),o=n(61531),i=n(24601),a=n(65170),l=n(48946),s=n(44446),c=n(93542),u=n(96884),f=n(88507),d=n(92567),p=n(9562),h=n(49583),g=n(26739)("replace"),m=Math.max,b=Math.min,v="$0"==="a".replace(/./,"$0"),y=!!/./[g]&&""===/./[g]("a","$0");r("replace",function(e,t,n){var r=y?"$":"$0";return[function(e,n){var r=u(this),o=void 0==e?void 0:d(e,g);return o?o.call(e,r,n):t.call(c(r),e,n)},function(e,o){var u=i(this),d=c(e);if("string"==typeof o&&-1===o.indexOf(r)&&-1===o.indexOf("$<")){var g=n(t,u,d,o);if(g.done)return g.value}var v=a(o);v||(o=c(o));var y=u.global;if(y){var x=u.unicode;u.lastIndex=0}for(var w=[];;){var E=h(u,d);if(null===E||(w.push(E),!y))break;""===c(E[0])&&(u.lastIndex=f(d,s(u.lastIndex),x))}for(var S="",k=0,_=0;_=k&&(S+=d.slice(k,A)+M,k=A+C.length)}return S+d.slice(k)}]},!!o(function(){var e=/./;return e.exec=function(){var e=[];return e.groups={a:"7"},e},"7"!=="".replace(e,"$")})||!v||y)},68274:function(e,t,n){let r;var o=n(52040);t.formatArgs=function(t){if(t[0]=(this.useColors?"%c":"")+this.namespace+(this.useColors?" %c":" ")+t[0]+(this.useColors?"%c ":" ")+"+"+e.exports.humanize(this.diff),!this.useColors)return;let n="color: "+this.color;t.splice(1,0,n,"color: inherit");let r=0,o=0;t[0].replace(/%[a-zA-Z%]/g,e=>{"%%"!==e&&(r++,"%c"===e&&(o=r))}),t.splice(o,0,n)},t.save=function(e){try{e?t.storage.setItem("debug",e):t.storage.removeItem("debug")}catch(e){}},t.load=function(){let e;try{e=t.storage.getItem("debug")}catch(e){}return!e&&void 0!==o&&"env"in o&&(e=o.env.DEBUG),e},t.useColors=function(){return"undefined"!=typeof window&&!!window.process&&("renderer"===window.process.type||!!window.process.__nwjs)||!("undefined"!=typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/(edge|trident)\/(\d+)/))&&("undefined"!=typeof document&&document.documentElement&&document.documentElement.style&&document.documentElement.style.WebkitAppearance||"undefined"!=typeof window&&window.console&&(window.console.firebug||window.console.exception&&window.console.table)||"undefined"!=typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/firefox\/(\d+)/)&&parseInt(RegExp.$1,10)>=31||"undefined"!=typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/applewebkit\/(\d+)/))},t.storage=function(){try{return localStorage}catch(e){}}(),t.destroy=(r=!1,()=>{r||(r=!0,console.warn("Instance method `debug.destroy()` is deprecated and no longer does anything. 
It will be removed in the next major version of `debug`."))}),t.colors=["#0000CC","#0000FF","#0033CC","#0033FF","#0066CC","#0066FF","#0099CC","#0099FF","#00CC00","#00CC33","#00CC66","#00CC99","#00CCCC","#00CCFF","#3300CC","#3300FF","#3333CC","#3333FF","#3366CC","#3366FF","#3399CC","#3399FF","#33CC00","#33CC33","#33CC66","#33CC99","#33CCCC","#33CCFF","#6600CC","#6600FF","#6633CC","#6633FF","#66CC00","#66CC33","#9900CC","#9900FF","#9933CC","#9933FF","#99CC00","#99CC33","#CC0000","#CC0033","#CC0066","#CC0099","#CC00CC","#CC00FF","#CC3300","#CC3333","#CC3366","#CC3399","#CC33CC","#CC33FF","#CC6600","#CC6633","#CC9900","#CC9933","#CCCC00","#CCCC33","#FF0000","#FF0033","#FF0066","#FF0099","#FF00CC","#FF00FF","#FF3300","#FF3333","#FF3366","#FF3399","#FF33CC","#FF33FF","#FF6600","#FF6633","#FF9900","#FF9933","#FFCC00","#FFCC33"],t.log=console.debug||console.log||(()=>{}),e.exports=n(31765)(t);let{formatters:i}=e.exports;i.j=function(e){try{return JSON.stringify(e)}catch(e){return"[UnexpectedJSONParseError]: "+e.message}}},31765:function(e,t,n){e.exports=function(e){function t(e){let n,o,i;let a=null;function l(...e){if(!l.enabled)return;let r=Number(new Date),o=r-(n||r);l.diff=o,l.prev=n,l.curr=r,n=r,e[0]=t.coerce(e[0]),"string"!=typeof e[0]&&e.unshift("%O");let i=0;e[0]=e[0].replace(/%([a-zA-Z%])/g,(n,r)=>{if("%%"===n)return"%";i++;let o=t.formatters[r];if("function"==typeof o){let t=e[i];n=o.call(l,t),e.splice(i,1),i--}return n}),t.formatArgs.call(l,e);let a=l.log||t.log;a.apply(l,e)}return l.namespace=e,l.useColors=t.useColors(),l.color=t.selectColor(e),l.extend=r,l.destroy=t.destroy,Object.defineProperty(l,"enabled",{enumerable:!0,configurable:!1,get:()=>null!==a?a:(o!==t.namespaces&&(o=t.namespaces,i=t.enabled(e)),i),set:e=>{a=e}}),"function"==typeof t.init&&t.init(l),l}function r(e,n){let r=t(this.namespace+(void 0===n?":":n)+e);return r.log=this.log,r}function o(e){return e.toString().substring(2,e.toString().length-2).replace(/\.\*\?$/,"*")}return t.debug=t,t.default=t,t.coerce=function(e){return e instanceof Error?e.stack||e.message:e},t.disable=function(){let e=[...t.names.map(o),...t.skips.map(o).map(e=>"-"+e)].join(",");return t.enable(""),e},t.enable=function(e){let n;t.save(e),t.namespaces=e,t.names=[],t.skips=[];let r=("string"==typeof e?e:"").split(/[\s,]+/),o=r.length;for(n=0;n{t[n]=e[n]}),t.names=[],t.skips=[],t.formatters={},t.selectColor=function(e){let n=0;for(let t=0;t0?parseInt(n):null}(),t){case"b":c+=parseInt(d(),10).toString(2);break;case"c":"string"==typeof(n=d())||n instanceof String?c+=n:c+=String.fromCharCode(parseInt(n,10));break;case"d":c+=parseInt(d(),10);break;case"f":r=String(parseFloat(d()).toFixed(o||6)),c+=f?r:r.replace(/^0/,"");break;case"j":c+=JSON.stringify(d());break;case"o":c+="0"+parseInt(d(),10).toString(8);break;case"s":c+=d();break;case"x":c+="0x"+parseInt(d(),10).toString(16);break;case"X":c+="0x"+parseInt(d(),10).toString(16).toUpperCase();break;default:c+=t}else"%"===t?u=!0:c+=t;return c}(t=e.exports=n).format=n,t.vsprintf=function(e,t){return n.apply(null,[e].concat(t))},"undefined"!=typeof console&&"function"==typeof console.log&&(t.printf=function(){console.log(n.apply(null,arguments))})}()},10184:function(e,t,n){"use strict";function r(e){return Array.isArray?Array.isArray(e):"[object Array]"===u(e)}n.d(t,{Z:function(){return q}});let o=1/0;function i(e){return"string"==typeof e}function a(e){return"number"==typeof e}function l(e){return"object"==typeof e}function s(e){return null!=e}function c(e){return!e.trim().length}function u(e){return 
null==e?void 0===e?"[object Undefined]":"[object Null]":Object.prototype.toString.call(e)}let f=e=>`Invalid value for key ${e}`,d=e=>`Pattern length exceeds max of ${e}.`,p=e=>`Missing ${e} property in key`,h=e=>`Property 'weight' in key '${e}' must be a positive integer`,g=Object.prototype.hasOwnProperty;class m{constructor(e){this._keys=[],this._keyMap={};let t=0;e.forEach(e=>{let n=b(e);t+=n.weight,this._keys.push(n),this._keyMap[n.id]=n,t+=n.weight}),this._keys.forEach(e=>{e.weight/=t})}get(e){return this._keyMap[e]}keys(){return this._keys}toJSON(){return JSON.stringify(this._keys)}}function b(e){let t=null,n=null,o=null,a=1,l=null;if(i(e)||r(e))o=e,t=v(e),n=y(e);else{if(!g.call(e,"name"))throw Error(p("name"));let r=e.name;if(o=r,g.call(e,"weight")&&(a=e.weight)<=0)throw Error(h(r));t=v(r),n=y(r),l=e.getFn}return{path:t,id:n,weight:a,src:o,getFn:l}}function v(e){return r(e)?e:e.split(".")}function y(e){return r(e)?e.join("."):e}var x={isCaseSensitive:!1,includeScore:!1,keys:[],shouldSort:!0,sortFn:(e,t)=>e.score===t.score?e.idx{if(s(e)){if(t[d]){var p,h;let g=t[d],m=e[g];if(s(m)){if(d===t.length-1&&(i(m)||a(m)||!0===(p=m)||!1===p||l(h=p)&&null!==h&&"[object Boolean]"==u(p)))n.push(null==m?"":function(e){if("string"==typeof e)return e;let t=e+"";return"0"==t&&1/e==-o?"-0":t}(m));else if(r(m)){c=!0;for(let e=0,n=m.length;e{this._keysMap[e.id]=t})}create(){!this.isCreated&&this.docs.length&&(this.isCreated=!0,i(this.docs[0])?this.docs.forEach((e,t)=>{this._addString(e,t)}):this.docs.forEach((e,t)=>{this._addObject(e,t)}),this.norm.clear())}add(e){let t=this.size();i(e)?this._addString(e,t):this._addObject(e,t)}removeAt(e){this.records.splice(e,1);for(let t=e,n=this.size();t{let a=t.getFn?t.getFn(e):this.getFn(e,t.path);if(s(a)){if(r(a)){let e=[],t=[{nestedArrIndex:-1,value:a}];for(;t.length;){let{nestedArrIndex:n,value:o}=t.pop();if(s(o)){if(i(o)&&!c(o)){let t={v:o,i:n,n:this.norm.get(o)};e.push(t)}else r(o)&&o.forEach((e,n)=>{t.push({nestedArrIndex:n,value:e})})}}n.$[o]=e}else if(i(a)&&!c(a)){let e={v:a,n:this.norm.get(a)};n.$[o]=e}}}),this.records.push(n)}toJSON(){return{keys:this.keys,records:this.records}}}function S(e,t,{getFn:n=x.getFn,fieldNormWeight:r=x.fieldNormWeight}={}){let o=new E({getFn:n,fieldNormWeight:r});return o.setKeys(e.map(b)),o.setSources(t),o.create(),o}function k(e,{errors:t=0,currentLocation:n=0,expectedLocation:r=0,distance:o=x.distance,ignoreLocation:i=x.ignoreLocation}={}){let a=t/e.length;if(i)return a;let l=Math.abs(r-n);return o?a+l/o:l?1:a}class _{constructor(e,{location:t=x.location,threshold:n=x.threshold,distance:r=x.distance,includeMatches:o=x.includeMatches,findAllMatches:i=x.findAllMatches,minMatchCharLength:a=x.minMatchCharLength,isCaseSensitive:l=x.isCaseSensitive,ignoreLocation:s=x.ignoreLocation}={}){if(this.options={location:t,threshold:n,distance:r,includeMatches:o,findAllMatches:i,minMatchCharLength:a,isCaseSensitive:l,ignoreLocation:s},this.pattern=l?e:e.toLowerCase(),this.chunks=[],!this.pattern.length)return;let c=(e,t)=>{this.chunks.push({pattern:e,alphabet:function(e){let t={};for(let n=0,r=e.length;n32){let e=0,t=u%32,n=u-t;for(;e{let{isMatch:g,score:m,indices:b}=function(e,t,n,{location:r=x.location,distance:o=x.distance,threshold:i=x.threshold,findAllMatches:a=x.findAllMatches,minMatchCharLength:l=x.minMatchCharLength,includeMatches:s=x.includeMatches,ignoreLocation:c=x.ignoreLocation}={}){let u;if(t.length>32)throw Error(d(32));let 
f=t.length,p=e.length,h=Math.max(0,Math.min(r,p)),g=i,m=h,b=l>1||s,v=b?Array(p):[];for(;(u=e.indexOf(t,m))>-1;)if(g=Math.min(k(t,{currentLocation:u,expectedLocation:h,distance:o,ignoreLocation:c}),g),m=u+f,b){let e=0;for(;e=s;i-=1){let a=i-1,l=n[e.charAt(a)];if(b&&(v[a]=+!!l),d[i]=(d[i+1]<<1|1)&l,r&&(d[i]|=(y[i+1]|y[i])<<1|1|y[i+1]),d[i]&S&&(w=k(t,{errors:r,currentLocation:a,expectedLocation:h,distance:o,ignoreLocation:c}))<=g){if(g=w,(m=a)<=h)break;s=Math.max(1,2*h-m)}}let x=k(t,{errors:r+1,currentLocation:h,expectedLocation:h,distance:o,ignoreLocation:c});if(x>g)break;y=d}let _={isMatch:m>=0,score:Math.max(.001,w)};if(b){let e=function(e=[],t=x.minMatchCharLength){let n=[],r=-1,o=-1,i=0;for(let a=e.length;i=t&&n.push([r,o]),r=-1)}return e[i-1]&&i-r>=t&&n.push([r,i-1]),n}(v,l);e.length?s&&(_.indices=e):_.isMatch=!1}return _}(e,t,p,{location:r+h,distance:o,threshold:i,findAllMatches:a,minMatchCharLength:l,includeMatches:n,ignoreLocation:s});g&&(f=!0),u+=m,g&&b&&(c=[...c,...b])});let p={isMatch:f,score:f?u/this.chunks.length:1};return f&&n&&(p.indices=c),p}}class O{constructor(e){this.pattern=e}static isMultiMatch(e){return C(e,this.multiRegex)}static isSingleMatch(e){return C(e,this.singleRegex)}search(){}}function C(e,t){let n=e.match(t);return n?n[1]:null}class A extends O{constructor(e,{location:t=x.location,threshold:n=x.threshold,distance:r=x.distance,includeMatches:o=x.includeMatches,findAllMatches:i=x.findAllMatches,minMatchCharLength:a=x.minMatchCharLength,isCaseSensitive:l=x.isCaseSensitive,ignoreLocation:s=x.ignoreLocation}={}){super(e),this._bitapSearch=new _(e,{location:t,threshold:n,distance:r,includeMatches:o,findAllMatches:i,minMatchCharLength:a,isCaseSensitive:l,ignoreLocation:s})}static get type(){return"fuzzy"}static get multiRegex(){return/^"(.*)"$/}static get singleRegex(){return/^(.*)$/}search(e){return this._bitapSearch.searchIn(e)}}class N extends O{constructor(e){super(e)}static get type(){return"include"}static get multiRegex(){return/^'"(.*)"$/}static get singleRegex(){return/^'(.*)$/}search(e){let t,n=0,r=[],o=this.pattern.length;for(;(t=e.indexOf(this.pattern,n))>-1;)n=t+o,r.push([t,n-1]);let i=!!r.length;return{isMatch:i,score:i?0:1,indices:r}}}let R=[class extends O{constructor(e){super(e)}static get type(){return"exact"}static get multiRegex(){return/^="(.*)"$/}static get singleRegex(){return/^=(.*)$/}search(e){let t=e===this.pattern;return{isMatch:t,score:t?0:1,indices:[0,this.pattern.length-1]}}},N,class extends O{constructor(e){super(e)}static get type(){return"prefix-exact"}static get multiRegex(){return/^\^"(.*)"$/}static get singleRegex(){return/^\^(.*)$/}search(e){let t=e.startsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,this.pattern.length-1]}}},class extends O{constructor(e){super(e)}static get type(){return"inverse-prefix-exact"}static get multiRegex(){return/^!\^"(.*)"$/}static get singleRegex(){return/^!\^(.*)$/}search(e){let t=!e.startsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}},class extends O{constructor(e){super(e)}static get type(){return"inverse-suffix-exact"}static get multiRegex(){return/^!"(.*)"\$$/}static get singleRegex(){return/^!(.*)\$$/}search(e){let t=!e.endsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}},class extends O{constructor(e){super(e)}static get type(){return"suffix-exact"}static get multiRegex(){return/^"(.*)"\$$/}static get singleRegex(){return/^(.*)\$$/}search(e){let 
t=e.endsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[e.length-this.pattern.length,e.length-1]}}},class extends O{constructor(e){super(e)}static get type(){return"inverse-exact"}static get multiRegex(){return/^!"(.*)"$/}static get singleRegex(){return/^!(.*)$/}search(e){let t=e.indexOf(this.pattern),n=-1===t;return{isMatch:n,score:n?0:1,indices:[0,e.length-1]}}},A],T=R.length,P=/ +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)/,M=new Set([A.type,N.type]),j=[];function L(e,t){for(let n=0,r=j.length;n!!(e[I.AND]||e[I.OR]),B=e=>!!e[D.PATH],z=e=>!r(e)&&l(e)&&!F(e),$=e=>({[I.AND]:Object.keys(e).map(t=>({[t]:e[t]}))});function U(e,t,{auto:n=!0}={}){let o=e=>{let a=Object.keys(e),l=B(e);if(!l&&a.length>1&&!F(e))return o($(e));if(z(e)){let r=l?e[D.PATH]:a[0],o=l?e[D.PATTERN]:e[r];if(!i(o))throw Error(f(r));let s={keyId:y(r),pattern:o};return n&&(s.searcher=L(o,t)),s}let s={children:[],operator:a[0]};return a.forEach(t=>{let n=e[t];r(n)&&n.forEach(e=>{s.children.push(o(e))})}),s};return F(e)||(e=$(e)),o(e)}function H(e,t){let n=e.matches;t.matches=[],s(n)&&n.forEach(e=>{if(!s(e.indices)||!e.indices.length)return;let{indices:n,value:r}=e,o={indices:n,value:r};e.key&&(o.key=e.key.src),e.idx>-1&&(o.refIndex=e.idx),t.matches.push(o)})}function Z(e,t){t.score=e.score}class q{constructor(e,t={},n){this.options={...x,...t},this.options.useExtendedSearch,this._keyStore=new m(this.options.keys),this.setCollection(e,n)}setCollection(e,t){if(this._docs=e,t&&!(t instanceof E))throw Error("Incorrect 'index' type");this._myIndex=t||S(this.options.keys,this._docs,{getFn:this.options.getFn,fieldNormWeight:this.options.fieldNormWeight})}add(e){s(e)&&(this._docs.push(e),this._myIndex.add(e))}remove(e=()=>!1){let t=[];for(let n=0,r=this._docs.length;n{let n=1;e.matches.forEach(({key:e,norm:r,score:o})=>{let i=e?e.weight:null;n*=Math.pow(0===o&&i?Number.EPSILON:o,(i||1)*(t?1:r))}),e.score=n})}(c,{ignoreFieldNorm:s}),o&&c.sort(l),a(t)&&t>-1&&(c=c.slice(0,t)),function(e,t,{includeMatches:n=x.includeMatches,includeScore:r=x.includeScore}={}){let o=[];return n&&o.push(H),r&&o.push(Z),e.map(e=>{let{idx:n}=e,r={item:t[n],refIndex:n};return o.length&&o.forEach(t=>{t(e,r)}),r})}(c,this._docs,{includeMatches:n,includeScore:r})}_searchStringList(e){let t=L(e,this.options),{records:n}=this._myIndex,r=[];return n.forEach(({v:e,i:n,n:o})=>{if(!s(e))return;let{isMatch:i,score:a,indices:l}=t.searchIn(e);i&&r.push({item:e,idx:n,matches:[{score:a,value:e,norm:o,indices:l}]})}),r}_searchLogical(e){let t=U(e,this.options),n=(e,t,r)=>{if(!e.children){let{keyId:n,searcher:o}=e,i=this._findMatches({key:this._keyStore.get(n),value:this._myIndex.getValueForItemAtKeyId(t,n),searcher:o});return i&&i.length?[{idx:r,item:t,matches:i}]:[]}let o=[];for(let i=0,a=e.children.length;i{if(s(e)){let a=n(t,e,r);a.length&&(o[r]||(o[r]={idx:r,item:e,matches:[]},i.push(o[r])),a.forEach(({matches:e})=>{o[r].matches.push(...e)}))}}),i}_searchObjectList(e){let t=L(e,this.options),{keys:n,records:r}=this._myIndex,o=[];return r.forEach(({$:e,i:r})=>{if(!s(e))return;let i=[];n.forEach((n,r)=>{i.push(...this._findMatches({key:n,value:e[r],searcher:t}))}),i.length&&o.push({idx:r,item:e,matches:i})}),o}_findMatches({key:e,value:t,searcher:n}){if(!s(t))return[];let o=[];if(r(t))t.forEach(({v:t,i:r,n:i})=>{if(!s(t))return;let{isMatch:a,score:l,indices:c}=n.searchIn(t);a&&o.push({score:l,key:e,value:t,idx:r,norm:i,indices:c})});else{let{v:r,n:i}=t,{isMatch:a,score:l,indices:s}=n.searchIn(r);a&&o.push({score:l,key:e,value:r,norm:i,indices:s})}return 
o}}q.version="6.6.2",q.createIndex=S,q.parseIndex=function(e,{getFn:t=x.getFn,fieldNormWeight:n=x.fieldNormWeight}={}){let{keys:r,records:o}=e,i=new E({getFn:t,fieldNormWeight:n});return i.setKeys(r),i.setIndexRecords(o),i},q.config=x,q.parseQuery=U,function(...e){j.push(...e)}(class{constructor(e,{isCaseSensitive:t=x.isCaseSensitive,includeMatches:n=x.includeMatches,minMatchCharLength:r=x.minMatchCharLength,ignoreLocation:o=x.ignoreLocation,findAllMatches:i=x.findAllMatches,location:a=x.location,threshold:l=x.threshold,distance:s=x.distance}={}){this.query=null,this.options={isCaseSensitive:t,includeMatches:n,minMatchCharLength:r,findAllMatches:i,ignoreLocation:o,location:a,threshold:l,distance:s},this.pattern=t?e:e.toLowerCase(),this.query=function(e,t={}){return e.split("|").map(e=>{let n=e.trim().split(P).filter(e=>e&&!!e.trim()),r=[];for(let e=0,o=n.length;eMath.random().toString(36).substring(2);let f=new i.default({rules:{emphasis:{filter:["br"],replacement:()=>"\n"}}}),d=e=>Array.isArray(e)?d(e.at(-1)):e?.value||e,p=(e,t,n)=>{let r=e.find(e=>"button"===e.type&&"Submit"===e.props.value)?.id,o=t.findIndex(e=>e.targets?.includes?.(r));return -1===o?t.findIndex((e={})=>e.inputs?.length&&e.outputs?.length&&e.backend_fn&&e.trigger===n):o},h=(e,t)=>{let n=e.find(e=>"button"===e.type)?.id;return n?t.findIndex(e=>e.targets?.includes?.(n)):-1};t.GradioChatBot=class{options;history=[];session_hash;instance_map;constructor(e="0"){if("string"==typeof e?this.options={url:e}:this.options=e,(0,l.default)(this.options.endpoint||this.options.url,"endpoint and url must specify one of them"),!isNaN(this.options.url)){let e=parseInt(this.options.url,10);(0,l.default)(e{let{components:r,dependencies:o}=t,i=o[e],a=i?.inputs.map(e=>this.instance_map[e].props.value);u("fnIndex",e);let s=n?0:i?.inputs.indexOf(i?.targets?.[0]);return s<0&&(s=i?.inputs.findIndex(e=>r?.find(t=>e===t.id&&("textbox"===t.type||t.example_input)))),(0,l.default)(s>-1,"Cannot find the input box"),u("inputIndex",s),[a,s]};html2Markdown(e){return e=this.options.parseHtml?f.turndown(e||""):e,e?.replace?.(/�/g,"").trim()}async reset(){this.history=[],this.instance_map=null,this.session_hash=(0,t.generateHash)()}async chat(e,t){return(0,l.default)(e,"input can't be empty!"),new Promise(async(n,o)=>{try{let{endpoint:i,fnIndex:a,args:c=[],hf_token:f}=this.options,d=await (0,s.client)(i,{session_hash:this.session_hash,hf_token:f,normalise_files:!0}),{components:g,dependencies:m}=d.config,b=this.instance_map;b||(b=g.reduce((e,t)=>(e[t.id]=t,e),{}),this.instance_map=b),(a=a??p(g,m,"submit"))<0&&(a=Math.max(h(g,m),p(g,m,"click"))),(0,l.default)(-1!==a,"Failed to parse this space, you may need to specify the fnIndex manually!");let[v,y]=this.parseInputs(a,d.config);c?.length||(c=v);let x=this.options.inputIndex??y;x>-1&&(c[x]=e),u("args",a,JSON.stringify(c));let w=[],E=-1,S=[],k=/^'([^]+)'$/,_=new Map,O=(e,n)=>{let o=m[n].outputs;e?.forEach((e,n)=>{let i=b[o[n]];if(i.props.value_is_output=!0,"object"==typeof e&&null!==e&&"update"===e.__type__)for(let[t,n]of Object.entries(e))"__type__"!==t&&(i.props[t]=n);else if(i.props.value=e,r.env.DEBUG&&u("value",i.type,JSON.stringify(e)),"chatbot"===i.type&&e){this.history=e.slice(-this.options.historySize),i.props.value=this.history;let n=e?.at(-1)?.at(-1);t?.onMessage?.(this.html2Markdown(n))}})},C=async(e,r=null,i=null)=>{let a=m[e],l=w[e];if(S=S.filter(({fn_index:t})=>t!==e),a.cancels&&await Promise.all(a.cancels.map(async e=>{let t=_.get(e);return 
t?.cancel(),t})),"pending"===l||"generating"===l)return;let s={fn_index:e,data:r||a.inputs.map(e=>b[e].props.value),event_data:a.collects_event_data?i:null},c=()=>{let r=d.submit(s.fn_index,s.data,s.event_data).on("data",({data:e,fn_index:t})=>{O(e,t)}).on("status",({fn_index:e,...i})=>{if(w[e]=i.stage,u("status",i.stage),"complete"===i.stage){let t=!0;if(m.map(async(n,r)=>{n.trigger_after===e&&(t=!1,C(r))}),r.destroy(),t){let e=this.history?.at(-1)?.at(-1);n(this.html2Markdown(e))}}if("error"===i.stage){if(i.message){let t=i.message.replace(k,(e,t)=>t);S=[{type:"error",message:t,id:++E,fn_index:e},...S]}m.map(async(t,n)=>{t.trigger_after!==e||t.trigger_only_on_success||C(n)}),t?.onError?.(i.message||"error"),o(i.message||"error"),r.destroy()}});_.set(e,r)};a.frontend_fn?a.frontend_fn(s.data.concat(a.outputs.map(e=>b[e].props.value))).then(t=>{a.backend_fn?(s.data=t,c()):O(t,e)}):a.backend_fn&&c()};C(a,c)}catch(e){o(e)}})}}},23067:function(e,t,n){"use strict";var r=n(91083).Buffer,o=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0}),t.walk_and_store_blobs=t.handle_blob=t.client=t.duplicate=t.upload_files=t.post_data=void 0;let i=o(n(16218)),a=n(62961),l=n(69564),s="Connection errored out.";async function c(e,t,n){let r={"Content-Type":"application/json"};n&&(r.Authorization=`Bearer ${n}`);try{var o=await (0,l.fetch)(e,{method:"POST",body:JSON.stringify(t),headers:r})}catch(e){return[{error:s},500]}let i=await o.json();return[i,o.status]}async function u(e,t,n){let r={};n&&(r.Authorization=`Bearer ${n}`);let o=new FormData;t.forEach(e=>{o.append("files",e)});try{var i=await (0,l.fetch)(`${e}/upload`,{method:"POST",body:o,headers:r})}catch(e){return{error:s}}let a=await i.json();return{files:a}}async function f(e,t){let{hf_token:n,private:r,hardware:o,timeout:i}=t;if(o&&!a.hardware_types.includes(o))throw Error(`Invalid hardware type provided. 
Valid types are: ${a.hardware_types.map(e=>`"${e}"`).join(",")}.`);let s={Authorization:`Bearer ${n}`},c=(await (await (0,l.fetch)("https://huggingface.co/api/whoami-v2",{headers:s})).json()).name,u=e.split("/")[1],f={repository:`${c}/${u}`};r&&(f.private=!0);try{let r=await (0,l.fetch)(`https://huggingface.co/api/spaces/${e}/duplicate`,{method:"POST",headers:{"Content-Type":"application/json",...s},body:JSON.stringify(f)});if(409===r.status)return d(`${c}/${u}`,t);{let l;let s=await r.json();o||(l=await (0,a.get_space_hardware)(e,n));let f=o||l||"cpu-basic";return await (0,a.set_space_hardware)(`${c}/${u}`,f,n),await (0,a.set_space_timeout)(`${c}/${u}`,i||300,n),d(s.url,t)}}catch(e){throw Error(e)}}async function d(e,t={normalise_files:!0,session_hash:Math.random().toString(36).substring(2)}){return new Promise(async n=>{let r,o;let{status_callback:c,hf_token:u,normalise_files:f,session_hash:d}=t,v={predict:function(e,t,n){let r=!1,o=!1;return new Promise((i,a)=>{let l=T(e,t,n);l.on("data",e=>{r=!0,o&&l.destroy(),i(e)}).on("status",e=>{"error"===e.stage&&a(e),"complete"===e.stage&&r&&l.destroy(),"complete"===e.stage&&(o=!0)})})},submit:T,view_api:P},y=f??!0,{ws_protocol:E,http_protocol:S,host:k,space_id:_}=await (0,a.process_endpoint)(e,u),O={},C={},A=!1;async function N(e){r=e,C=(0,a.map_names_to_ids)(e?.dependencies||[]);try{o=await P(r)}catch(e){console.error(`Could not get api details: ${e.message}`)}return{config:r,...v}}async function R(e){if(c&&c(e),"running"===e.status)try{r=await x(`${S}//${k}`,u);let e=await N(r);n(e)}catch(e){c&&c({status:"error",message:"Could not load this space.",load_status:"error",detail:"NOT_FOUND"})}}u&&_&&(A=await m(_,u));try{r=await x(`${S}//${k}`,u);let e=await N(r);n(e)}catch(e){console.log("e",e),_?w(_,a.RE_SPACE_NAME.test(_)?"space_name":"subdomain",R):c&&c({status:"error",message:"Could not load this space.",load_status:"error",detail:"NOT_FOUND"})}function T(e,t,n){let a,c,f,h;if("number"==typeof e)a=e,c=o.unnamed_endpoints[a];else{let t=e.replace(/^\//,"");a=C[t],c=o.named_endpoints[e.trim()]}if("number"!=typeof a)throw Error("There is no endpoint matching that name of fn_index matching that number.");let g="number"==typeof e?"/predict":e,m=!1,v={};function x(e){let t=v[e.type]||[];t?.forEach(t=>t(e))}function w(e,t){let n=v[e]||[];return v[e]=n,n?.push(t),{on:w,off:_,cancel:N,destroy:R}}function _(e,t){let n=v[e]||[];return n=n?.filter(e=>e!==t),v[e]=n,{on:w,off:_,cancel:N,destroy:R}}async function N(){let e={stage:"complete",queue:!1,time:new Date};m=e,x({...e,type:"status",endpoint:g,fn_index:a}),h&&0===h.readyState?h.addEventListener("open",()=>{h.close()}):h.close();try{await (0,l.fetch)(`${S}//${k+r.path}/reset`,{headers:{"Content-Type":"application/json"},method:"POST",body:JSON.stringify({fn_index:a,session_hash:d})})}catch(e){console.warn("The `/reset` endpoint could not be called. 
Subsequent endpoint results may be unreliable.")}}function R(){for(let e in v)(v[e]||[]).forEach(t=>{_(e,t)})}return b(`${S}//${k+r.path}`,t,c,u).then(e=>{f={data:e||[],event_data:n,fn_index:a};{x({type:"status",stage:"pending",queue:!0,endpoint:g,fn_index:a,time:new Date});let e=new URL(`${E}://${k}${r.path} - /queue/join`);A&&e.searchParams.set("__sign",A),(h=new l.WebSocket(e)).onclose=e=>{e.wasClean||x({type:"status",stage:"error",message:s,queue:!0,endpoint:g,fn_index:a,time:new Date})},h.onmessage=function(e){let t=JSON.parse(e.data),{type:n,status:o,data:i}=function(e,t){switch(e?.msg){case"send_data":return{type:"data"};case"send_hash":return{type:"hash"};case"queue_full":return{type:"update",status:{queue:!0,message:"This application is too busy. Keep trying!",stage:"error",code:e.code,success:e.success}};case"estimation":return{type:"update",status:{queue:!0,stage:t||"pending",code:e.code,size:e.queue_size,position:e.rank,eta:e.rank_eta,success:e.success}};case"progress":return{type:"update",status:{queue:!0,stage:"pending",code:e.code,progress_data:e.progress_data,success:e.success}};case"process_generating":return{type:"generating",status:{queue:!0,message:e.success?null:e.output.error,stage:e.success?"generating":"error",code:e.code,progress_data:e.progress_data,eta:e.average_duration},data:e.success?e.output:null};case"process_completed":if("error"in e.output)return{type:"update",status:{queue:!0,message:e.output.error,stage:"error",code:e.code,success:e.success}};return{type:"complete",status:{queue:!0,message:e.success?void 0:e.output.error,stage:e.success?"complete":"error",code:e.code,progress_data:e.progress_data,eta:e.output.average_duration},data:e.success?e.output:null};case"process_starts":return{type:"update",status:{queue:!0,stage:"pending",code:e.code,size:e.rank,position:0,success:e.success}}}return{type:"none",status:{stage:"error",queue:!0}}}(t,O[a]);if("update"===n&&o&&!m)x({type:"status",endpoint:g,fn_index:a,time:new Date,...o}),"error"===o.stage&&h.close();else if("hash"===n){h.send(JSON.stringify({fn_index:a,session_hash:d}));return}else"data"===n?h.send(JSON.stringify({...f,session_hash:d})):"complete"===n?m=o:"generating"===n&&x({type:"status",time:new Date,...o,stage:o?.stage,queue:!0,endpoint:g,fn_index:a});i&&(x({type:"data",time:new Date,data:y?function(e,t,n,r){return e.map((e,o)=>t?.returns?.[o]?.component==="File"?p(e,n,r):t?.returns?.[o]?.component==="Gallery"?e.map(e=>Array.isArray(e)?[p(e[0],n,r),e[1]]:[p(e,n,r),null]):e&&"object"==typeof e&&e.is_file?p(e,n,r):e)}(i.data,c,r.root,r.root_url):i.data,endpoint:g,fn_index:a}),m&&(x({type:"status",time:new Date,...m,stage:o?.stage,queue:!0,endpoint:g,fn_index:a}),h.close()))},0>(0,i.default)(r.version||"2.0.0","3.6")&&h.addEventListener("open",()=>h.send(JSON.stringify({hash:d})))}}),{on:w,off:_,cancel:N,destroy:R}}async function P(e){let t;if(o)return o;let n={"Content-Type":"application/json"};if(u&&(n.Authorization=`Bearer ${u}`),!(t=0>(0,i.default)(e.version||"2.0.0","3.30")?await (0,l.fetch)("https://gradio-space-api-fetcher-v2.hf.space/api",{method:"POST",body:JSON.stringify({serialize:!1,config:JSON.stringify(e)}),headers:n}):await (0,l.fetch)(`${e.root}/info`,{headers:n})).ok)throw Error(s);let r=await t.json();"api"in r&&(r=r.api),r.named_endpoints["/predict"]&&!r.unnamed_endpoints["0"]&&(r.unnamed_endpoints[0]=r.named_endpoints["/predict"]);let a=function(e,t,n){let r={named_endpoints:{},unnamed_endpoints:{}};for(let o in e){let i=e[o];for(let e in i){let 
a=t.dependencies[e]?e:n[e.replace("/","")],l=i[e];r[o][e]={},r[o][e].parameters={},r[o][e].returns={},r[o][e].type=t.dependencies[a].types,r[o][e].parameters=l.parameters.map(({label:e,component:t,type:n,serializer:r})=>({label:e,component:t,type:h(n,t,r,"parameter"),description:g(n,r)})),r[o][e].returns=l.returns.map(({label:e,component:t,type:n,serializer:r})=>({label:e,component:t,type:h(n,t,r,"return"),description:g(n,r)}))}}return r}(r,e,C);return a}})}function p(e,t,n){if(null==e)return null;if("string"==typeof e)return{name:"file_data",data:e};if(Array.isArray(e)){let r=[];for(let o of e)null===o?r.push(null):r.push(p(o,t,n));return r}return e.is_file&&(n?e.data="/proxy="+n+"/file="+e.name:e.data=t+"/file="+e.name),e}function h(e,t,n,r){switch(e.type){case"string":return"string";case"boolean":return"boolean";case"number":return"number"}return"JSONSerializable"===n||"StringSerializable"===n?"any":"ListStringSerializable"===n?"string[]":"Image"===t?"parameter"===r?"Blob | File | Buffer":"string":"FileSerializable"===n?e?.type==="array"?"parameter"===r?"(Blob | File | Buffer)[]":"{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}[]":"parameter"===r?"Blob | File | Buffer":"{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}":"GallerySerializable"===n?"parameter"===r?"[(Blob | File | Buffer), (string | null)][]":"[{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}, (string | null))][]":void 0}function g(e,t){return"GallerySerializable"===t?"array of [file, label] tuples":"ListStringSerializable"===t?"array of strings":"FileSerializable"===t?"array of files or single file":e.description}async function m(e,t){try{let n=await (0,l.fetch)(`https://huggingface.co/api/spaces/${e}/jwt`,{headers:{Authorization:`Bearer ${t}`}}),r=(await n.json()).token;return r||!1}catch(e){return console.error(e),!1}}async function b(e,t,n,r){let o=await y(t,void 0,[],!0,n);return Promise.all(o.map(async({path:t,blob:n,data:o,type:i})=>{if(!n)return{path:t,base64:o,type:i};{let o=(await u(e,[n],r)).files[0];return{path:t,file_url:o,type:i}}})).then(e=>(e.forEach(({path:e,file_url:n,base64:r,type:o})=>{if(r)v(t,r,e);else if("Gallery"===o)v(t,n,e);else if(n){let r={is_file:!0,name:`${n}`,data:null};v(t,r,e)}}),t))}function v(e,t,n){for(;n.length>1;)e=e[n.shift()];e[n.shift()]=t}async function y(e,t,n=[],o=!1,i){if(Array.isArray(e)){let r=[];return await Promise.all(e.map(async(a,l)=>{let s=n.slice();s.push(l);let c=await y(e[l],o?i?.parameters[l]?.component||void 0:t,s,!1,i);r=r.concat(c)})),r}if(globalThis.Buffer&&e instanceof globalThis.Buffer){let r="Image"===t;return[{path:n,blob:!r&&new l.Blob([e]),data:!!r&&`${e.toString("base64")}`,type:t}]}if(e instanceof l.Blob||"undefined"!=typeof window&&e instanceof File){if("Image"!==t)return[{path:n,blob:e,type:t}];{let o;if("undefined"!=typeof window)o=await new Promise((t,n)=>{let r=new FileReader;r.onloadend=()=>t(r.result),r.readAsDataURL(e)});else{let t=await e.arrayBuffer();o=r.from(t).toString("base64")}return[{path:n,data:o,type:t}]}}{if("object"!=typeof e)return[];let t=[];for(let r in e)if(e.hasOwnProperty(r)){let o=n.slice();o.push(r),t=t.concat(await y(e[r],void 0,o,!1,i))}return t}}async function x(e,t){let n={};if(t&&(n.Authorization=`Bearer ${t}`),"undefined"!=typeof window&&window.gradio_config&&"http://localhost:9876"!==location.origin){let t=window.gradio_config.root,n=window.gradio_config;return n.root=e+n.root,{...n,path:t}}if(e){let t=await 
(0,l.fetch)(`${e}/config`,{headers:n});if(200===t.status){let n=await t.json();return n.path=n.path??"",n.root=e,n}throw Error("Could not get config.")}throw Error("No config or app endpoint found")}async function w(e,t,n){let r,o,i="subdomain"===t?`https://huggingface.co/api/spaces/by-subdomain/${e}`:`https://huggingface.co/api/spaces/${e}`;try{if(o=(r=await (0,l.fetch)(i)).status,200!==o)throw Error();r=await r.json()}catch(e){n({status:"error",load_status:"error",message:"Could not get space status",detail:"NOT_FOUND"});return}if(!r||200!==o)return;let{runtime:{stage:s},id:c}=r;switch(s){case"STOPPED":case"SLEEPING":n({status:"sleeping",load_status:"pending",message:"Space is asleep. Waking it up...",detail:s}),setTimeout(()=>{w(e,t,n)},1e3);break;case"RUNNING":case"RUNNING_BUILDING":n({status:"running",load_status:"complete",message:"",detail:s});break;case"BUILDING":n({status:"building",load_status:"pending",message:"Space is building...",detail:s}),setTimeout(()=>{w(e,t,n)},1e3);break;default:n({status:"space_error",load_status:"error",message:"This space is experiencing an issue.",detail:s,discussions_enabled:await (0,a.discussions_enabled)(c)})}}t.post_data=c,t.upload_files=u,t.duplicate=f,t.client=d,t.handle_blob=b,t.walk_and_store_blobs=y},42794:function(e,t,n){"use strict";var r=this&&this.__createBinding||(Object.create?function(e,t,n,r){void 0===r&&(r=n);var o=Object.getOwnPropertyDescriptor(t,n);(!o||("get"in o?!t.__esModule:o.writable||o.configurable))&&(o={enumerable:!0,get:function(){return t[n]}}),Object.defineProperty(e,r,o)}:function(e,t,n,r){void 0===r&&(r=n),e[r]=t[n]}),o=this&&this.__exportStar||function(e,t){for(var n in e)"default"===n||Object.prototype.hasOwnProperty.call(t,n)||r(t,e,n)};Object.defineProperty(t,"__esModule",{value:!0}),t.duplicate=t.upload_files=t.post_data=t.client=void 0;var i=n(23067);Object.defineProperty(t,"client",{enumerable:!0,get:function(){return i.client}}),Object.defineProperty(t,"post_data",{enumerable:!0,get:function(){return i.post_data}}),Object.defineProperty(t,"upload_files",{enumerable:!0,get:function(){return i.upload_files}}),Object.defineProperty(t,"duplicate",{enumerable:!0,get:function(){return i.duplicate}}),o(n(60276),t)},62961:function(e,t){"use strict";function n(e){if(e.startsWith("http")){let{protocol:t,host:n}=new URL(e);return n.endsWith("hf.space")?{ws_protocol:"wss",host:n,http_protocol:t}:{ws_protocol:"https:"===t?"wss":"ws",http_protocol:t,host:n}}return{ws_protocol:"wss",http_protocol:"https:",host:e}}async function r(e,r){let o={};r&&(o.Authorization=`Bearer ${r}`);let i=e.trim();if(t.RE_SPACE_NAME.test(i))try{let t=await fetch(`https://huggingface.co/api/spaces/${i}/host`,{headers:o});if(200!==t.status)throw Error("Space metadata could not be loaded.");let r=(await t.json()).host;return{space_id:e,...n(r)}}catch(e){throw Error("Space metadata could not be loaded."+e.message)}if(t.RE_SPACE_DOMAIN.test(i)){let{ws_protocol:e,http_protocol:t,host:r}=n(i);return{space_id:r.replace(".hf.space",""),ws_protocol:e,http_protocol:t,host:r}}if(t.MD_SPACE_DOMAIN.test(i)){let e=new URL(i);return{space_id:!1,ws_protocol:"wss",http_protocol:"https:",host:`${e.host}${e.pathname}`}}return{space_id:!1,...n(i)}}Object.defineProperty(t,"__esModule",{value:!0}),t.hardware_types=t.set_space_timeout=t.set_space_hardware=t.get_space_hardware=t.discussions_enabled=t.map_names_to_ids=t.process_endpoint=t.MD_SPACE_DOMAIN=t.RE_SPACE_DOMAIN=t.RE_SPACE_NAME=t.determine_protocol=void 
0,t.determine_protocol=n,t.RE_SPACE_NAME=/^[^\/]*\/[^\/]*$/,t.RE_SPACE_DOMAIN=/.*hf\.space\/{0,1}$/,t.MD_SPACE_DOMAIN=/^https:\/\/modelscope\.cn\//,t.process_endpoint=r,t.map_names_to_ids=function(e){let t={};return e.forEach(({api_name:e},n)=>{e&&(t[e]=n)}),t};let o=/^(?=[^]*\b[dD]iscussions{0,1}\b)(?=[^]*\b[dD]isabled\b)[^]*$/;async function i(e){try{let t=await fetch(`https://huggingface.co/api/spaces/${e}/discussions`,{method:"HEAD"}),n=t.headers.get("x-error-message");if(n&&o.test(n))return!1;return!0}catch(e){return!1}}async function a(e,t){let n={};t&&(n.Authorization=`Bearer ${t}`);try{let t=await fetch(`https://huggingface.co/api/spaces/${e}/runtime`,{headers:n});if(200!==t.status)throw Error("Space hardware could not be obtained.");let{hardware:r}=await t.json();return r}catch(e){throw Error(e.message)}}async function l(e,t,n){let r={};n&&(r.Authorization=`Bearer ${n}`);try{let n=await fetch(`https://huggingface.co/api/spaces/${e}/hardware`,{headers:r,body:JSON.stringify(t)});if(200!==n.status)throw Error("Space hardware could not be set. Please ensure the space hardware provided is valid and that a Hugging Face token is passed in.");let{hardware:o}=await n.json();return o}catch(e){throw Error(e.message)}}async function s(e,t,n){let r={};n&&(r.Authorization=`Bearer ${n}`);try{let n=await fetch(`https://huggingface.co/api/spaces/${e}/hardware`,{headers:r,body:JSON.stringify({seconds:t})});if(200!==n.status)throw Error("Space hardware could not be set. Please ensure the space hardware provided is valid and that a Hugging Face token is passed in.");let{hardware:o}=await n.json();return o}catch(e){throw Error(e.message)}}t.discussions_enabled=i,t.get_space_hardware=a,t.set_space_hardware=l,t.set_space_timeout=s,t.hardware_types=["cpu-basic","cpu-upgrade","t4-small","t4-medium","a10g-small","a10g-large","a100-large"]},89742:function(e,t){"use strict";class n extends WebSocket{constructor(e,...t){super(e)}}t.Z={fetch,WebSocket:n,Blob}},69564:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Blob=t.WebSocket=t.fetch=void 0;let r={};Object.assign(r,n(89742).Z),t.default=r,t.fetch=r.fetch,t.WebSocket=r.WebSocket,t.Blob=r.Blob},24645:function(e){var t=/\/\*[^*]*\*+([^/*][^*]*\*+)*\//g,n=/\n/g,r=/^\s*/,o=/^(\*?[-#/*\\\w]+(\[[0-9a-z_-]+\])?)\s*/,i=/^:\s*/,a=/^((?:'(?:\\'|.)*?'|"(?:\\"|.)*?"|\([^)]*?\)|[^};])+)/,l=/^[;\s]*/,s=/^\s+|\s+$/g;function c(e){return e?e.replace(s,""):""}e.exports=function(e,s){if("string"!=typeof e)throw TypeError("First argument must be a string");if(!e)return[];s=s||{};var u=1,f=1;function d(e){var t=e.match(n);t&&(u+=t.length);var r=e.lastIndexOf("\n");f=~r?e.length-r:f+e.length}function p(){var e={line:u,column:f};return function(t){return t.position=new h(e),b(r),t}}function h(e){this.start=e,this.end={line:u,column:f},this.source=s.source}h.prototype.content=e;var g=[];function m(t){var n=Error(s.source+":"+u+":"+f+": "+t);if(n.reason=t,n.filename=s.source,n.line=u,n.column=f,n.source=e,s.silent)g.push(n);else throw n}function b(t){var n=t.exec(e);if(n){var r=n[0];return d(r),e=e.slice(r.length),n}}function v(e){var t;for(e=e||[];t=y();)!1!==t&&e.push(t);return e}function y(){var t=p();if("/"==e.charAt(0)&&"*"==e.charAt(1)){for(var n=2;""!=e.charAt(n)&&("*"!=e.charAt(n)||"/"!=e.charAt(n+1));)++n;if(n+=2,""===e.charAt(n-1))return m("End of comment missing");var r=e.slice(2,n-2);return f+=2,d(r),e=e.slice(n),f+=2,t({type:"comment",comment:r})}}return b(r),function(){var e,n=[];for(v(n);e=function(){var 
e=p(),n=b(o);if(n){if(y(),!b(i))return m("property missing ':'");var r=b(a),s=e({type:"declaration",property:c(n[0].replace(t,"")),value:r?c(r[0].replace(t,"")):""});return b(l),s}}();)!1!==e&&(n.push(e),v(n));return n}()}},65192:function(e,t,n){"use strict";function r(e){for(var t=arguments.length,n=Array(t>1?t-1:0),r=1;r3?t.i-4:t.i:Array.isArray(e)?1:u(e)?2:f(e)?3:0}function s(e,t){return 2===l(e)?e.has(t):Object.prototype.hasOwnProperty.call(e,t)}function c(e,t,n){var r=l(e);2===r?e.set(t,n):3===r?e.add(n):e[t]=n}function u(e){return I&&e instanceof Map}function f(e){return D&&e instanceof Set}function d(e){return e.o||e.t}function p(e){if(Array.isArray(e))return Array.prototype.slice.call(e);var t=Z(e);delete t[$];for(var n=H(t),r=0;r1&&(e.set=e.add=e.clear=e.delete=g),Object.freeze(e),t&&a(e,function(e,t){return h(t,!0)},!0)),e}function g(){r(2)}function m(e){return null==e||"object"!=typeof e||Object.isFrozen(e)}function b(e){var t=q[e];return t||r(18,e),t}function v(e,t){t&&(b("Patches"),e.u=[],e.s=[],e.v=t)}function y(e){x(e),e.p.forEach(E),e.p=null}function x(e){e===j&&(j=e.l)}function w(e){return j={p:[],l:j,h:e,m:!0,_:0}}function E(e){var t=e[$];0===t.i||1===t.i?t.j():t.g=!0}function S(e,t){t._=t.p.length;var n=t.p[0],o=void 0!==e&&e!==n;return t.h.O||b("ES5").S(t,e,o),o?(n[$].P&&(y(t),r(4)),i(e)&&(e=k(t,e),t.l||O(t,e)),t.u&&b("Patches").M(n[$].t,e,t.u,t.s)):e=k(t,n,[]),y(t),t.u&&t.v(t.u,t.s),e!==B?e:void 0}function k(e,t,n){if(m(t))return t;var r=t[$];if(!r)return a(t,function(o,i){return _(e,r,t,o,i,n)},!0),t;if(r.A!==e)return t;if(!r.P)return O(e,r.t,!0),r.t;if(!r.I){r.I=!0,r.A._--;var o=4===r.i||5===r.i?r.o=p(r.k):r.o,i=o,l=!1;3===r.i&&(i=new Set(o),o.clear(),l=!0),a(i,function(t,i){return _(e,r,o,t,i,n,l)}),O(e,o,!1),n&&e.u&&b("Patches").N(r,n,e.u,e.s)}return r.o}function _(e,t,n,r,a,l,u){if(o(a)){var f=k(e,a,l&&t&&3!==t.i&&!s(t.R,r)?l.concat(r):void 0);if(c(n,r,f),!o(f))return;e.m=!1}else u&&n.add(a);if(i(a)&&!m(a)){if(!e.h.D&&e._<1)return;k(e,a),t&&t.A.l||O(e,a)}}function O(e,t,n){void 0===n&&(n=!1),!e.l&&e.h.D&&e.m&&h(t,n)}function C(e,t){var n=e[$];return(n?d(n):e)[t]}function A(e,t){if(t in e)for(var n=Object.getPrototypeOf(e);n;){var r=Object.getOwnPropertyDescriptor(n,t);if(r)return r;n=Object.getPrototypeOf(n)}}function N(e){e.P||(e.P=!0,e.l&&N(e.l))}function R(e){e.o||(e.o=p(e.t))}function T(e,t,n){var r,o,i,a,l,s,c,d=u(t)?b("MapSet").F(t,n):f(t)?b("MapSet").T(t,n):e.O?(i=o={i:(r=Array.isArray(t))?1:0,A:n?n.A:j,P:!1,I:!1,R:{},l:n,t:t,k:null,o:null,j:null,C:!1},a=W,r&&(i=[o],a=V),s=(l=Proxy.revocable(i,a)).revoke,c=l.proxy,o.k=c,o.j=s,c):b("ES5").J(t,n);return(n?n.A:j).p.push(d),d}function P(e,t){switch(t){case 2:return new Map(e);case 3:return Array.from(e)}return p(e)}n.d(t,{sn:function(){return X}});var M,j,L="undefined"!=typeof Symbol&&"symbol"==typeof Symbol("x"),I="undefined"!=typeof Map,D="undefined"!=typeof Set,F="undefined"!=typeof Proxy&&void 0!==Proxy.revocable&&"undefined"!=typeof Reflect,B=L?Symbol.for("immer-nothing"):((M={})["immer-nothing"]=!0,M),z=L?Symbol.for("immer-draftable"):"__$immer_draftable",$=L?Symbol.for("immer-state"):"__$immer_state",U=""+Object.prototype.constructor,H="undefined"!=typeof Reflect&&Reflect.ownKeys?Reflect.ownKeys:void 0!==Object.getOwnPropertySymbols?function(e){return Object.getOwnPropertyNames(e).concat(Object.getOwnPropertySymbols(e))}:Object.getOwnPropertyNames,Z=Object.getOwnPropertyDescriptors||function(e){var t={};return 
H(e).forEach(function(n){t[n]=Object.getOwnPropertyDescriptor(e,n)}),t},q={},W={get:function(e,t){if(t===$)return e;var n,r,o=d(e);if(!s(o,t))return(r=A(o,t))?"value"in r?r.value:null===(n=r.get)||void 0===n?void 0:n.call(e.k):void 0;var a=o[t];return e.I||!i(a)?a:a===C(e.t,t)?(R(e),e.o[t]=T(e.A.h,a,e)):a},has:function(e,t){return t in d(e)},ownKeys:function(e){return Reflect.ownKeys(d(e))},set:function(e,t,n){var r=A(d(e),t);if(null==r?void 0:r.set)return r.set.call(e.k,n),!0;if(!e.P){var o=C(d(e),t),i=null==o?void 0:o[$];if(i&&i.t===n)return e.o[t]=n,e.R[t]=!1,!0;if((n===o?0!==n||1/n==1/o:n!=n&&o!=o)&&(void 0!==n||s(e.t,t)))return!0;R(e),N(e)}return e.o[t]===n&&(void 0!==n||t in e.o)||Number.isNaN(n)&&Number.isNaN(e.o[t])||(e.o[t]=n,e.R[t]=!0),!0},deleteProperty:function(e,t){return void 0!==C(e.t,t)||t in e.t?(e.R[t]=!1,R(e),N(e)):delete e.R[t],e.o&&delete e.o[t],!0},getOwnPropertyDescriptor:function(e,t){var n=d(e),r=Reflect.getOwnPropertyDescriptor(n,t);return r?{writable:!0,configurable:1!==e.i||"length"!==t,enumerable:r.enumerable,value:n[t]}:r},defineProperty:function(){r(11)},getPrototypeOf:function(e){return Object.getPrototypeOf(e.t)},setPrototypeOf:function(){r(12)}},V={};a(W,function(e,t){V[e]=function(){return arguments[0]=arguments[0][0],t.apply(this,arguments)}}),V.deleteProperty=function(e,t){return V.set.call(this,e,t,void 0)},V.set=function(e,t,n){return W.set.call(this,e[0],t,n,e[0])};var G=new(function(){function e(e){var t=this;this.O=F,this.D=!0,this.produce=function(e,n,o){if("function"==typeof e&&"function"!=typeof n){var a,l=n;return n=e,function(e){var r=this;void 0===e&&(e=l);for(var o=arguments.length,i=Array(o>1?o-1:0),a=1;a1?r-1:0),i=1;i=0;n--){var n,r=t[n];if(0===r.path.length&&"replace"===r.op){e=r.value;break}}n>-1&&(t=t.slice(n+1));var i=b("Patches").$;return o(e)?i(e,t):this.produce(e,function(e){return i(e,t)})},e}()),K=G.produce;G.produceWithPatches.bind(G),G.setAutoFreeze.bind(G),G.setUseProxies.bind(G),G.applyPatches.bind(G),G.createDraft.bind(G),G.finishDraft.bind(G);var Y=n(48115);function X(e){let t=(0,Y.cn)(e,(e,n,r)=>n(t,K(e(t),"function"==typeof r?r:()=>r)));return t}n(86006),new WeakMap},79922:function(e,t,n){var r=n(21671)(n(41314),"DataView");e.exports=r},7845:function(e,t,n){var r=n(44338),o=n(74779),i=n(28231),a=n(14798),l=n(90926);function s(e){var t=-1,n=null==e?0:e.length;for(this.clear();++tu))return!1;var d=s.get(e),p=s.get(t);if(d&&p)return d==t&&p==e;var h=-1,g=!0,m=2&n?new r:void 0;for(s.set(e,t),s.set(t,e);++h-1&&e%1==0&&e-1}},13332:function(e,t,n){var r=n(53457);e.exports=function(e,t){var n=this.__data__,o=r(n,e);return o<0?(++this.size,n.push([e,t])):n[o][1]=t,this}},63596:function(e,t,n){var r=n(7845),o=n(25214),i=n(357);e.exports=function(){this.size=0,this.__data__={hash:new r,map:new(i||o),string:new r}}},62353:function(e,t,n){var r=n(87225);e.exports=function(e){var t=r(this,e).delete(e);return this.size-=t?1:0,t}},89659:function(e,t,n){var r=n(87225);e.exports=function(e){return r(this,e).get(e)}},2730:function(e,t,n){var r=n(87225);e.exports=function(e){return r(this,e).has(e)}},2752:function(e,t,n){var r=n(87225);e.exports=function(e,t){var n=r(this,e),o=n.size;return n.set(e,t),this.size+=n.size==o?0:1,this}},56395:function(e){e.exports=function(e){var t=-1,n=Array(e.size);return e.forEach(function(e,r){n[++t]=[r,e]}),n}},45211:function(e){e.exports=function(e,t){return function(n){return null!=n&&n[e]===t&&(void 0!==t||e in Object(n))}}},18757:function(e,t,n){var r=n(85679);e.exports=function(e){var 
t=r(e,function(e){return 500===n.size&&n.clear(),e}),n=t.cache;return t}},98851:function(e,t,n){var r=n(21671)(Object,"create");e.exports=r},27978:function(e,t,n){var r=n(4605)(Object.keys,Object);e.exports=r},46348:function(e){e.exports=function(e){var t=[];if(null!=e)for(var n in Object(e))t.push(n);return t}},78084:function(e,t,n){e=n.nmd(e);var r=n(99499),o=t&&!t.nodeType&&t,i=o&&e&&!e.nodeType&&e,a=i&&i.exports===o&&r.process,l=function(){try{var e=i&&i.require&&i.require("util").types;if(e)return e;return a&&a.binding&&a.binding("util")}catch(e){}}();e.exports=l},59774:function(e){var t=Object.prototype.toString;e.exports=function(e){return t.call(e)}},4605:function(e){e.exports=function(e,t){return function(n){return e(t(n))}}},41314:function(e,t,n){var r=n(99499),o="object"==typeof self&&self&&self.Object===Object&&self,i=r||o||Function("return this")();e.exports=i},70954:function(e){e.exports=function(e){return this.__data__.set(e,"__lodash_hash_undefined__"),this}},56352:function(e){e.exports=function(e){return this.__data__.has(e)}},6789:function(e){e.exports=function(e){var t=-1,n=Array(e.size);return e.forEach(function(e){n[++t]=e}),n}},85846:function(e,t,n){var r=n(25214);e.exports=function(){this.__data__=new r,this.size=0}},47918:function(e){e.exports=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}},51816:function(e){e.exports=function(e){return this.__data__.get(e)}},3373:function(e){e.exports=function(e){return this.__data__.has(e)}},14715:function(e,t,n){var r=n(25214),o=n(357),i=n(97794);e.exports=function(e,t){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!o||a.length<199)return a.push([e,t]),this.size=++n.size,this;n=this.__data__=new i(a)}return n.set(e,t),this.size=n.size,this}},52588:function(e,t,n){var r=n(18757),o=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g,i=/\\(\\)?/g,a=r(function(e){var t=[];return 46===e.charCodeAt(0)&&t.push(""),e.replace(o,function(e,n,r,o){t.push(r?o.replace(i,"$1"):n||e)}),t});e.exports=a},87912:function(e,t,n){var r=n(50246),o=1/0;e.exports=function(e){if("string"==typeof e||r(e))return e;var t=e+"";return"0"==t&&1/e==-o?"-0":t}},77425:function(e){var t=Function.prototype.toString;e.exports=function(e){if(null!=e){try{return t.call(e)}catch(e){}try{return e+""}catch(e){}}return""}},48797:function(e,t,n){var r=n(33130);e.exports=function(e){return r(e,5)}},98895:function(e){e.exports=function(e,t){return e===t||e!=e&&t!=t}},17766:function(e,t,n){var r=n(23699),o=n(54434);e.exports=function(e,t){return e&&r(e,o(t))}},53671:function(e,t,n){var r=n(86271);e.exports=function(e,t,n){var o=null==e?void 0:r(e,t);return void 0===o?n:o}},87191:function(e,t,n){var r=n(91790),o=n(36015);e.exports=function(e,t){return null!=e&&o(e,t,r)}},14032:function(e){e.exports=function(e){return e}},20628:function(e,t,n){var r=n(73274),o=n(60655),i=Object.prototype,a=i.hasOwnProperty,l=i.propertyIsEnumerable,s=r(function(){return arguments}())?r:function(e){return o(e)&&a.call(e,"callee")&&!l.call(e,"callee")};e.exports=s},3642:function(e){var t=Array.isArray;e.exports=t},96717:function(e,t,n){var r=n(84547),o=n(78890);e.exports=function(e){return null!=e&&o(e.length)&&!r(e)}},49681:function(e,t,n){e=n.nmd(e);var r=n(41314),o=n(74367),i=t&&!t.nodeType&&t,a=i&&e&&!e.nodeType&&e,l=a&&a.exports===i?r.Buffer:void 0,s=l?l.isBuffer:void 0;e.exports=s||o},84547:function(e,t,n){var r=n(48276),o=n(74331);e.exports=function(e){if(!o(e))return!1;var t=r(e);return"[object 
Function]"==t||"[object GeneratorFunction]"==t||"[object AsyncFunction]"==t||"[object Proxy]"==t}},78890:function(e){e.exports=function(e){return"number"==typeof e&&e>-1&&e%1==0&&e<=9007199254740991}},8905:function(e,t,n){var r=n(87235),o=n(86080),i=n(78084),a=i&&i.isMap,l=a?o(a):r;e.exports=l},74331:function(e){e.exports=function(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}},60655:function(e){e.exports=function(e){return null!=e&&"object"==typeof e}},54477:function(e,t,n){var r=n(48276),o=n(27271),i=n(60655),a=Object.prototype,l=Function.prototype.toString,s=a.hasOwnProperty,c=l.call(Object);e.exports=function(e){if(!i(e)||"[object Object]"!=r(e))return!1;var t=o(e);if(null===t)return!0;var n=s.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&l.call(n)==c}},90911:function(e,t,n){var r=n(58651),o=n(86080),i=n(78084),a=i&&i.isSet,l=a?o(a):r;e.exports=l},782:function(e,t,n){var r=n(48276),o=n(3642),i=n(60655);e.exports=function(e){return"string"==typeof e||!o(e)&&i(e)&&"[object String]"==r(e)}},50246:function(e,t,n){var r=n(48276),o=n(60655);e.exports=function(e){return"symbol"==typeof e||o(e)&&"[object Symbol]"==r(e)}},97095:function(e,t,n){var r=n(59972),o=n(86080),i=n(78084),a=i&&i.isTypedArray,l=a?o(a):r;e.exports=l},28287:function(e,t,n){var r=n(86164),o=n(60922),i=n(96717);e.exports=function(e){return i(e)?r(e):o(e)}},76183:function(e,t,n){var r=n(86164),o=n(52449),i=n(96717);e.exports=function(e){return i(e)?r(e,!0):o(e)}},77636:function(e,t,n){var r=n(52908),o=n(23393),i=n(22525),a=n(3642);e.exports=function(e,t){return(a(e)?r:i)(e,o(t,3))}},85679:function(e,t,n){var r=n(97794);function o(e,t){if("function"!=typeof e||null!=t&&"function"!=typeof t)throw TypeError("Expected a function");var n=function(){var r=arguments,o=t?t.apply(this,r):r[0],i=n.cache;if(i.has(o))return i.get(o);var a=e.apply(this,r);return n.cache=i.set(o,a)||i,a};return n.cache=new(o.Cache||r),n}o.Cache=r,e.exports=o},78626:function(e,t,n){var r=n(31661),o=n(30452),i=n(78128),a=n(87912);e.exports=function(e){return i(e)?r(a(e)):o(e)}},6403:function(e){e.exports=function(){return[]}},74367:function(e){e.exports=function(){return!1}},51299:function(e,t,n){var r=n(84778);e.exports=function(e){return null==e?"":r(e)}},61750:function(e,t,n){"use strict";n.d(t,{Z:function(){return a}});var r=n(86006),o={xmlns:"http://www.w3.org/2000/svg",width:24,height:24,viewBox:"0 0 24 24",fill:"none",stroke:"currentColor",strokeWidth:2,strokeLinecap:"round",strokeLinejoin:"round"};let i=e=>e.replace(/([a-z0-9])([A-Z])/g,"$1-$2").toLowerCase(),a=(e,t)=>{let n=(0,r.forwardRef)(({color:n="currentColor",size:a=24,strokeWidth:l=2,absoluteStrokeWidth:s,children:c,...u},f)=>(0,r.createElement)("svg",{ref:f,...o,width:a,height:a,stroke:n,strokeWidth:s?24*Number(l)/Number(a):l,className:`lucide lucide-${i(e)}`,...u},[...t.map(([e,t])=>(0,r.createElement)(e,t)),...(Array.isArray(c)?c:[c])||[]]));return n.displayName=`${e}`,n}},87594:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(61750);let o=(0,r.Z)("Search",[["circle",{cx:"11",cy:"11",r:"8",key:"4ej97u"}],["path",{d:"m21 21-4.3-4.3",key:"1qie3q"}]])},18178:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(61750);let o=(0,r.Z)("X",[["path",{d:"M18 6 6 18",key:"1bl5f8"}],["path",{d:"m6 6 12 12",key:"d8bk6v"}]])},28352:function(e){var t="undefined"!=typeof window?window:self;e.exports=t.crypto||t.msCrypto},89586:function(e,t,n){e.exports=function(e){if(!e)return Math.random;var t=new 
Uint32Array(1);return function(){return e.getRandomValues(t)[0]/4294967296}}(n(28352))},27410:function(e){function t(e,t,n,r){return Math.round(e/n)+" "+r+(t>=1.5*n?"s":"")}e.exports=function(e,n){n=n||{};var r,o,i=typeof e;if("string"===i&&e.length>0)return function(e){if(!((e=String(e)).length>100)){var t=/^(-?(?:\d+)?\.?\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|weeks?|w|years?|yrs?|y)?$/i.exec(e);if(t){var n=parseFloat(t[1]);switch((t[2]||"ms").toLowerCase()){case"years":case"year":case"yrs":case"yr":case"y":return 315576e5*n;case"weeks":case"week":case"w":return 6048e5*n;case"days":case"day":case"d":return 864e5*n;case"hours":case"hour":case"hrs":case"hr":case"h":return 36e5*n;case"minutes":case"minute":case"mins":case"min":case"m":return 6e4*n;case"seconds":case"second":case"secs":case"sec":case"s":return 1e3*n;case"milliseconds":case"millisecond":case"msecs":case"msec":case"ms":return n;default:return}}}}(e);if("number"===i&&isFinite(e))return n.long?(r=Math.abs(e))>=864e5?t(e,r,864e5,"day"):r>=36e5?t(e,r,36e5,"hour"):r>=6e4?t(e,r,6e4,"minute"):r>=1e3?t(e,r,1e3,"second"):e+" ms":(o=Math.abs(e))>=864e5?Math.round(e/864e5)+"d":o>=36e5?Math.round(e/36e5)+"h":o>=6e4?Math.round(e/6e4)+"m":o>=1e3?Math.round(e/1e3)+"s":e+"ms";throw Error("val is not a non-empty string or a valid number. val="+JSON.stringify(e))}},52040:function(e,t,n){"use strict";var r,o;e.exports=(null==(r=n.g.process)?void 0:r.env)&&"object"==typeof(null==(o=n.g.process)?void 0:o.env)?n.g.process:n(66003)},73029:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"Image",{enumerable:!0,get:function(){return g}});let r=n(26927),o=n(25909),i=o._(n(86006)),a=r._(n(86174)),l=n(80529),s=n(17302),c=n(23442);n(46731);let u=r._(n(47235)),f={deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",dangerouslyAllowSVG:!1,unoptimized:!1};function d(e,t,n,r,o,i){let a=null==e?void 0:e.src;if(!e||e["data-loaded-src"]===a)return;e["data-loaded-src"]=a;let l="decode"in e?e.decode():Promise.resolve();l.catch(()=>{}).then(()=>{if(e.parentElement&&e.isConnected){if("blur"===t&&o(!0),null==n?void 0:n.current){let t=new Event("load");Object.defineProperty(t,"target",{writable:!1,value:e});let r=!1,o=!1;n.current({...t,nativeEvent:t,currentTarget:e,target:e,isDefaultPrevented:()=>r,isPropagationStopped:()=>o,persist:()=>{},preventDefault:()=>{r=!0,t.preventDefault()},stopPropagation:()=>{o=!0,t.stopPropagation()}})}(null==r?void 0:r.current)&&r.current(e)}})}function p(e){let[t,n]=i.version.split("."),r=parseInt(t,10),o=parseInt(n,10);return r>18||18===r&&o>=3?{fetchPriority:e}:{fetchpriority:e}}let h=(0,i.forwardRef)((e,t)=>{let{src:n,srcSet:r,sizes:o,height:a,width:l,decoding:s,className:c,style:u,fetchPriority:f,placeholder:h,loading:g,unoptimized:m,fill:b,onLoadRef:v,onLoadingCompleteRef:y,setBlurComplete:x,setShowAltText:w,onLoad:E,onError:S,...k}=e;return i.default.createElement("img",{...k,...p(f),loading:g,width:l,height:a,decoding:s,"data-nimg":b?"fill":"1",className:c,style:u,sizes:o,srcSet:r,src:n,ref:(0,i.useCallback)(e=>{t&&("function"==typeof t?t(e):"object"==typeof t&&(t.current=e)),e&&(S&&(e.src=e.src),e.complete&&d(e,h,v,y,x,m))},[n,h,v,y,x,S,m,t]),onLoad:e=>{let t=e.currentTarget;d(t,h,v,y,x,m)},onError:e=>{w(!0),"blur"===h&&x(!0),S&&S(e)}})}),g=(0,i.forwardRef)((e,t)=>{let n=(0,i.useContext)(c.ImageConfigContext),r=(0,i.useMemo)(()=>{let 
e=f||n||s.imageConfigDefault,t=[...e.deviceSizes,...e.imageSizes].sort((e,t)=>e-t),r=e.deviceSizes.sort((e,t)=>e-t);return{...e,allSizes:t,deviceSizes:r}},[n]),{onLoad:o,onLoadingComplete:d}=e,g=(0,i.useRef)(o);(0,i.useEffect)(()=>{g.current=o},[o]);let m=(0,i.useRef)(d);(0,i.useEffect)(()=>{m.current=d},[d]);let[b,v]=(0,i.useState)(!1),[y,x]=(0,i.useState)(!1),{props:w,meta:E}=(0,l.getImgProps)(e,{defaultLoader:u.default,imgConf:r,blurComplete:b,showAltText:y});return i.default.createElement(i.default.Fragment,null,i.default.createElement(h,{...w,unoptimized:E.unoptimized,placeholder:E.placeholder,fill:E.fill,onLoadRef:g,onLoadingCompleteRef:m,setBlurComplete:v,setShowAltText:x,ref:t}),E.priority?i.default.createElement(a.default,null,i.default.createElement("link",{key:"__nimg-"+w.src+w.srcSet+w.sizes,rel:"preload",as:"image",href:w.srcSet?void 0:w.src,imageSrcSet:w.srcSet,imageSizes:w.sizes,crossOrigin:w.crossOrigin,referrerPolicy:w.referrerPolicy,...p(w.fetchPriority)})):null)});("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},14620:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"AmpStateContext",{enumerable:!0,get:function(){return i}});let r=n(26927),o=r._(n(86006)),i=o.default.createContext({})},40353:function(e,t){"use strict";function n(e){let{ampFirst:t=!1,hybrid:n=!1,hasQuery:r=!1}=void 0===e?{}:e;return t||n&&r}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isInAmpMode",{enumerable:!0,get:function(){return n}})},80529:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getImgProps",{enumerable:!0,get:function(){return l}}),n(46731);let r=n(16542),o=n(17302);function i(e){return void 0!==e.default}function a(e){return void 0===e?e:"number"==typeof e?Number.isFinite(e)?e:NaN:"string"==typeof e&&/^[0-9]+$/.test(e)?parseInt(e,10):NaN}function l(e,t){var n;let l,s,c,{src:u,sizes:f,unoptimized:d=!1,priority:p=!1,loading:h,className:g,quality:m,width:b,height:v,fill:y=!1,style:x,onLoad:w,onLoadingComplete:E,placeholder:S="empty",blurDataURL:k,fetchPriority:_,layout:O,objectFit:C,objectPosition:A,lazyBoundary:N,lazyRoot:R,...T}=e,{imgConf:P,showAltText:M,blurComplete:j,defaultLoader:L}=t,I=P||o.imageConfigDefault;if("allSizes"in I)l=I;else{let e=[...I.deviceSizes,...I.imageSizes].sort((e,t)=>e-t),t=I.deviceSizes.sort((e,t)=>e-t);l={...I,allSizes:e,deviceSizes:t}}let D=T.loader||L;delete T.loader,delete T.srcSet;let F="__next_img_default"in D;if(F){if("custom"===l.loader)throw Error('Image with src "'+u+'" is missing "loader" prop.\nRead more: https://nextjs.org/docs/messages/next-image-missing-loader')}else{let e=D;D=t=>{let{config:n,...r}=t;return e(r)}}if(O){"fill"===O&&(y=!0);let e={intrinsic:{maxWidth:"100%",height:"auto"},responsive:{width:"100%",height:"auto"}}[O];e&&(x={...x,...e});let t={responsive:"100vw",fill:"100vw"}[O];t&&!f&&(f=t)}let B="",z=a(b),$=a(v);if("object"==typeof(n=u)&&(i(n)||void 0!==n.src)){let e=i(u)?u.default:u;if(!e.src)throw Error("An object should only be passed to the image component src parameter if it comes from a static image import. It must include src. Received "+JSON.stringify(e));if(!e.height||!e.width)throw Error("An object should only be passed to the image component src parameter if it comes from a static image import. 
It must include height and width. Received "+JSON.stringify(e));if(s=e.blurWidth,c=e.blurHeight,k=k||e.blurDataURL,B=e.src,!y){if(z||$){if(z&&!$){let t=z/e.width;$=Math.round(e.height*t)}else if(!z&&$){let t=$/e.height;z=Math.round(e.width*t)}}else z=e.width,$=e.height}}let U=!p&&("lazy"===h||void 0===h);(!(u="string"==typeof u?u:B)||u.startsWith("data:")||u.startsWith("blob:"))&&(d=!0,U=!1),l.unoptimized&&(d=!0),F&&u.endsWith(".svg")&&!l.dangerouslyAllowSVG&&(d=!0),p&&(_="high");let H=a(m),Z=Object.assign(y?{position:"absolute",height:"100%",width:"100%",left:0,top:0,right:0,bottom:0,objectFit:C,objectPosition:A}:{},M?{}:{color:"transparent"},x),q="blur"===S&&k&&!j?{backgroundSize:Z.objectFit||"cover",backgroundPosition:Z.objectPosition||"50% 50%",backgroundRepeat:"no-repeat",backgroundImage:'url("data:image/svg+xml;charset=utf-8,'+(0,r.getImageBlurSvg)({widthInt:z,heightInt:$,blurWidth:s,blurHeight:c,blurDataURL:k,objectFit:Z.objectFit})+'")'}:{},W=function(e){let{config:t,src:n,unoptimized:r,width:o,quality:i,sizes:a,loader:l}=e;if(r)return{src:n,srcSet:void 0,sizes:void 0};let{widths:s,kind:c}=function(e,t,n){let{deviceSizes:r,allSizes:o}=e;if(n){let e=/(^|\s)(1?\d?\d)vw/g,t=[];for(let r;r=e.exec(n);r)t.push(parseInt(r[2]));if(t.length){let e=.01*Math.min(...t);return{widths:o.filter(t=>t>=r[0]*e),kind:"w"}}return{widths:o,kind:"w"}}if("number"!=typeof t)return{widths:r,kind:"w"};let i=[...new Set([t,2*t].map(e=>o.find(t=>t>=e)||o[o.length-1]))];return{widths:i,kind:"x"}}(t,o,a),u=s.length-1;return{sizes:a||"w"!==c?a:"100vw",srcSet:s.map((e,r)=>l({config:t,src:n,quality:i,width:e})+" "+("w"===c?e:r+1)+c).join(", "),src:l({config:t,src:n,quality:i,width:s[u]})}}({config:l,src:u,unoptimized:d,width:z,quality:H,sizes:f,loader:D}),V={...T,loading:U?"lazy":h,fetchPriority:_,width:z,height:$,decoding:"async",className:g,style:{...Z,...q},sizes:W.sizes,srcSet:W.srcSet,src:W.src},G={unoptimized:d,priority:p,placeholder:S,fill:y};return{props:V,meta:G}}},86174:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{defaultHead:function(){return u},default:function(){return h}});let r=n(26927),o=n(25909),i=o._(n(86006)),a=r._(n(20255)),l=n(14620),s=n(27268),c=n(40353);function u(e){void 0===e&&(e=!1);let t=[i.default.createElement("meta",{charSet:"utf-8"})];return e||t.push(i.default.createElement("meta",{name:"viewport",content:"width=device-width"})),t}function f(e,t){return"string"==typeof t||"number"==typeof t?e:t.type===i.default.Fragment?e.concat(i.default.Children.toArray(t.props.children).reduce((e,t)=>"string"==typeof t||"number"==typeof t?e:e.concat(t),[])):e.concat(t)}n(46731);let d=["name","httpEquiv","charSet","itemProp"];function p(e,t){let{inAmpMode:n}=t;return e.reduce(f,[]).reverse().concat(u(n).reverse()).filter(function(){let e=new Set,t=new Set,n=new Set,r={};return o=>{let i=!0,a=!1;if(o.key&&"number"!=typeof o.key&&o.key.indexOf("$")>0){a=!0;let t=o.key.slice(o.key.indexOf("$")+1);e.has(t)?i=!1:e.add(t)}switch(o.type){case"title":case"base":t.has(o.type)?i=!1:t.add(o.type);break;case"meta":for(let e=0,t=d.length;e{let r=e.key||t;if(!n&&"link"===e.type&&e.props.href&&["https://fonts.googleapis.com/css","https://use.typekit.net/"].some(t=>e.props.href.startsWith(t))){let t={...e.props||{}};return t["data-href"]=t.href,t.href=void 0,t["data-optimized-fonts"]=!0,i.default.cloneElement(e,t)}return i.default.cloneElement(e,{key:r})})}let 
h=function(e){let{children:t}=e,n=(0,i.useContext)(l.AmpStateContext),r=(0,i.useContext)(s.HeadManagerContext);return i.default.createElement(a.default,{reduceComponentsToState:p,headManager:r,inAmpMode:(0,c.isInAmpMode)(n)},t)};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},16542:function(e,t){"use strict";function n(e){let{widthInt:t,heightInt:n,blurWidth:r,blurHeight:o,blurDataURL:i,objectFit:a}=e,l=r||t,s=o||n,c=i.startsWith("data:image/jpeg")?"%3CfeComponentTransfer%3E%3CfeFuncA type='discrete' tableValues='1 1'/%3E%3C/feComponentTransfer%3E%":"";return l&&s?"%3Csvg xmlns='http%3A//www.w3.org/2000/svg' viewBox='0 0 "+l+" "+s+"'%3E%3Cfilter id='b' color-interpolation-filters='sRGB'%3E%3CfeGaussianBlur stdDeviation='"+(r&&o?"1":"20")+"'/%3E"+c+"%3C/filter%3E%3Cimage preserveAspectRatio='none' filter='url(%23b)' x='0' y='0' height='100%25' width='100%25' href='"+i+"'/%3E%3C/svg%3E":"%3Csvg xmlns='http%3A//www.w3.org/2000/svg'%3E%3Cimage style='filter:blur(20px)' preserveAspectRatio='"+("contain"===a?"xMidYMid":"cover"===a?"xMidYMid slice":"none")+"' x='0' y='0' height='100%25' width='100%25' href='"+i+"'/%3E%3C/svg%3E"}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getImageBlurSvg",{enumerable:!0,get:function(){return n}})},23442:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"ImageConfigContext",{enumerable:!0,get:function(){return a}});let r=n(26927),o=r._(n(86006)),i=n(17302),a=o.default.createContext(i.imageConfigDefault)},17302:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{VALID_LOADERS:function(){return n},imageConfigDefault:function(){return r}});let n=["default","imgix","cloudinary","akamai","custom"],r={deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",loaderFile:"",domains:[],disableStaticImages:!1,minimumCacheTTL:60,formats:["image/webp"],dangerouslyAllowSVG:!1,contentSecurityPolicy:"script-src 'none'; frame-src 'none'; sandbox;",contentDispositionType:"inline",remotePatterns:[],unoptimized:!1}},45445:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{default:function(){return c},unstable_getImgProps:function(){return s}});let r=n(26927),o=n(80529),i=n(46731),a=n(73029),l=r._(n(47235)),s=e=>{(0,i.warnOnce)("Warning: unstable_getImgProps() is experimental and may change or be removed at any time. 
Use at your own risk.");let{props:t}=(0,o.getImgProps)(e,{defaultLoader:l.default,imgConf:{deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",dangerouslyAllowSVG:!1,unoptimized:!1}});for(let[e,n]of Object.entries(t))void 0===n&&delete t[e];return{props:t}},c=a.Image},47235:function(e,t){"use strict";function n(e){let{config:t,src:n,width:r,quality:o}=e;return t.path+"?url="+encodeURIComponent(n)+"&w="+r+"&q="+(o||75)}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return r}}),n.__next_img_default=!0;let r=n},20255:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return l}});let r=n(25909),o=r._(n(86006)),i=o.useLayoutEffect,a=o.useEffect;function l(e){let{headManager:t,reduceComponentsToState:n}=e;function r(){if(t&&t.mountedInstances){let r=o.Children.toArray(Array.from(t.mountedInstances).filter(Boolean));t.updateHead(n(r,e))}}return i(()=>{var n;return null==t||null==(n=t.mountedInstances)||n.add(e.children),()=>{var n;null==t||null==(n=t.mountedInstances)||n.delete(e.children)}}),i(()=>(t&&(t._pendingUpdate=r),()=>{t&&(t._pendingUpdate=r)})),a(()=>(t&&t._pendingUpdate&&(t._pendingUpdate(),t._pendingUpdate=null),()=>{t&&t._pendingUpdate&&(t._pendingUpdate(),t._pendingUpdate=null)})),null}},46731:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"warnOnce",{enumerable:!0,get:function(){return n}});let n=e=>{}},81973:function(){},7913:function(e,t,n){var r=n(52040),o=n(91083).Buffer;!function(){var t={992:function(e){e.exports=function(e,n,r){if(e.filter)return e.filter(n,r);if(null==e||"function"!=typeof n)throw TypeError();for(var o=[],i=0;i1?n-1:0),o=1;o1?n-1:0),o=1;o1?n-1:0),o=1;o1?n-1:0),o=1;oe.length)&&(n=e.length),e.substring(n-t.length,n)===t}var g="",m="",b="",v="",y={deepStrictEqual:"Expected values to be strictly deep-equal:",strictEqual:"Expected values to be strictly equal:",strictEqualObject:'Expected "actual" to be reference-equal to "expected":',deepEqual:"Expected values to be loosely deep-equal:",equal:"Expected values to be loosely equal:",notDeepStrictEqual:'Expected "actual" not to be strictly deep-equal to:',notStrictEqual:'Expected "actual" to be strictly unequal to:',notStrictEqualObject:'Expected "actual" not to be reference-equal to "expected":',notDeepEqual:'Expected "actual" not to be loosely deep-equal to:',notEqual:'Expected "actual" to be loosely unequal to:',notIdentical:"Values identical but not reference-equal:"};function x(e){var t=Object.keys(e),n=Object.create(Object.getPrototypeOf(e));return t.forEach(function(t){n[t]=e[t]}),Object.defineProperty(n,"message",{value:e.message}),n}function w(e){return d(e,{compact:!1,customInspect:!1,depth:1e3,maxArrayLength:1/0,showHidden:!1,breakLength:1/0,showProxy:!1,sorted:!0,getters:!0})}var E=function(e){var t,n;function l(e){if(!function(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}(this,l),"object"!==f(e)||null===e)throw new p("options","Object",e);var t,n=e.message,o=e.operator,s=e.stackStartFn,c=e.actual,d=e.expected,E=Error.stackTraceLimit;if(Error.stackTraceLimit=0,null!=n)t=i(this,u(l).call(this,String(n)));else 
if(r.stderr&&r.stderr.isTTY&&(r.stderr&&r.stderr.getColorDepth&&1!==r.stderr.getColorDepth()?(g="\x1b[34m",m="\x1b[32m",v="\x1b[39m",b="\x1b[31m"):(g="",m="",v="",b="")),"object"===f(c)&&null!==c&&"object"===f(d)&&null!==d&&"stack"in c&&c instanceof Error&&"stack"in d&&d instanceof Error&&(c=x(c),d=x(d)),"deepStrictEqual"===o||"strictEqual"===o)t=i(this,u(l).call(this,function(e,t,n){var o="",i="",a=0,l="",s=!1,c=w(e),u=c.split("\n"),d=w(t).split("\n"),p=0,x="";if("strictEqual"===n&&"object"===f(e)&&"object"===f(t)&&null!==e&&null!==t&&(n="strictEqualObject"),1===u.length&&1===d.length&&u[0]!==d[0]){var E=u[0].length+d[0].length;if(E<=10){if(("object"!==f(e)||null===e)&&("object"!==f(t)||null===t)&&(0!==e||0!==t))return"".concat(y[n],"\n\n")+"".concat(u[0]," !== ").concat(d[0],"\n")}else if("strictEqualObject"!==n&&E<(r.stderr&&r.stderr.isTTY?r.stderr.columns:80)){for(;u[0][p]===d[0][p];)p++;p>2&&(x="\n ".concat(function(e,t){if(t=Math.floor(t),0==e.length||0==t)return"";var n=e.length*t;for(t=Math.floor(Math.log(t)/Math.log(2));t;)e+=e,t--;return e+e.substring(0,n-e.length)}(" ",p),"^"),p=0)}}for(var S=u[u.length-1],k=d[d.length-1];S===k&&(p++<2?l="\n ".concat(S).concat(l):o=S,u.pop(),d.pop(),0!==u.length&&0!==d.length);)S=u[u.length-1],k=d[d.length-1];var _=Math.max(u.length,d.length);if(0===_){var O=c.split("\n");if(O.length>30)for(O[26]="".concat(g,"...").concat(v);O.length>27;)O.pop();return"".concat(y.notIdentical,"\n\n").concat(O.join("\n"),"\n")}p>3&&(l="\n".concat(g,"...").concat(v).concat(l),s=!0),""!==o&&(l="\n ".concat(o).concat(l),o="");var C=0,A=y[n]+"\n".concat(m,"+ actual").concat(v," ").concat(b,"- expected").concat(v),N=" ".concat(g,"...").concat(v," Lines skipped");for(p=0;p<_;p++){var R=p-a;if(u.length1&&p>2&&(R>4?(i+="\n".concat(g,"...").concat(v),s=!0):R>3&&(i+="\n ".concat(d[p-2]),C++),i+="\n ".concat(d[p-1]),C++),a=p,o+="\n".concat(b,"-").concat(v," ").concat(d[p]),C++;else if(d.length1&&p>2&&(R>4?(i+="\n".concat(g,"...").concat(v),s=!0):R>3&&(i+="\n ".concat(u[p-2]),C++),i+="\n ".concat(u[p-1]),C++),a=p,i+="\n".concat(m,"+").concat(v," ").concat(u[p]),C++;else{var T=d[p],P=u[p],M=P!==T&&(!h(P,",")||P.slice(0,-1)!==T);M&&h(T,",")&&T.slice(0,-1)===P&&(M=!1,P+=","),M?(R>1&&p>2&&(R>4?(i+="\n".concat(g,"...").concat(v),s=!0):R>3&&(i+="\n ".concat(u[p-2]),C++),i+="\n ".concat(u[p-1]),C++),a=p,i+="\n".concat(m,"+").concat(v," ").concat(P),o+="\n".concat(b,"-").concat(v," ").concat(T),C+=2):(i+=o,o="",(1===R||0===p)&&(i+="\n ".concat(P),C++))}if(C>20&&p<_-2)return"".concat(A).concat(N,"\n").concat(i,"\n").concat(g,"...").concat(v).concat(o,"\n")+"".concat(g,"...").concat(v)}return"".concat(A).concat(s?N:"","\n").concat(i).concat(o).concat(l).concat(x)}(c,d,o)));else if("notDeepStrictEqual"===o||"notStrictEqual"===o){var S=y[o],k=w(c).split("\n");if("notStrictEqual"===o&&"object"===f(c)&&null!==c&&(S=y.notStrictEqualObject),k.length>30)for(k[26]="".concat(g,"...").concat(v);k.length>27;)k.pop();t=1===k.length?i(this,u(l).call(this,"".concat(S," ").concat(k[0]))):i(this,u(l).call(this,"".concat(S,"\n\n").concat(k.join("\n"),"\n")))}else{var _=w(c),O="",C=y[o];"notDeepEqual"===o||"notEqual"===o?(_="".concat(y[o],"\n\n").concat(_)).length>1024&&(_="".concat(_.slice(0,1021),"...")):(O="".concat(w(d)),_.length>512&&(_="".concat(_.slice(0,509),"...")),O.length>512&&(O="".concat(O.slice(0,509),"...")),"deepEqual"===o||"equal"===o?_="".concat(C,"\n\n").concat(_,"\n\nshould equal\n\n"):O=" ".concat(o," ").concat(O)),t=i(this,u(l).call(this,"".concat(_).concat(O)))}return 
Error.stackTraceLimit=E,t.generatedMessage=!n,Object.defineProperty(a(t),"name",{value:"AssertionError [ERR_ASSERTION]",enumerable:!1,writable:!0,configurable:!0}),t.code="ERR_ASSERTION",t.actual=c,t.expected=d,t.operator=o,Error.captureStackTrace&&Error.captureStackTrace(a(t),s),t.stack,t.name="AssertionError",i(t)}return!function(e,t){if("function"!=typeof t&&null!==t)throw TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&c(e,t)}(l,e),t=[{key:"toString",value:function(){return"".concat(this.name," [").concat(this.code,"]: ").concat(this.message)}},{key:d.custom,value:function(e,t){return d(this,function(e){for(var t=1;t2)?"one of ".concat(t," ").concat(e.slice(0,n-1).join(", "),", or ")+e[n-1]:2===n?"one of ".concat(t," ").concat(e[0]," or ").concat(e[1]):"of ".concat(t," ").concat(e[0])}c("ERR_AMBIGUOUS_ARGUMENT",'The "%s" argument is ambiguous. %s',TypeError),c("ERR_INVALID_ARG_TYPE",function(e,t,o){if((void 0===a&&(a=n(167)),a("string"==typeof e,"'name' must be a string"),"string"==typeof t&&(i="not ",t.substr(!l||l<0?0:+l,i.length)===i))?(d="must not be",t=t.replace(/^not /,"")):d="must be",s=" argument",(void 0===c||c>e.length)&&(c=e.length),e.substring(c-s.length,c)===s)p="The ".concat(e," ").concat(d," ").concat(u(t,"type"));else{var i,l,s,c,f,d,p,h=("number"!=typeof f&&(f=0),f+1>e.length||-1===e.indexOf(".",f))?"argument":"property";p='The "'.concat(e,'" ').concat(h," ").concat(d," ").concat(u(t,"type"))}return p+". Received type ".concat(r(o))},TypeError),c("ERR_INVALID_ARG_VALUE",function(e,t){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"is invalid";void 0===l&&(l=n(177));var o=l.inspect(t);return o.length>128&&(o="".concat(o.slice(0,128),"...")),"The argument '".concat(e,"' ").concat(r,". 
Received ").concat(o)},TypeError,RangeError),c("ERR_INVALID_RETURN_VALUE",function(e,t,n){var o;return o=n&&n.constructor&&n.constructor.name?"instance of ".concat(n.constructor.name):"type ".concat(r(n)),"Expected ".concat(e,' to be returned from the "').concat(t,'"')+" function but got ".concat(o,".")},TypeError),c("ERR_MISSING_ARGS",function(){for(var e=arguments.length,t=Array(e),r=0;r0,"At least one arg needs to be specified");var o="The ",i=t.length;switch(t=t.map(function(e){return'"'.concat(e,'"')}),i){case 1:o+="".concat(t[0]," argument");break;case 2:o+="".concat(t[0]," and ").concat(t[1]," arguments");break;default:o+=t.slice(0,i-1).join(", ")+", and ".concat(t[i-1]," arguments")}return"".concat(o," must be specified")},TypeError),e.exports.codes=s},176:function(e,t,n){"use strict";function r(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var n=[],r=!0,o=!1,i=void 0;try{for(var a,l=e[Symbol.iterator]();!(r=(a=l.next()).done)&&(n.push(a.value),!t||n.length!==t);r=!0);}catch(e){o=!0,i=e}finally{try{r||null==l.return||l.return()}finally{if(o)throw i}}return n}(e,t)||function(){throw TypeError("Invalid attempt to destructure non-iterable instance")}()}function o(e){return(o="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var i=void 0!==/a/g.flags,a=function(e){var t=[];return e.forEach(function(e){return t.push(e)}),t},l=function(e){var t=[];return e.forEach(function(e,n){return t.push([n,e])}),t},s=Object.is?Object.is:n(208),c=Object.getOwnPropertySymbols?Object.getOwnPropertySymbols:function(){return[]},u=Number.isNaN?Number.isNaN:n(718);function f(e){return e.call.bind(e)}var d=f(Object.prototype.hasOwnProperty),p=f(Object.prototype.propertyIsEnumerable),h=f(Object.prototype.toString),g=n(177).types,m=g.isAnyArrayBuffer,b=g.isArrayBufferView,v=g.isDate,y=g.isMap,x=g.isRegExp,w=g.isSet,E=g.isNativeError,S=g.isBoxedPrimitive,k=g.isNumberObject,_=g.isStringObject,O=g.isBooleanObject,C=g.isBigIntObject,A=g.isSymbolObject,N=g.isFloat32Array,R=g.isFloat64Array;function T(e){if(0===e.length||e.length>10)return!0;for(var t=0;t57)return!0}return 10===e.length&&e>=4294967296}function P(e){return Object.keys(e).filter(T).concat(c(e).filter(Object.prototype.propertyIsEnumerable.bind(e)))}/*! - * The buffer module from node.js, for the browser. 
- * - * @author Feross Aboukhadijeh - * @license MIT - */function M(e,t){if(e===t)return 0;for(var n=e.length,r=t.length,o=0,i=Math.min(n,r);o-1?o(n):n}},139:function(e,t,n){"use strict";var r=n(174),o=n(500),i=o("%Function.prototype.apply%"),a=o("%Function.prototype.call%"),l=o("%Reflect.apply%",!0)||r.call(a,i),s=o("%Object.getOwnPropertyDescriptor%",!0),c=o("%Object.defineProperty%",!0),u=o("%Math.max%");if(c)try{c({},"a",{value:1})}catch(e){c=null}e.exports=function(e){var t=l(r,a,arguments);return s&&c&&s(t,"length").configurable&&c(t,"length",{value:1+u(0,e.length-(arguments.length-1))}),t};var f=function(){return l(r,i,arguments)};c?c(e.exports,"apply",{value:f}):e.exports.apply=f},69:function(e,t,n){"use strict";var r=n(935),o="function"==typeof Symbol&&"symbol"==typeof Symbol("foo"),i=Object.prototype.toString,a=Array.prototype.concat,l=Object.defineProperty,s=l&&function(){var e={};try{for(var t in l(e,"x",{enumerable:!1,value:e}),e)return!1;return e.x===e}catch(e){return!1}}(),c=function(e,t,n,r){(!(t in e)||"function"==typeof r&&"[object Function]"===i.call(r)&&r())&&(s?l(e,t,{configurable:!0,enumerable:!1,value:n,writable:!0}):e[t]=n)},u=function(e,t){var n=arguments.length>2?arguments[2]:{},i=r(t);o&&(i=a.call(i,Object.getOwnPropertySymbols(t)));for(var l=0;l1&&"boolean"!=typeof t)throw new a('"allowMissing" argument must be a boolean');if(null===k(/^%?[^%]*%?$/g,e))throw new o("`%` may not be present anywhere but at the beginning and end of the intrinsic name");var n=C(e),r=n.length>0?n[0]:"",i=A("%"+r+"%",t),l=i.name,c=i.value,u=!1,f=i.alias;f&&(r=f[0],w(n,x([0,1],f)));for(var d=1,p=!0;d=n.length){var v=s(c,h);c=(p=!!v)&&"get"in v&&!("originalValue"in v.get)?v.get:c[h]}else p=y(c,h),c=c[h];p&&!u&&(g[l]=c)}}return c}},942:function(e,t,n){"use strict";var r="undefined"!=typeof Symbol&&Symbol,o=n(773);e.exports=function(){return"function"==typeof r&&"function"==typeof Symbol&&"symbol"==typeof r("foo")&&"symbol"==typeof Symbol("bar")&&o()}},773:function(e){"use strict";e.exports=function(){if("function"!=typeof Symbol||"function"!=typeof Object.getOwnPropertySymbols)return!1;if("symbol"==typeof Symbol.iterator)return!0;var e={},t=Symbol("test"),n=Object(t);if("string"==typeof t||"[object Symbol]"!==Object.prototype.toString.call(t)||"[object Symbol]"!==Object.prototype.toString.call(n))return!1;for(t in e[t]=42,e)return!1;if("function"==typeof Object.keys&&0!==Object.keys(e).length||"function"==typeof Object.getOwnPropertyNames&&0!==Object.getOwnPropertyNames(e).length)return!1;var r=Object.getOwnPropertySymbols(e);if(1!==r.length||r[0]!==t||!Object.prototype.propertyIsEnumerable.call(e,t))return!1;if("function"==typeof Object.getOwnPropertyDescriptor){var o=Object.getOwnPropertyDescriptor(e,t);if(42!==o.value||!0!==o.enumerable)return!1}return!0}},115:function(e,t,n){"use strict";var r="undefined"!=typeof Symbol&&Symbol,o=n(832);e.exports=function(){return"function"==typeof r&&"function"==typeof Symbol&&"symbol"==typeof r("foo")&&"symbol"==typeof Symbol("bar")&&o()}},832:function(e){"use strict";e.exports=function(){if("function"!=typeof Symbol||"function"!=typeof Object.getOwnPropertySymbols)return!1;if("symbol"==typeof Symbol.iterator)return!0;var e={},t=Symbol("test"),n=Object(t);if("string"==typeof t||"[object Symbol]"!==Object.prototype.toString.call(t)||"[object Symbol]"!==Object.prototype.toString.call(n))return!1;for(t in e[t]=42,e)return!1;if("function"==typeof Object.keys&&0!==Object.keys(e).length||"function"==typeof 
Object.getOwnPropertyNames&&0!==Object.getOwnPropertyNames(e).length)return!1;var r=Object.getOwnPropertySymbols(e);if(1!==r.length||r[0]!==t||!Object.prototype.propertyIsEnumerable.call(e,t))return!1;if("function"==typeof Object.getOwnPropertyDescriptor){var o=Object.getOwnPropertyDescriptor(e,t);if(42!==o.value||!0!==o.enumerable)return!1}return!0}},101:function(e,t,n){"use strict";var r=n(174);e.exports=r.call(Function.call,Object.prototype.hasOwnProperty)},782:function(e){"function"==typeof Object.create?e.exports=function(e,t){t&&(e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}))}:e.exports=function(e,t){if(t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new n,e.prototype.constructor=e}}},157:function(e){"use strict";var t="function"==typeof Symbol&&"symbol"==typeof Symbol.toStringTag,n=Object.prototype.toString,r=function(e){return(!t||!e||"object"!=typeof e||!(Symbol.toStringTag in e))&&"[object Arguments]"===n.call(e)},o=function(e){return!!r(e)||null!==e&&"object"==typeof e&&"number"==typeof e.length&&e.length>=0&&"[object Array]"!==n.call(e)&&"[object Function]"===n.call(e.callee)},i=function(){return r(arguments)}();r.isLegacyArguments=o,e.exports=i?r:o},391:function(e){"use strict";var t=Object.prototype.toString,n=Function.prototype.toString,r=/^\s*(?:function)?\*/,o="function"==typeof Symbol&&"symbol"==typeof Symbol.toStringTag,i=Object.getPrototypeOf,a=function(){if(!o)return!1;try{return Function("return function*() {}")()}catch(e){}}(),l=a?i(a):{};e.exports=function(e){return"function"==typeof e&&(!!r.test(n.call(e))||(o?i(e)===l:"[object GeneratorFunction]"===t.call(e)))}},460:function(e){"use strict";e.exports=function(e){return e!=e}},718:function(e,t,n){"use strict";var r=n(139),o=n(69),i=n(460),a=n(625),l=n(171),s=r(a(),Number);o(s,{getPolyfill:a,implementation:i,shim:l}),e.exports=s},625:function(e,t,n){"use strict";var r=n(460);e.exports=function(){return Number.isNaN&&Number.isNaN(NaN)&&!Number.isNaN("a")?Number.isNaN:r}},171:function(e,t,n){"use strict";var r=n(69),o=n(625);e.exports=function(){var e=o();return r(Number,{isNaN:e},{isNaN:function(){return Number.isNaN!==e}}),e}},994:function(e,t,r){"use strict";var o=r(144),i=r(349),a=r(256),l=a("Object.prototype.toString"),s=r(942)()&&"symbol"==typeof Symbol.toStringTag,c=i(),u=a("Array.prototype.indexOf",!0)||function(e,t){for(var n=0;n-1)}},208:function(e){"use strict";var t=function(e){return e!=e};e.exports=function(e,n){return 0===e&&0===n?1/e==1/n:!!(e===n||t(e)&&t(n))}},579:function(e,t,n){"use strict";var r;if(!Object.keys){var o=Object.prototype.hasOwnProperty,i=Object.prototype.toString,a=n(412),l=Object.prototype.propertyIsEnumerable,s=!l.call({toString:null},"toString"),c=l.call(function(){},"prototype"),u=["toString","toLocaleString","valueOf","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","constructor"],f=function(e){var t=e.constructor;return t&&t.prototype===e},d={$applicationCache:!0,$console:!0,$external:!0,$frame:!0,$frameElement:!0,$frames:!0,$innerHeight:!0,$innerWidth:!0,$onmozfullscreenchange:!0,$onmozfullscreenerror:!0,$outerHeight:!0,$outerWidth:!0,$pageXOffset:!0,$pageYOffset:!0,$parent:!0,$scrollLeft:!0,$scrollTop:!0,$scrollX:!0,$scrollY:!0,$self:!0,$webkitIndexedDB:!0,$webkitStorageInfo:!0,$window:!0},p=function(){if("undefined"==typeof window)return!1;for(var e in window)try{if(!d["$"+e]&&o.call(window,e)&&null!==window[e]&&"object"==typeof 
window[e])try{f(window[e])}catch(e){return!0}}catch(e){return!0}return!1}(),h=function(e){if("undefined"==typeof window||!p)return f(e);try{return f(e)}catch(e){return!1}};r=function(e){var t=null!==e&&"object"==typeof e,n="[object Function]"===i.call(e),r=a(e),l=t&&"[object String]"===i.call(e),f=[];if(!t&&!n&&!r)throw TypeError("Object.keys called on a non-object");var d=c&&n;if(l&&e.length>0&&!o.call(e,0))for(var p=0;p0)for(var g=0;g=0&&"[object Function]"===t.call(e.callee)),r}},369:function(e){e.exports=function(e){return e instanceof o}},584:function(e,t,n){"use strict";var r=n(157),o=n(391),i=n(490),a=n(994);function l(e){return e.call.bind(e)}var s="undefined"!=typeof BigInt,c="undefined"!=typeof Symbol,u=l(Object.prototype.toString),f=l(Number.prototype.valueOf),d=l(String.prototype.valueOf),p=l(Boolean.prototype.valueOf);if(s)var h=l(BigInt.prototype.valueOf);if(c)var g=l(Symbol.prototype.valueOf);function m(e,t){if("object"!=typeof e)return!1;try{return t(e),!0}catch(e){return!1}}function b(e){return"[object Map]"===u(e)}function v(e){return"[object Set]"===u(e)}function y(e){return"[object WeakMap]"===u(e)}function x(e){return"[object WeakSet]"===u(e)}function w(e){return"[object ArrayBuffer]"===u(e)}function E(e){return"undefined"!=typeof ArrayBuffer&&(w.working?w(e):e instanceof ArrayBuffer)}function S(e){return"[object DataView]"===u(e)}function k(e){return"undefined"!=typeof DataView&&(S.working?S(e):e instanceof DataView)}t.isArgumentsObject=r,t.isGeneratorFunction=o,t.isTypedArray=a,t.isPromise=function(e){return"undefined"!=typeof Promise&&e instanceof Promise||null!==e&&"object"==typeof e&&"function"==typeof e.then&&"function"==typeof e.catch},t.isArrayBufferView=function(e){return"undefined"!=typeof ArrayBuffer&&ArrayBuffer.isView?ArrayBuffer.isView(e):a(e)||k(e)},t.isUint8Array=function(e){return"Uint8Array"===i(e)},t.isUint8ClampedArray=function(e){return"Uint8ClampedArray"===i(e)},t.isUint16Array=function(e){return"Uint16Array"===i(e)},t.isUint32Array=function(e){return"Uint32Array"===i(e)},t.isInt8Array=function(e){return"Int8Array"===i(e)},t.isInt16Array=function(e){return"Int16Array"===i(e)},t.isInt32Array=function(e){return"Int32Array"===i(e)},t.isFloat32Array=function(e){return"Float32Array"===i(e)},t.isFloat64Array=function(e){return"Float64Array"===i(e)},t.isBigInt64Array=function(e){return"BigInt64Array"===i(e)},t.isBigUint64Array=function(e){return"BigUint64Array"===i(e)},b.working="undefined"!=typeof Map&&b(new Map),t.isMap=function(e){return"undefined"!=typeof Map&&(b.working?b(e):e instanceof Map)},v.working="undefined"!=typeof Set&&v(new Set),t.isSet=function(e){return"undefined"!=typeof Set&&(v.working?v(e):e instanceof Set)},y.working="undefined"!=typeof WeakMap&&y(new WeakMap),t.isWeakMap=function(e){return"undefined"!=typeof WeakMap&&(y.working?y(e):e instanceof WeakMap)},x.working="undefined"!=typeof WeakSet&&x(new WeakSet),t.isWeakSet=function(e){return x(e)},w.working="undefined"!=typeof ArrayBuffer&&w(new ArrayBuffer),t.isArrayBuffer=E,S.working="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof DataView&&S(new DataView(new ArrayBuffer(1),0,1)),t.isDataView=k;var _="undefined"!=typeof SharedArrayBuffer?SharedArrayBuffer:void 0;function O(e){return"[object SharedArrayBuffer]"===u(e)}function C(e){return void 0!==_&&(void 0===O.working&&(O.working=O(new _)),O.working?O(e):e instanceof _)}function A(e){return m(e,f)}function N(e){return m(e,d)}function R(e){return m(e,p)}function T(e){return s&&m(e,h)}function P(e){return 
c&&m(e,g)}t.isSharedArrayBuffer=C,t.isAsyncFunction=function(e){return"[object AsyncFunction]"===u(e)},t.isMapIterator=function(e){return"[object Map Iterator]"===u(e)},t.isSetIterator=function(e){return"[object Set Iterator]"===u(e)},t.isGeneratorObject=function(e){return"[object Generator]"===u(e)},t.isWebAssemblyCompiledModule=function(e){return"[object WebAssembly.Module]"===u(e)},t.isNumberObject=A,t.isStringObject=N,t.isBooleanObject=R,t.isBigIntObject=T,t.isSymbolObject=P,t.isBoxedPrimitive=function(e){return A(e)||N(e)||R(e)||T(e)||P(e)},t.isAnyArrayBuffer=function(e){return"undefined"!=typeof Uint8Array&&(E(e)||C(e))},["isProxy","isExternal","isModuleNamespaceObject"].forEach(function(e){Object.defineProperty(t,e,{enumerable:!1,value:function(){throw Error(e+" is not supported in userland")}})})},177:function(e,t,n){var o=Object.getOwnPropertyDescriptors||function(e){for(var t=Object.keys(e),n={},r=0;r=o)return e;switch(e){case"%s":return String(r[n++]);case"%d":return Number(r[n++]);case"%j":try{return JSON.stringify(r[n++])}catch(e){return"[Circular]"}default:return e}}),l=r[n];n=3&&(r.depth=arguments[2]),arguments.length>=4&&(r.colors=arguments[3]),m(n)?r.showHidden=n:n&&t._extend(r,n),x(r.showHidden)&&(r.showHidden=!1),x(r.depth)&&(r.depth=2),x(r.colors)&&(r.colors=!1),x(r.customInspect)&&(r.customInspect=!0),r.colors&&(r.stylize=u),d(r,e,r.depth)}function u(e,t){var n=c.styles[t];return n?"\x1b["+c.colors[n][0]+"m"+e+"\x1b["+c.colors[n][1]+"m":e}function f(e,t){return e}function d(e,n,r){if(e.customInspect&&n&&_(n.inspect)&&n.inspect!==t.inspect&&!(n.constructor&&n.constructor.prototype===n)){var o,i,a,l,s,c=n.inspect(r,e);return y(c)||(c=d(e,c,r)),c}var u=function(e,t){if(x(t))return e.stylize("undefined","undefined");if(y(t)){var n="'"+JSON.stringify(t).replace(/^"|"$/g,"").replace(/'/g,"\\'").replace(/\\"/g,'"')+"'";return e.stylize(n,"string")}return v(t)?e.stylize(""+t,"number"):m(t)?e.stylize(""+t,"boolean"):b(t)?e.stylize("null","null"):void 0}(e,n);if(u)return u;var f=Object.keys(n),E=(l={},f.forEach(function(e,t){l[e]=!0}),l);if(e.showHidden&&(f=Object.getOwnPropertyNames(n)),k(n)&&(f.indexOf("message")>=0||f.indexOf("description")>=0))return p(n);if(0===f.length){if(_(n)){var O=n.name?": "+n.name:"";return e.stylize("[Function"+O+"]","special")}if(w(n))return e.stylize(RegExp.prototype.toString.call(n),"regexp");if(S(n))return e.stylize(Date.prototype.toString.call(n),"date");if(k(n))return p(n)}var C="",A=!1,R=["{","}"];return(g(n)&&(A=!0,R=["[","]"]),_(n)&&(C=" [Function"+(n.name?": "+n.name:"")+"]"),w(n)&&(C=" "+RegExp.prototype.toString.call(n)),S(n)&&(C=" "+Date.prototype.toUTCString.call(n)),k(n)&&(C=" "+p(n)),0!==f.length||A&&0!=n.length)?r<0?w(n)?e.stylize(RegExp.prototype.toString.call(n),"regexp"):e.stylize("[Object]","special"):(e.seen.push(n),s=A?function(e,t,n,r,o){for(var i=[],a=0,l=t.length;a=0&&a++,e+t.replace(/\u001b\[\d\d?m/g,"").length+1},0)>60?i[0]+(""===o?"":o+"\n ")+" "+s.join(",\n ")+" "+i[1]:i[0]+o+" "+s.join(", ")+" "+i[1]):R[0]+C+R[1]}function p(e){return"["+Error.prototype.toString.call(e)+"]"}function h(e,t,n,r,o,i){var a,l,s;if((s=Object.getOwnPropertyDescriptor(t,o)||{value:t[o]}).get?l=s.set?e.stylize("[Getter/Setter]","special"):e.stylize("[Getter]","special"):s.set&&(l=e.stylize("[Setter]","special")),N(r,o)||(a="["+o+"]"),!l&&(0>e.seen.indexOf(s.value)?(l=b(n)?d(e,s.value,null):d(e,s.value,n-1)).indexOf("\n")>-1&&(l=i?l.split("\n").map(function(e){return" "+e}).join("\n").substr(2):"\n"+l.split("\n").map(function(e){return" 
"+e}).join("\n")):l=e.stylize("[Circular]","special")),x(a)){if(i&&o.match(/^\d+$/))return l;(a=JSON.stringify(""+o)).match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)?(a=a.substr(1,a.length-2),a=e.stylize(a,"name")):(a=a.replace(/'/g,"\\'").replace(/\\"/g,'"').replace(/(^"|"$)/g,"'"),a=e.stylize(a,"string"))}return a+": "+l}function g(e){return Array.isArray(e)}function m(e){return"boolean"==typeof e}function b(e){return null===e}function v(e){return"number"==typeof e}function y(e){return"string"==typeof e}function x(e){return void 0===e}function w(e){return E(e)&&"[object RegExp]"===O(e)}function E(e){return"object"==typeof e&&null!==e}function S(e){return E(e)&&"[object Date]"===O(e)}function k(e){return E(e)&&("[object Error]"===O(e)||e instanceof Error)}function _(e){return"function"==typeof e}function O(e){return Object.prototype.toString.call(e)}function C(e){return e<10?"0"+e.toString(10):e.toString(10)}t.debuglog=function(e){if(!a[e=e.toUpperCase()]){if(l.test(e)){var n=r.pid;a[e]=function(){var r=t.format.apply(t,arguments);console.error("%s %d: %s",e,n,r)}}else a[e]=function(){}}return a[e]},t.inspect=c,c.colors={bold:[1,22],italic:[3,23],underline:[4,24],inverse:[7,27],white:[37,39],grey:[90,39],black:[30,39],blue:[34,39],cyan:[36,39],green:[32,39],magenta:[35,39],red:[31,39],yellow:[33,39]},c.styles={special:"cyan",number:"yellow",boolean:"yellow",undefined:"grey",null:"bold",string:"green",date:"magenta",regexp:"red"},t.types=n(584),t.isArray=g,t.isBoolean=m,t.isNull=b,t.isNullOrUndefined=function(e){return null==e},t.isNumber=v,t.isString=y,t.isSymbol=function(e){return"symbol"==typeof e},t.isUndefined=x,t.isRegExp=w,t.types.isRegExp=w,t.isObject=E,t.isDate=S,t.types.isDate=S,t.isError=k,t.types.isNativeError=k,t.isFunction=_,t.isPrimitive=function(e){return null===e||"boolean"==typeof e||"number"==typeof e||"string"==typeof e||"symbol"==typeof e||void 0===e},t.isBuffer=n(369);var A=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];function N(e,t){return Object.prototype.hasOwnProperty.call(e,t)}t.log=function(){var e,n;console.log("%s - %s",(n=[C((e=new Date).getHours()),C(e.getMinutes()),C(e.getSeconds())].join(":"),[e.getDate(),A[e.getMonth()],n].join(" ")),t.format.apply(t,arguments))},t.inherits=n(782),t._extend=function(e,t){if(!t||!E(t))return e;for(var n=Object.keys(t),r=n.length;r--;)e[n[r]]=t[n[r]];return e};var R="undefined"!=typeof Symbol?Symbol("util.promisify.custom"):void 0;function T(e,t){if(!e){var n=Error("Promise was rejected with a falsy value");n.reason=e,e=n}return t(e)}t.promisify=function(e){if("function"!=typeof e)throw TypeError('The "original" argument must be of type Function');if(R&&e[R]){var t=e[R];if("function"!=typeof t)throw TypeError('The "util.promisify.custom" argument must be of type Function');return Object.defineProperty(t,R,{value:t,enumerable:!1,writable:!1,configurable:!0}),t}function t(){for(var t,n,r=new Promise(function(e,r){t=e,n=r}),o=[],i=0;i0?a-4:a;for(n=0;n>16&255,c[u++]=t>>8&255,c[u++]=255&t;return 2===l&&(t=r[e.charCodeAt(n)]<<2|r[e.charCodeAt(n+1)]>>4,c[u++]=255&t),1===l&&(t=r[e.charCodeAt(n)]<<10|r[e.charCodeAt(n+1)]<<4|r[e.charCodeAt(n+2)]>>2,c[u++]=t>>8&255,c[u++]=255&t),c},t.fromByteArray=function(e){for(var t,r=e.length,o=r%3,i=[],a=0,l=r-o;a>18&63]+n[o>>12&63]+n[o>>6&63]+n[63&o]);return i.join("")}(e,a,a+16383>l?l:a+16383));return 1===o?i.push(n[(t=e[r-1])>>2]+n[t<<4&63]+"=="):2===o&&i.push(n[(t=(e[r-2]<<8)+e[r-1])>>10]+n[t>>4&63]+n[t<<2&63]+"="),i.join("")};for(var n=[],r=[],o="undefined"!=typeof 
Uint8Array?Uint8Array:Array,i="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",a=0,l=i.length;a0)throw Error("Invalid string. Length must be a multiple of 4");var n=e.indexOf("=");-1===n&&(n=t);var r=n===t?0:4-n%4;return[n,r]}r["-".charCodeAt(0)]=62,r["_".charCodeAt(0)]=63},72:function(e,t,n){"use strict";/*! - * The buffer module from node.js, for the browser. - * - * @author Feross Aboukhadijeh - * @license MIT - */var r=n(675),o=n(783),i="function"==typeof Symbol&&"function"==typeof Symbol.for?Symbol.for("nodejs.util.inspect.custom"):null;function a(e){if(e>2147483647)throw RangeError('The value "'+e+'" is invalid for option "size"');var t=new Uint8Array(e);return Object.setPrototypeOf(t,l.prototype),t}function l(e,t,n){if("number"==typeof e){if("string"==typeof t)throw TypeError('The "string" argument must be of type string. Received type number');return u(e)}return s(e,t,n)}function s(e,t,n){if("string"==typeof e)return function(e,t){if(("string"!=typeof t||""===t)&&(t="utf8"),!l.isEncoding(t))throw TypeError("Unknown encoding: "+t);var n=0|p(e,t),r=a(n),o=r.write(e,t);return o!==n&&(r=r.slice(0,o)),r}(e,t);if(ArrayBuffer.isView(e))return f(e);if(null==e)throw TypeError("The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type "+typeof e);if(N(e,ArrayBuffer)||e&&N(e.buffer,ArrayBuffer)||"undefined"!=typeof SharedArrayBuffer&&(N(e,SharedArrayBuffer)||e&&N(e.buffer,SharedArrayBuffer)))return function(e,t,n){var r;if(t<0||e.byteLength=2147483647)throw RangeError("Attempt to allocate Buffer larger than maximum size: 0x7fffffff bytes");return 0|e}function p(e,t){if(l.isBuffer(e))return e.length;if(ArrayBuffer.isView(e)||N(e,ArrayBuffer))return e.byteLength;if("string"!=typeof e)throw TypeError('The "string" argument must be one of type string, Buffer, or ArrayBuffer. 
Received type '+typeof e);var n=e.length,r=arguments.length>2&&!0===arguments[2];if(!r&&0===n)return 0;for(var o=!1;;)switch(t){case"ascii":case"latin1":case"binary":return n;case"utf8":case"utf-8":return _(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*n;case"hex":return n>>>1;case"base64":return C(e).length;default:if(o)return r?-1:_(e).length;t=(""+t).toLowerCase(),o=!0}}function h(e,t,n){var o,i,a=!1;if((void 0===t||t<0)&&(t=0),t>this.length||((void 0===n||n>this.length)&&(n=this.length),n<=0||(n>>>=0)<=(t>>>=0)))return"";for(e||(e="utf8");;)switch(e){case"hex":return function(e,t,n){var r=e.length;(!t||t<0)&&(t=0),(!n||n<0||n>r)&&(n=r);for(var o="",i=t;i2147483647?n=2147483647:n<-2147483648&&(n=-2147483648),(i=n=+n)!=i&&(n=o?0:e.length-1),n<0&&(n=e.length+n),n>=e.length){if(o)return -1;n=e.length-1}else if(n<0){if(!o)return -1;n=0}if("string"==typeof t&&(t=l.from(t,r)),l.isBuffer(t))return 0===t.length?-1:b(e,t,n,r,o);if("number"==typeof t)return(t&=255,"function"==typeof Uint8Array.prototype.indexOf)?o?Uint8Array.prototype.indexOf.call(e,t,n):Uint8Array.prototype.lastIndexOf.call(e,t,n):b(e,[t],n,r,o);throw TypeError("val must be string, number or Buffer")}function b(e,t,n,r,o){var i,a=1,l=e.length,s=t.length;if(void 0!==r&&("ucs2"===(r=String(r).toLowerCase())||"ucs-2"===r||"utf16le"===r||"utf-16le"===r)){if(e.length<2||t.length<2)return -1;a=2,l/=2,s/=2,n/=2}function c(e,t){return 1===a?e[t]:e.readUInt16BE(t*a)}if(o){var u=-1;for(i=n;il&&(n=l-s),i=n;i>=0;i--){for(var f=!0,d=0;d239?4:c>223?3:c>191?2:1;if(o+f<=n)switch(f){case 1:c<128&&(u=c);break;case 2:(192&(i=e[o+1]))==128&&(s=(31&c)<<6|63&i)>127&&(u=s);break;case 3:i=e[o+1],a=e[o+2],(192&i)==128&&(192&a)==128&&(s=(15&c)<<12|(63&i)<<6|63&a)>2047&&(s<55296||s>57343)&&(u=s);break;case 4:i=e[o+1],a=e[o+2],l=e[o+3],(192&i)==128&&(192&a)==128&&(192&l)==128&&(s=(15&c)<<18|(63&i)<<12|(63&a)<<6|63&l)>65535&&s<1114112&&(u=s)}null===u?(u=65533,f=1):u>65535&&(u-=65536,r.push(u>>>10&1023|55296),u=56320|1023&u),r.push(u),o+=f}return function(e){var t=e.length;if(t<=4096)return String.fromCharCode.apply(String,e);for(var n="",r=0;rn)throw RangeError("Trying to access beyond buffer length")}function x(e,t,n,r,o,i){if(!l.isBuffer(e))throw TypeError('"buffer" argument must be a Buffer instance');if(t>o||te.length)throw RangeError("Index out of range")}function w(e,t,n,r,o,i){if(n+r>e.length||n<0)throw RangeError("Index out of range")}function E(e,t,n,r,i){return t=+t,n>>>=0,i||w(e,t,n,4,34028234663852886e22,-34028234663852886e22),o.write(e,t,n,r,23,4),n+4}function S(e,t,n,r,i){return t=+t,n>>>=0,i||w(e,t,n,8,17976931348623157e292,-17976931348623157e292),o.write(e,t,n,r,52,8),n+8}t.Buffer=l,t.SlowBuffer=function(e){return+e!=e&&(e=0),l.alloc(+e)},t.INSPECT_MAX_BYTES=50,t.kMaxLength=2147483647,l.TYPED_ARRAY_SUPPORT=function(){try{var e=new Uint8Array(1),t={foo:function(){return 42}};return Object.setPrototypeOf(t,Uint8Array.prototype),Object.setPrototypeOf(e,t),42===e.foo()}catch(e){return!1}}(),l.TYPED_ARRAY_SUPPORT||"undefined"==typeof console||"function"!=typeof console.error||console.error("This browser lacks typed array (Uint8Array) support which is required by `buffer` v5.x. 
Use `buffer` v4.x if you require old browser support."),Object.defineProperty(l.prototype,"parent",{enumerable:!0,get:function(){if(l.isBuffer(this))return this.buffer}}),Object.defineProperty(l.prototype,"offset",{enumerable:!0,get:function(){if(l.isBuffer(this))return this.byteOffset}}),l.poolSize=8192,l.from=function(e,t,n){return s(e,t,n)},Object.setPrototypeOf(l.prototype,Uint8Array.prototype),Object.setPrototypeOf(l,Uint8Array),l.alloc=function(e,t,n){return(c(e),e<=0)?a(e):void 0!==t?"string"==typeof n?a(e).fill(t,n):a(e).fill(t):a(e)},l.allocUnsafe=function(e){return u(e)},l.allocUnsafeSlow=function(e){return u(e)},l.isBuffer=function(e){return null!=e&&!0===e._isBuffer&&e!==l.prototype},l.compare=function(e,t){if(N(e,Uint8Array)&&(e=l.from(e,e.offset,e.byteLength)),N(t,Uint8Array)&&(t=l.from(t,t.offset,t.byteLength)),!l.isBuffer(e)||!l.isBuffer(t))throw TypeError('The "buf1", "buf2" arguments must be one of type Buffer or Uint8Array');if(e===t)return 0;for(var n=e.length,r=t.length,o=0,i=Math.min(n,r);on&&(e+=" ... "),""},i&&(l.prototype[i]=l.prototype.inspect),l.prototype.compare=function(e,t,n,r,o){if(N(e,Uint8Array)&&(e=l.from(e,e.offset,e.byteLength)),!l.isBuffer(e))throw TypeError('The "target" argument must be one of type Buffer or Uint8Array. Received type '+typeof e);if(void 0===t&&(t=0),void 0===n&&(n=e?e.length:0),void 0===r&&(r=0),void 0===o&&(o=this.length),t<0||n>e.length||r<0||o>this.length)throw RangeError("out of range index");if(r>=o&&t>=n)return 0;if(r>=o)return -1;if(t>=n)return 1;if(t>>>=0,n>>>=0,r>>>=0,o>>>=0,this===e)return 0;for(var i=o-r,a=n-t,s=Math.min(i,a),c=this.slice(r,o),u=e.slice(t,n),f=0;f>>=0,isFinite(n)?(n>>>=0,void 0===r&&(r="utf8")):(r=n,n=void 0);else throw Error("Buffer.write(string, encoding, offset[, length]) is no longer supported");var o,i,a,l,s,c,u,f,d,p,h,g,m=this.length-t;if((void 0===n||n>m)&&(n=m),e.length>0&&(n<0||t<0)||t>this.length)throw RangeError("Attempt to write outside buffer bounds");r||(r="utf8");for(var b=!1;;)switch(r){case"hex":return function(e,t,n,r){n=Number(n)||0;var o=e.length-n;r?(r=Number(r))>o&&(r=o):r=o;var i=t.length;r>i/2&&(r=i/2);for(var a=0;a>8,o.push(n%256),o.push(r);return o}(e,this.length-h),this,h,g);default:if(b)throw TypeError("Unknown encoding: "+r);r=(""+r).toLowerCase(),b=!0}},l.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}},l.prototype.slice=function(e,t){var n=this.length;e=~~e,t=void 0===t?n:~~t,e<0?(e+=n)<0&&(e=0):e>n&&(e=n),t<0?(t+=n)<0&&(t=0):t>n&&(t=n),t>>=0,t>>>=0,n||y(e,t,this.length);for(var r=this[e],o=1,i=0;++i>>=0,t>>>=0,n||y(e,t,this.length);for(var r=this[e+--t],o=1;t>0&&(o*=256);)r+=this[e+--t]*o;return r},l.prototype.readUInt8=function(e,t){return e>>>=0,t||y(e,1,this.length),this[e]},l.prototype.readUInt16LE=function(e,t){return e>>>=0,t||y(e,2,this.length),this[e]|this[e+1]<<8},l.prototype.readUInt16BE=function(e,t){return e>>>=0,t||y(e,2,this.length),this[e]<<8|this[e+1]},l.prototype.readUInt32LE=function(e,t){return e>>>=0,t||y(e,4,this.length),(this[e]|this[e+1]<<8|this[e+2]<<16)+16777216*this[e+3]},l.prototype.readUInt32BE=function(e,t){return e>>>=0,t||y(e,4,this.length),16777216*this[e]+(this[e+1]<<16|this[e+2]<<8|this[e+3])},l.prototype.readIntLE=function(e,t,n){e>>>=0,t>>>=0,n||y(e,t,this.length);for(var r=this[e],o=1,i=0;++i=(o*=128)&&(r-=Math.pow(2,8*t)),r},l.prototype.readIntBE=function(e,t,n){e>>>=0,t>>>=0,n||y(e,t,this.length);for(var r=t,o=1,i=this[e+--r];r>0&&(o*=256);)i+=this[e+--r]*o;return 
i>=(o*=128)&&(i-=Math.pow(2,8*t)),i},l.prototype.readInt8=function(e,t){return(e>>>=0,t||y(e,1,this.length),128&this[e])?-((255-this[e]+1)*1):this[e]},l.prototype.readInt16LE=function(e,t){e>>>=0,t||y(e,2,this.length);var n=this[e]|this[e+1]<<8;return 32768&n?4294901760|n:n},l.prototype.readInt16BE=function(e,t){e>>>=0,t||y(e,2,this.length);var n=this[e+1]|this[e]<<8;return 32768&n?4294901760|n:n},l.prototype.readInt32LE=function(e,t){return e>>>=0,t||y(e,4,this.length),this[e]|this[e+1]<<8|this[e+2]<<16|this[e+3]<<24},l.prototype.readInt32BE=function(e,t){return e>>>=0,t||y(e,4,this.length),this[e]<<24|this[e+1]<<16|this[e+2]<<8|this[e+3]},l.prototype.readFloatLE=function(e,t){return e>>>=0,t||y(e,4,this.length),o.read(this,e,!0,23,4)},l.prototype.readFloatBE=function(e,t){return e>>>=0,t||y(e,4,this.length),o.read(this,e,!1,23,4)},l.prototype.readDoubleLE=function(e,t){return e>>>=0,t||y(e,8,this.length),o.read(this,e,!0,52,8)},l.prototype.readDoubleBE=function(e,t){return e>>>=0,t||y(e,8,this.length),o.read(this,e,!1,52,8)},l.prototype.writeUIntLE=function(e,t,n,r){if(e=+e,t>>>=0,n>>>=0,!r){var o=Math.pow(2,8*n)-1;x(this,e,t,n,o,0)}var i=1,a=0;for(this[t]=255&e;++a>>=0,n>>>=0,!r){var o=Math.pow(2,8*n)-1;x(this,e,t,n,o,0)}var i=n-1,a=1;for(this[t+i]=255&e;--i>=0&&(a*=256);)this[t+i]=e/a&255;return t+n},l.prototype.writeUInt8=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,1,255,0),this[t]=255&e,t+1},l.prototype.writeUInt16LE=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,2,65535,0),this[t]=255&e,this[t+1]=e>>>8,t+2},l.prototype.writeUInt16BE=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,2,65535,0),this[t]=e>>>8,this[t+1]=255&e,t+2},l.prototype.writeUInt32LE=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,4,4294967295,0),this[t+3]=e>>>24,this[t+2]=e>>>16,this[t+1]=e>>>8,this[t]=255&e,t+4},l.prototype.writeUInt32BE=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,4,4294967295,0),this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e,t+4},l.prototype.writeIntLE=function(e,t,n,r){if(e=+e,t>>>=0,!r){var o=Math.pow(2,8*n-1);x(this,e,t,n,o-1,-o)}var i=0,a=1,l=0;for(this[t]=255&e;++i>0)-l&255;return t+n},l.prototype.writeIntBE=function(e,t,n,r){if(e=+e,t>>>=0,!r){var o=Math.pow(2,8*n-1);x(this,e,t,n,o-1,-o)}var i=n-1,a=1,l=0;for(this[t+i]=255&e;--i>=0&&(a*=256);)e<0&&0===l&&0!==this[t+i+1]&&(l=1),this[t+i]=(e/a>>0)-l&255;return t+n},l.prototype.writeInt8=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,1,127,-128),e<0&&(e=255+e+1),this[t]=255&e,t+1},l.prototype.writeInt16LE=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,2,32767,-32768),this[t]=255&e,this[t+1]=e>>>8,t+2},l.prototype.writeInt16BE=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,2,32767,-32768),this[t]=e>>>8,this[t+1]=255&e,t+2},l.prototype.writeInt32LE=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,4,2147483647,-2147483648),this[t]=255&e,this[t+1]=e>>>8,this[t+2]=e>>>16,this[t+3]=e>>>24,t+4},l.prototype.writeInt32BE=function(e,t,n){return e=+e,t>>>=0,n||x(this,e,t,4,2147483647,-2147483648),e<0&&(e=4294967295+e+1),this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e,t+4},l.prototype.writeFloatLE=function(e,t,n){return E(this,e,t,!0,n)},l.prototype.writeFloatBE=function(e,t,n){return E(this,e,t,!1,n)},l.prototype.writeDoubleLE=function(e,t,n){return S(this,e,t,!0,n)},l.prototype.writeDoubleBE=function(e,t,n){return S(this,e,t,!1,n)},l.prototype.copy=function(e,t,n,r){if(!l.isBuffer(e))throw TypeError("argument should be a 
Buffer");if(n||(n=0),r||0===r||(r=this.length),t>=e.length&&(t=e.length),t||(t=0),r>0&&r=this.length)throw RangeError("Index out of range");if(r<0)throw RangeError("sourceEnd out of bounds");r>this.length&&(r=this.length),e.length-t=0;--i)e[i+t]=this[i+n];else Uint8Array.prototype.set.call(e,this.subarray(n,r),t);return o},l.prototype.fill=function(e,t,n,r){if("string"==typeof e){if("string"==typeof t?(r=t,t=0,n=this.length):"string"==typeof n&&(r=n,n=this.length),void 0!==r&&"string"!=typeof r)throw TypeError("encoding must be a string");if("string"==typeof r&&!l.isEncoding(r))throw TypeError("Unknown encoding: "+r);if(1===e.length){var o,i=e.charCodeAt(0);("utf8"===r&&i<128||"latin1"===r)&&(e=i)}}else"number"==typeof e?e&=255:"boolean"==typeof e&&(e=Number(e));if(t<0||this.length>>=0,n=void 0===n?this.length:n>>>0,e||(e=0),"number"==typeof e)for(o=t;o55295&&n<57344){if(!o){if(n>56319||a+1===r){(t-=3)>-1&&i.push(239,191,189);continue}o=n;continue}if(n<56320){(t-=3)>-1&&i.push(239,191,189),o=n;continue}n=(o-55296<<10|n-56320)+65536}else o&&(t-=3)>-1&&i.push(239,191,189);if(o=null,n<128){if((t-=1)<0)break;i.push(n)}else if(n<2048){if((t-=2)<0)break;i.push(n>>6|192,63&n|128)}else if(n<65536){if((t-=3)<0)break;i.push(n>>12|224,n>>6&63|128,63&n|128)}else if(n<1114112){if((t-=4)<0)break;i.push(n>>18|240,n>>12&63|128,n>>6&63|128,63&n|128)}else throw Error("Invalid code point")}return i}function O(e){for(var t=[],n=0;n=t.length)&&!(o>=e.length);++o)t[o+n]=e[o];return o}function N(e,t){return e instanceof t||null!=e&&null!=e.constructor&&null!=e.constructor.name&&e.constructor.name===t.name}var R=function(){for(var e="0123456789abcdef",t=Array(256),n=0;n<16;++n)for(var r=16*n,o=0;o<16;++o)t[r+o]=e[n]+e[o];return t}()},783:function(e,t){/*! ieee754. BSD-3-Clause License. 
Feross Aboukhadijeh */t.read=function(e,t,n,r,o){var i,a,l=8*o-r-1,s=(1<>1,u=-7,f=n?o-1:0,d=n?-1:1,p=e[t+f];for(f+=d,i=p&(1<<-u)-1,p>>=-u,u+=l;u>0;i=256*i+e[t+f],f+=d,u-=8);for(a=i&(1<<-u)-1,i>>=-u,u+=r;u>0;a=256*a+e[t+f],f+=d,u-=8);if(0===i)i=1-c;else{if(i===s)return a?NaN:(p?-1:1)*(1/0);a+=Math.pow(2,r),i-=c}return(p?-1:1)*a*Math.pow(2,i-r)},t.write=function(e,t,n,r,o,i){var a,l,s,c=8*i-o-1,u=(1<>1,d=23===o?5960464477539062e-23:0,p=r?0:i-1,h=r?1:-1,g=t<0||0===t&&1/t<0?1:0;for(isNaN(t=Math.abs(t))||t===1/0?(l=isNaN(t)?1:0,a=u):(a=Math.floor(Math.log(t)/Math.LN2),t*(s=Math.pow(2,-a))<1&&(a--,s*=2),a+f>=1?t+=d/s:t+=d*Math.pow(2,1-f),t*s>=2&&(a++,s/=2),a+f>=u?(l=0,a=u):a+f>=1?(l=(t*s-1)*Math.pow(2,o),a+=f):(l=t*Math.pow(2,f-1)*Math.pow(2,o),a=0));o>=8;e[n+p]=255&l,p+=h,l/=256,o-=8);for(a=a<0;e[n+p]=255&a,p+=h,a/=256,c-=8);e[n+p-h]|=128*g}}},n={};function r(e){var o=n[e];if(void 0!==o)return o.exports;var i=n[e]={exports:{}},a=!0;try{t[e](i,i.exports,r),a=!1}finally{a&&delete n[e]}return i.exports}r.ab="//";var o=r(72);e.exports=o}()},66003:function(e){!function(){var t={229:function(e){var t,n,r,o=e.exports={};function i(){throw Error("setTimeout has not been defined")}function a(){throw Error("clearTimeout has not been defined")}function l(e){if(t===setTimeout)return setTimeout(e,0);if((t===i||!t)&&setTimeout)return t=setTimeout,setTimeout(e,0);try{return t(e,0)}catch(n){try{return t.call(null,e,0)}catch(n){return t.call(this,e,0)}}}!function(){try{t="function"==typeof setTimeout?setTimeout:i}catch(e){t=i}try{n="function"==typeof clearTimeout?clearTimeout:a}catch(e){n=a}}();var s=[],c=!1,u=-1;function f(){c&&r&&(c=!1,r.length?s=r.concat(s):u=-1,s.length&&d())}function d(){if(!c){var e=l(f);c=!0;for(var t=s.length;t;){for(r=s,s=[];++u1)for(var n=1;na?1:Math.round(100*u/a)/100,t.a!==f)return{h:t.h,s:t.s,l:t.l,a:f,source:"rgb"}}else{var d=void 0;if(r!==(d=c<0?0:c>i?1:Math.round(100*c/i)/100))return{h:t.h,s:t.s,l:t.l,a:d,source:"rgb"}}return null},u={},f=function(e,t,n,r){if("undefined"==typeof document&&!r)return null;var o=r?new r:document.createElement("canvas");o.width=2*n,o.height=2*n;var i=o.getContext("2d");return i?(i.fillStyle=e,i.fillRect(0,0,o.width,o.height),i.fillStyle=t,i.fillRect(0,0,n,n),i.translate(n,n),i.fillRect(0,0,n,n),o.toDataURL()):null},d=function(e,t,n,r){var o=e+"-"+t+"-"+n+(r?"-server":"");if(u[o])return u[o];var i=f(e,t,n,r);return u[o]=i,i},p=Object.assign||function(e){for(var t=1;t-1)){var o=n.getArrowOffset(),i=38===e.keyCode?r+o:r-o;n.setUpdatedValue(i,e)}},n.handleDrag=function(e){if(n.props.dragLabel){var t=Math.round(n.props.value+e.movementX);t>=0&&t<=n.props.dragMax&&n.props.onChange&&n.props.onChange(n.getValueObjectWithLabel(t),e)}},n.handleMouseDown=function(e){n.props.dragLabel&&(e.preventDefault(),n.handleDrag(e),window.addEventListener("mousemove",n.handleDrag),window.addEventListener("mouseup",n.handleMouseUp))},n.handleMouseUp=function(){n.unbindEventListeners()},n.unbindEventListeners=function(){window.removeEventListener("mousemove",n.handleDrag),window.removeEventListener("mouseup",n.handleMouseUp)},n.state={value:String(e.value).toUpperCase(),blurValue:String(e.value).toUpperCase()},n.inputId="rc-editable-input-"+w++,n}return!function(e,t){if("function"!=typeof t&&null!==t)throw TypeError("Super expression must either be null or a function, not "+typeof 
t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),y(t,[{key:"componentDidUpdate",value:function(e,t){this.props.value!==this.state.value&&(e.value!==this.props.value||t.value!==this.state.value)&&(this.input===document.activeElement?this.setState({blurValue:String(this.props.value).toUpperCase()}):this.setState({value:String(this.props.value).toUpperCase(),blurValue:!this.state.blurValue&&String(this.props.value).toUpperCase()}))}},{key:"componentWillUnmount",value:function(){this.unbindEventListeners()}},{key:"getValueObjectWithLabel",value:function(e){var t,n;return t={},(n=this.props.label)in t?Object.defineProperty(t,n,{value:e,enumerable:!0,configurable:!0,writable:!0}):t[n]=e,t}},{key:"getArrowOffset",value:function(){return this.props.arrowOffset||1}},{key:"setUpdatedValue",value:function(e,t){var n=this.props.label?this.getValueObjectWithLabel(e):e;this.props.onChange&&this.props.onChange(n,t),this.setState({value:e})}},{key:"render",value:function(){var e=this,t=(0,s.ZP)({default:{wrap:{position:"relative"}},"user-override":{wrap:this.props.style&&this.props.style.wrap?this.props.style.wrap:{},input:this.props.style&&this.props.style.input?this.props.style.input:{},label:this.props.style&&this.props.style.label?this.props.style.label:{}},"dragLabel-true":{label:{cursor:"ew-resize"}}},{"user-override":!0},this.props);return l.createElement("div",{style:t.wrap},l.createElement("input",{id:this.inputId,style:t.input,ref:function(t){return e.input=t},value:this.state.value,onKeyDown:this.handleKeyDown,onChange:this.handleChange,onBlur:this.handleBlur,placeholder:this.props.placeholder,spellCheck:"false"}),this.props.label&&!this.props.hideLabel?l.createElement("label",{htmlFor:this.inputId,style:t.label,onMouseDown:this.handleMouseDown},this.props.label):null)}}]),t}(l.PureComponent||l.Component),S=function(e,t,n,r){var o=r.clientWidth,i=r.clientHeight,a="number"==typeof e.pageX?e.pageX:e.touches[0].pageX,l="number"==typeof e.pageY?e.pageY:e.touches[0].pageY,s=a-(r.getBoundingClientRect().left+window.pageXOffset),c=l-(r.getBoundingClientRect().top+window.pageYOffset);if("vertical"===t){var u=void 0;if(u=c<0?359:c>i?0:360*(-(100*c/i)+100)/100,n.h!==u)return{h:u,s:n.s,l:n.l,a:n.a,source:"hsl"}}else{var f=void 0;if(f=s<0?0:s>o?359:360*(100*s/o)/100,n.h!==f)return{h:f,s:n.s,l:n.l,a:n.a,source:"hsl"}}return null},k=function(){function e(e,t){for(var n=0;n1?t[o-1]:void 0,a=o>2?t[2]:void 0;for(i=r.length>3&&"function"==typeof i?(o--,i):void 0,a&&(0,eb.Z)(t[0],t[1],a)&&(i=o<3?void 0:i,o=1),e=Object(e);++n=t||n<0||f&&r>=i}function g(){var e,n,r,o=ex();if(h(o))return m(o);l=setTimeout(g,(e=o-s,n=o-c,r=t-e,f?eP(r,i-n):r))}function m(e){return(l=void 0,d&&r)?p(e):(r=o=void 0,a)}function b(){var e,n=ex(),i=h(n);if(r=arguments,o=this,s=n,i){if(void 0===l)return c=e=s,l=setTimeout(g,t),u?p(e):a;if(f)return clearTimeout(l),l=setTimeout(g,t),p(s)}return void 0===l&&(l=setTimeout(g,t)),a}return t=eR(t)||0,(0,q.Z)(n)&&(u=!!n.leading,i=(f="maxWait"in n)?eT(eR(n.maxWait)||0,t):i,d="trailing"in n?!!n.trailing:d),b.cancel=function(){void 0!==l&&clearTimeout(l),c=0,r=s=o=l=void 0},b.flush=function(){return void 0===l?a:m(ex())},b},ej=function(e,t,n){var r=!0,o=!0;if("function"!=typeof e)throw TypeError("Expected a function");return(0,q.Z)(n)&&(r="leading"in n?!!n.leading:r,o="trailing"in n?!!n.trailing:o),eM(e,t,{leading:r,maxWait:t,trailing:o})},eL=function(e,t,n){var 
r=n.getBoundingClientRect(),o=r.width,i=r.height,a="number"==typeof e.pageX?e.pageX:e.touches[0].pageX,l="number"==typeof e.pageY?e.pageY:e.touches[0].pageY,s=a-(n.getBoundingClientRect().left+window.pageXOffset),c=l-(n.getBoundingClientRect().top+window.pageYOffset);s<0?s=0:s>o&&(s=o),c<0?c=0:c>i&&(c=i);var u=s/o,f=1-c/i;return{h:t.h,s:u,v:f,a:t.a,source:"hsv"}},eI=function(){function e(e,t){for(var n=0;n1&&(n-=1),n<1/6)?e+(t-e)*6*n:n<.5?t:n<2/3?e+(t-e)*(2/3-n)*6:e}if(e=te(e,360),t=te(t,100),n=te(n,100),0===t)r=o=i=n;else{var l=n<.5?n*(1+t):n+t-n*t,s=2*n-l;r=a(s,l,e+1/3),o=a(s,l,e),i=a(s,l,e-1/3)}return{r:255*r,g:255*o,b:255*i}}(n.h,i,l),s=!0,c="hsl"),n.hasOwnProperty("a")&&(o=n.a)),o=e7(o),{ok:s,format:n.format||c,r:Math.min(255,Math.max(r.r,0)),g:Math.min(255,Math.max(r.g,0)),b:Math.min(255,Math.max(r.b,0)),a:o});this._originalInput=e,this._r=E.r,this._g=E.g,this._b=E.b,this._a=E.a,this._roundA=Math.round(100*this._a)/100,this._format=t.format||E.format,this._gradientType=t.gradientType,this._r<1&&(this._r=Math.round(this._r)),this._g<1&&(this._g=Math.round(this._g)),this._b<1&&(this._b=Math.round(this._b)),this._ok=E.ok}function eq(e,t,n){var r,o,i=Math.max(e=te(e,255),t=te(t,255),n=te(n,255)),a=Math.min(e,t,n),l=(i+a)/2;if(i==a)r=o=0;else{var s=i-a;switch(o=l>.5?s/(2-i-a):s/(i+a),i){case e:r=(t-n)/s+(t>1)+720)%360;--t;)r.h=(r.h+o)%360,i.push(eZ(r));return i}function e4(e,t){t=t||6;for(var n=eZ(e).toHsv(),r=n.h,o=n.s,i=n.v,a=[],l=1/t;t--;)a.push(eZ({h:r,s:o,v:i})),i=(i+l)%1;return a}eZ.prototype={isDark:function(){return 128>this.getBrightness()},isLight:function(){return!this.isDark()},isValid:function(){return this._ok},getOriginalInput:function(){return this._originalInput},getFormat:function(){return this._format},getAlpha:function(){return this._a},getBrightness:function(){var e=this.toRgb();return(299*e.r+587*e.g+114*e.b)/1e3},getLuminance:function(){var e,t,n,r=this.toRgb();return e=r.r/255,.2126*(e<=.03928?e/12.92:Math.pow((e+.055)/1.055,2.4))+.7152*((t=r.g/255)<=.03928?t/12.92:Math.pow((t+.055)/1.055,2.4))+.0722*((n=r.b/255)<=.03928?n/12.92:Math.pow((n+.055)/1.055,2.4))},setAlpha:function(e){return this._a=e7(e),this._roundA=Math.round(100*this._a)/100,this},toHsv:function(){var e=eW(this._r,this._g,this._b);return{h:360*e.h,s:e.s,v:e.v,a:this._a}},toHsvString:function(){var e=eW(this._r,this._g,this._b),t=Math.round(360*e.h),n=Math.round(100*e.s),r=Math.round(100*e.v);return 1==this._a?"hsv("+t+", "+n+"%, "+r+"%)":"hsva("+t+", "+n+"%, "+r+"%, "+this._roundA+")"},toHsl:function(){var e=eq(this._r,this._g,this._b);return{h:360*e.h,s:e.s,l:e.l,a:this._a}},toHslString:function(){var e=eq(this._r,this._g,this._b),t=Math.round(360*e.h),n=Math.round(100*e.s),r=Math.round(100*e.l);return 1==this._a?"hsl("+t+", "+n+"%, "+r+"%)":"hsla("+t+", "+n+"%, "+r+"%, "+this._roundA+")"},toHex:function(e){return eV(this._r,this._g,this._b,e)},toHexString:function(e){return"#"+this.toHex(e)},toHex8:function(e){var t,n,r,o,i;return t=this._r,n=this._g,r=this._b,o=this._a,i=[tr(Math.round(t).toString(16)),tr(Math.round(n).toString(16)),tr(Math.round(r).toString(16)),tr(ti(o))],e&&i[0].charAt(0)==i[0].charAt(1)&&i[1].charAt(0)==i[1].charAt(1)&&i[2].charAt(0)==i[2].charAt(1)&&i[3].charAt(0)==i[3].charAt(1)?i[0].charAt(0)+i[1].charAt(0)+i[2].charAt(0)+i[3].charAt(0):i.join("")},toHex8String:function(e){return"#"+this.toHex8(e)},toRgb:function(){return{r:Math.round(this._r),g:Math.round(this._g),b:Math.round(this._b),a:this._a}},toRgbString:function(){return 1==this._a?"rgb("+Math.round(this._r)+", 
"+Math.round(this._g)+", "+Math.round(this._b)+")":"rgba("+Math.round(this._r)+", "+Math.round(this._g)+", "+Math.round(this._b)+", "+this._roundA+")"},toPercentageRgb:function(){return{r:Math.round(100*te(this._r,255))+"%",g:Math.round(100*te(this._g,255))+"%",b:Math.round(100*te(this._b,255))+"%",a:this._a}},toPercentageRgbString:function(){return 1==this._a?"rgb("+Math.round(100*te(this._r,255))+"%, "+Math.round(100*te(this._g,255))+"%, "+Math.round(100*te(this._b,255))+"%)":"rgba("+Math.round(100*te(this._r,255))+"%, "+Math.round(100*te(this._g,255))+"%, "+Math.round(100*te(this._b,255))+"%, "+this._roundA+")"},toName:function(){return 0===this._a?"transparent":!(this._a<1)&&(e9[eV(this._r,this._g,this._b,!0)]||!1)},toFilter:function(e){var t="#"+eG(this._r,this._g,this._b,this._a),n=t,r=this._gradientType?"GradientType = 1, ":"";if(e){var o=eZ(e);n="#"+eG(o._r,o._g,o._b,o._a)}return"progid:DXImageTransform.Microsoft.gradient("+r+"startColorstr="+t+",endColorstr="+n+")"},toString:function(e){var t=!!e;e=e||this._format;var n=!1,r=this._a<1&&this._a>=0;return!t&&r&&("hex"===e||"hex6"===e||"hex3"===e||"hex4"===e||"hex8"===e||"name"===e)?"name"===e&&0===this._a?this.toName():this.toRgbString():("rgb"===e&&(n=this.toRgbString()),"prgb"===e&&(n=this.toPercentageRgbString()),("hex"===e||"hex6"===e)&&(n=this.toHexString()),"hex3"===e&&(n=this.toHexString(!0)),"hex4"===e&&(n=this.toHex8String(!0)),"hex8"===e&&(n=this.toHex8String()),"name"===e&&(n=this.toName()),"hsl"===e&&(n=this.toHslString()),"hsv"===e&&(n=this.toHsvString()),n||this.toHexString())},clone:function(){return eZ(this.toString())},_applyModification:function(e,t){var n=e.apply(null,[this].concat([].slice.call(t)));return this._r=n._r,this._g=n._g,this._b=n._b,this.setAlpha(n._a),this},lighten:function(){return this._applyModification(eJ,arguments)},brighten:function(){return this._applyModification(eQ,arguments)},darken:function(){return this._applyModification(e0,arguments)},desaturate:function(){return this._applyModification(eK,arguments)},saturate:function(){return this._applyModification(eY,arguments)},greyscale:function(){return this._applyModification(eX,arguments)},spin:function(){return this._applyModification(e1,arguments)},_applyCombination:function(e,t){return e.apply(null,[this].concat([].slice.call(t)))},analogous:function(){return this._applyCombination(e6,arguments)},complement:function(){return this._applyCombination(e2,arguments)},monochromatic:function(){return this._applyCombination(e4,arguments)},splitcomplement:function(){return this._applyCombination(e3,arguments)},triad:function(){return this._applyCombination(e5,[3])},tetrad:function(){return this._applyCombination(e5,[4])}},eZ.fromRatio=function(e,t){if("object"==e$(e)){var n={};for(var r in e)e.hasOwnProperty(r)&&("a"===r?n[r]=e[r]:n[r]=to(e[r]));e=n}return eZ(e,t)},eZ.equals=function(e,t){return!!e&&!!t&&eZ(e).toRgbString()==eZ(t).toRgbString()},eZ.random=function(){return eZ.fromRatio({r:Math.random(),g:Math.random(),b:Math.random()})},eZ.mix=function(e,t,n){n=0===n?0:n||50;var r=eZ(e).toRgb(),o=eZ(t).toRgb(),i=n/100;return eZ({r:(o.r-r.r)*i+r.r,g:(o.g-r.g)*i+r.g,b:(o.b-r.b)*i+r.b,a:(o.a-r.a)*i+r.a})},eZ.readability=function(e,t){var n=eZ(e),r=eZ(t);return(Math.max(n.getLuminance(),r.getLuminance())+.05)/(Math.min(n.getLuminance(),r.getLuminance())+.05)},eZ.isReadable=function(e,t,n){var 
r,o,i,a,l,s=eZ.readability(e,t);switch(l=!1,(o=((r=(r=n)||{level:"AA",size:"small"}).level||"AA").toUpperCase(),i=(r.size||"small").toLowerCase(),"AA"!==o&&"AAA"!==o&&(o="AA"),"small"!==i&&"large"!==i&&(i="small"),a={level:o,size:i}).level+a.size){case"AAsmall":case"AAAlarge":l=s>=4.5;break;case"AAlarge":l=s>=3;break;case"AAAsmall":l=s>=7}return l},eZ.mostReadable=function(e,t,n){var r,o,i,a,l=null,s=0;o=(n=n||{}).includeFallbackColors,i=n.level,a=n.size;for(var c=0;cs&&(s=r,l=eZ(t[c]));return eZ.isReadable(e,l,{level:i,size:a})||!o?l:(n.includeFallbackColors=!1,eZ.mostReadable(e,["#fff","#000"],n))};var e8=eZ.names={aliceblue:"f0f8ff",antiquewhite:"faebd7",aqua:"0ff",aquamarine:"7fffd4",azure:"f0ffff",beige:"f5f5dc",bisque:"ffe4c4",black:"000",blanchedalmond:"ffebcd",blue:"00f",blueviolet:"8a2be2",brown:"a52a2a",burlywood:"deb887",burntsienna:"ea7e5d",cadetblue:"5f9ea0",chartreuse:"7fff00",chocolate:"d2691e",coral:"ff7f50",cornflowerblue:"6495ed",cornsilk:"fff8dc",crimson:"dc143c",cyan:"0ff",darkblue:"00008b",darkcyan:"008b8b",darkgoldenrod:"b8860b",darkgray:"a9a9a9",darkgreen:"006400",darkgrey:"a9a9a9",darkkhaki:"bdb76b",darkmagenta:"8b008b",darkolivegreen:"556b2f",darkorange:"ff8c00",darkorchid:"9932cc",darkred:"8b0000",darksalmon:"e9967a",darkseagreen:"8fbc8f",darkslateblue:"483d8b",darkslategray:"2f4f4f",darkslategrey:"2f4f4f",darkturquoise:"00ced1",darkviolet:"9400d3",deeppink:"ff1493",deepskyblue:"00bfff",dimgray:"696969",dimgrey:"696969",dodgerblue:"1e90ff",firebrick:"b22222",floralwhite:"fffaf0",forestgreen:"228b22",fuchsia:"f0f",gainsboro:"dcdcdc",ghostwhite:"f8f8ff",gold:"ffd700",goldenrod:"daa520",gray:"808080",green:"008000",greenyellow:"adff2f",grey:"808080",honeydew:"f0fff0",hotpink:"ff69b4",indianred:"cd5c5c",indigo:"4b0082",ivory:"fffff0",khaki:"f0e68c",lavender:"e6e6fa",lavenderblush:"fff0f5",lawngreen:"7cfc00",lemonchiffon:"fffacd",lightblue:"add8e6",lightcoral:"f08080",lightcyan:"e0ffff",lightgoldenrodyellow:"fafad2",lightgray:"d3d3d3",lightgreen:"90ee90",lightgrey:"d3d3d3",lightpink:"ffb6c1",lightsalmon:"ffa07a",lightseagreen:"20b2aa",lightskyblue:"87cefa",lightslategray:"789",lightslategrey:"789",lightsteelblue:"b0c4de",lightyellow:"ffffe0",lime:"0f0",limegreen:"32cd32",linen:"faf0e6",magenta:"f0f",maroon:"800000",mediumaquamarine:"66cdaa",mediumblue:"0000cd",mediumorchid:"ba55d3",mediumpurple:"9370db",mediumseagreen:"3cb371",mediumslateblue:"7b68ee",mediumspringgreen:"00fa9a",mediumturquoise:"48d1cc",mediumvioletred:"c71585",midnightblue:"191970",mintcream:"f5fffa",mistyrose:"ffe4e1",moccasin:"ffe4b5",navajowhite:"ffdead",navy:"000080",oldlace:"fdf5e6",olive:"808000",olivedrab:"6b8e23",orange:"ffa500",orangered:"ff4500",orchid:"da70d6",palegoldenrod:"eee8aa",palegreen:"98fb98",paleturquoise:"afeeee",palevioletred:"db7093",papayawhip:"ffefd5",peachpuff:"ffdab9",peru:"cd853f",pink:"ffc0cb",plum:"dda0dd",powderblue:"b0e0e6",purple:"800080",rebeccapurple:"663399",red:"f00",rosybrown:"bc8f8f",royalblue:"4169e1",saddlebrown:"8b4513",salmon:"fa8072",sandybrown:"f4a460",seagreen:"2e8b57",seashell:"fff5ee",sienna:"a0522d",silver:"c0c0c0",skyblue:"87ceeb",slateblue:"6a5acd",slategray:"708090",slategrey:"708090",snow:"fffafa",springgreen:"00ff7f",steelblue:"4682b4",tan:"d2b48c",teal:"008080",thistle:"d8bfd8",tomato:"ff6347",turquoise:"40e0d0",violet:"ee82ee",wheat:"f5deb3",white:"fff",whitesmoke:"f5f5f5",yellow:"ff0",yellowgreen:"9acd32"},e9=eZ.hexNames=function(e){var t={};for(var n in e)e.hasOwnProperty(n)&&(t[e[n]]=n);return t}(e8);function 
e7(e){return(isNaN(e=parseFloat(e))||e<0||e>1)&&(e=1),e}function te(e,t){"string"==typeof(n=e)&&-1!=n.indexOf(".")&&1===parseFloat(n)&&(e="100%");var n,r,o="string"==typeof(r=e)&&-1!=r.indexOf("%");return(e=Math.min(t,Math.max(0,parseFloat(e))),o&&(e=parseInt(e*t,10)/100),1e-6>Math.abs(e-t))?1:e%t/parseFloat(t)}function tt(e){return Math.min(1,Math.max(0,e))}function tn(e){return parseInt(e,16)}function tr(e){return 1==e.length?"0"+e:""+e}function to(e){return e<=1&&(e=100*e+"%"),e}function ti(e){return Math.round(255*parseFloat(e)).toString(16)}var ta=(i="[\\s|\\(]+("+(o="(?:[-\\+]?\\d*\\.\\d+%?)|(?:[-\\+]?\\d+%?)")+")[,|\\s]+("+o+")[,|\\s]+("+o+")\\s*\\)?",a="[\\s|\\(]+("+o+")[,|\\s]+("+o+")[,|\\s]+("+o+")[,|\\s]+("+o+")\\s*\\)?",{CSS_UNIT:new RegExp(o),rgb:RegExp("rgb"+i),rgba:RegExp("rgba"+a),hsl:RegExp("hsl"+i),hsla:RegExp("hsla"+a),hsv:RegExp("hsv"+i),hsva:RegExp("hsva"+a),hex3:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex6:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/,hex4:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex8:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/});function tl(e){return!!ta.CSS_UNIT.exec(e)}var ts=function(e){var t,n,r=0,o=0;return t=["r","g","b","a","h","s","l","v"],n=function(t){e[t]&&(r+=1,isNaN(e[t])||(o+=1),("s"===t||"l"===t)&&/^\d+%$/.test(e[t])&&(o+=1))},((0,X.Z)(t)?eF:eB.Z)(t,"function"==typeof n?n:ez.Z),r===o&&e},tc=function(e,t){var n=e.hex?eZ(e.hex):eZ(e),r=n.toHsl(),o=n.toHsv(),i=n.toRgb(),a=n.toHex();return 0===r.s&&(r.h=t||0,o.h=t||0),{hsl:r,hex:"000000"===a&&0===i.a?"transparent":"#"+a,rgb:i,hsv:o,oldHue:e.h||t||r.h,source:e.source}},tu=function(e){if("transparent"===e)return!0;var t="#"===String(e).charAt(0)?1:0;return e.length!==4+t&&e.length<7+t&&eZ(e).isValid()},tf=function(e){if(!e)return"#fff";var t=tc(e);return"transparent"===t.hex?"rgba(0,0,0,0.4)":(299*t.rgb.r+587*t.rgb.g+114*t.rgb.b)/1e3>=128?"#000":"#fff"},td=function(e,t){return eZ(t+" ("+e.replace("\xb0","")+")")._ok},tp=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:"span";return function(n){function r(){!function(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}(this,r);for(var e,t,n,o=arguments.length,i=Array(o),a=0;a1&&(e.a=1),n.props.onChange({h:n.props.hsl.h,s:n.props.hsl.s,l:n.props.hsl.l,a:Math.round(100*e.a)/100,source:"rgb"},t)):(e.h||e.s||e.l)&&("string"==typeof e.s&&e.s.includes("%")&&(e.s=e.s.replace("%","")),"string"==typeof e.l&&e.l.includes("%")&&(e.l=e.l.replace("%","")),1==e.s?e.s=.01:1==e.l&&(e.l=.01),n.props.onChange({h:e.h||n.props.hsl.h,s:Number(tV(e.s)?n.props.hsl.s:e.s),l:Number(tV(e.l)?n.props.hsl.l:e.l),source:"hsl"},t))},n.showHighlight=function(e){e.currentTarget.style.background="#eee"},n.hideHighlight=function(e){e.currentTarget.style.background="transparent"},1!==e.hsl.a&&"hex"===e.view?n.state={view:"rgb"}:n.state={view:e.view},n}return!function(e,t){if("function"!=typeof t&&null!==t)throw TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),tK(t,[{key:"render",value:function(){var 
e=this,t=(0,s.ZP)({default:{wrap:{paddingTop:"16px",display:"flex"},fields:{flex:"1",display:"flex",marginLeft:"-6px"},field:{paddingLeft:"6px",width:"100%"},alpha:{paddingLeft:"6px",width:"100%"},toggle:{width:"32px",textAlign:"right",position:"relative"},icon:{marginRight:"-4px",marginTop:"12px",cursor:"pointer",position:"relative"},iconHighlight:{position:"absolute",width:"24px",height:"28px",background:"#eee",borderRadius:"4px",top:"10px",left:"12px",display:"none"},input:{fontSize:"11px",color:"#333",width:"100%",borderRadius:"2px",border:"none",boxShadow:"inset 0 0 0 1px #dadada",height:"21px",textAlign:"center"},label:{textTransform:"uppercase",fontSize:"11px",lineHeight:"11px",color:"#969696",textAlign:"center",display:"block",marginTop:"12px"},svg:{fill:"#333",width:"24px",height:"24px",border:"1px transparent solid",borderRadius:"5px"}},disableAlpha:{alpha:{display:"none"}}},this.props,this.state),n=void 0;return"hex"===this.state.view?n=l.createElement("div",{style:t.fields,className:"flexbox-fix"},l.createElement("div",{style:t.field},l.createElement(E,{style:{input:t.input,label:t.label},label:"hex",value:this.props.hex,onChange:this.handleChange}))):"rgb"===this.state.view?n=l.createElement("div",{style:t.fields,className:"flexbox-fix"},l.createElement("div",{style:t.field},l.createElement(E,{style:{input:t.input,label:t.label},label:"r",value:this.props.rgb.r,onChange:this.handleChange})),l.createElement("div",{style:t.field},l.createElement(E,{style:{input:t.input,label:t.label},label:"g",value:this.props.rgb.g,onChange:this.handleChange})),l.createElement("div",{style:t.field},l.createElement(E,{style:{input:t.input,label:t.label},label:"b",value:this.props.rgb.b,onChange:this.handleChange})),l.createElement("div",{style:t.alpha},l.createElement(E,{style:{input:t.input,label:t.label},label:"a",value:this.props.rgb.a,arrowOffset:.01,onChange:this.handleChange}))):"hsl"===this.state.view&&(n=l.createElement("div",{style:t.fields,className:"flexbox-fix"},l.createElement("div",{style:t.field},l.createElement(E,{style:{input:t.input,label:t.label},label:"h",value:Math.round(this.props.hsl.h),onChange:this.handleChange})),l.createElement("div",{style:t.field},l.createElement(E,{style:{input:t.input,label:t.label},label:"s",value:Math.round(100*this.props.hsl.s)+"%",onChange:this.handleChange})),l.createElement("div",{style:t.field},l.createElement(E,{style:{input:t.input,label:t.label},label:"l",value:Math.round(100*this.props.hsl.l)+"%",onChange:this.handleChange})),l.createElement("div",{style:t.alpha},l.createElement(E,{style:{input:t.input,label:t.label},label:"a",value:this.props.hsl.a,arrowOffset:.01,onChange:this.handleChange})))),l.createElement("div",{style:t.wrap,className:"flexbox-fix"},n,l.createElement("div",{style:t.toggle},l.createElement("div",{style:t.icon,onClick:this.toggleViews,ref:function(t){return e.icon=t}},l.createElement(tG.Z,{style:t.svg,onMouseOver:this.showHighlight,onMouseEnter:this.showHighlight,onMouseOut:this.hideHighlight}))))}}],[{key:"getDerivedStateFromProps",value:function(e,t){return 1!==e.hsl.a&&"hex"===t.view?{view:"rgb"}:null}}]),t}(l.Component);tY.defaultProps={view:"hex"};var tX=function(){var e=(0,s.ZP)({default:{picker:{width:"12px",height:"12px",borderRadius:"6px",transform:"translate(-6px, -1px)",backgroundColor:"rgb(248, 248, 248)",boxShadow:"0 1px 4px 0 rgba(0, 0, 0, 0.37)"}}});return l.createElement("div",{style:e.picker})},tJ=function(){var 
e=(0,s.ZP)({default:{picker:{width:"12px",height:"12px",borderRadius:"6px",boxShadow:"inset 0 0 0 1px #fff",transform:"translate(-6px, -6px)"}}});return l.createElement("div",{style:e.picker})},tQ=function(e){var t=e.width,n=e.onChange,r=e.disableAlpha,o=e.rgb,i=e.hsl,a=e.hsv,c=e.hex,u=e.renderers,f=e.styles,d=e.className,p=e.defaultView,g=(0,s.ZP)(ev({default:{picker:{width:t,background:"#fff",borderRadius:"2px",boxShadow:"0 0 2px rgba(0,0,0,.3), 0 4px 8px rgba(0,0,0,.3)",boxSizing:"initial",fontFamily:"Menlo"},saturation:{width:"100%",paddingBottom:"55%",position:"relative",borderRadius:"2px 2px 0 0",overflow:"hidden"},Saturation:{radius:"2px 2px 0 0"},body:{padding:"16px 16px 12px"},controls:{display:"flex"},color:{width:"32px"},swatch:{marginTop:"6px",width:"16px",height:"16px",borderRadius:"8px",position:"relative",overflow:"hidden"},active:{absolute:"0px 0px 0px 0px",borderRadius:"8px",boxShadow:"inset 0 0 0 1px rgba(0,0,0,.1)",background:"rgba("+o.r+", "+o.g+", "+o.b+", "+o.a+")",zIndex:"2"},toggles:{flex:"1"},hue:{height:"10px",position:"relative",marginBottom:"8px"},Hue:{radius:"2px"},alpha:{height:"10px",position:"relative"},Alpha:{radius:"2px"}},disableAlpha:{color:{width:"22px"},alpha:{display:"none"},hue:{marginBottom:"0px"},swatch:{width:"10px",height:"10px",marginTop:"0px"}}},void 0===f?{}:f),{disableAlpha:r});return l.createElement("div",{style:g.picker,className:"chrome-picker "+(void 0===d?"":d)},l.createElement("div",{style:g.saturation},l.createElement(eD,{style:g.Saturation,hsl:i,hsv:a,pointer:tJ,onChange:n})),l.createElement("div",{style:g.body},l.createElement("div",{style:g.controls,className:"flexbox-fix"},l.createElement("div",{style:g.color},l.createElement("div",{style:g.swatch},l.createElement("div",{style:g.active}),l.createElement(h,{renderers:u}))),l.createElement("div",{style:g.toggles},l.createElement("div",{style:g.hue},l.createElement(O,{style:g.Hue,hsl:i,pointer:tX,onChange:n})),l.createElement("div",{style:g.alpha},l.createElement(v,{style:g.Alpha,rgb:o,hsl:i,pointer:tX,renderers:u,onChange:n})))),l.createElement(tY,{rgb:o,hsl:i,hex:c,view:p,onChange:n,disableAlpha:r})))};tQ.propTypes={width:A().oneOfType([A().string,A().number]),disableAlpha:A().bool,styles:A().object,defaultView:A().oneOf(["hex","rgb","hsl"])},tQ.defaultProps={width:225,disableAlpha:!1,styles:{}},tg(tQ);var t0=function(e){var t=e.color,n=e.onClick,r=e.onSwatchHover,o=e.active,i=(0,s.ZP)({default:{color:{background:t,width:"15px",height:"15px",float:"left",marginRight:"5px",marginBottom:"5px",position:"relative",cursor:"pointer"},dot:{absolute:"5px 5px 5px 5px",background:tf(t),borderRadius:"50%",opacity:"0"}},active:{dot:{opacity:"1"}},"color-#FFFFFF":{color:{boxShadow:"inset 0 0 0 1px #ddd"},dot:{background:"#000"}},transparent:{dot:{background:"#000"}}},{active:o,"color-#FFFFFF":"#FFFFFF"===t,transparent:"transparent"===t});return l.createElement(tx,{style:i.color,color:t,onClick:void 0===n?function(){}:n,onHover:r,focusStyle:{boxShadow:"0 0 4px "+t}},l.createElement("div",{style:i.dot}))},t1=function(e){var 
t=e.hex,n=e.rgb,r=e.onChange,o=(0,s.ZP)({default:{fields:{display:"flex",paddingBottom:"6px",paddingRight:"5px",position:"relative"},active:{position:"absolute",top:"6px",left:"5px",height:"9px",width:"9px",background:t},HEXwrap:{flex:"6",position:"relative"},HEXinput:{width:"80%",padding:"0px",paddingLeft:"20%",border:"none",outline:"none",background:"none",fontSize:"12px",color:"#333",height:"16px"},HEXlabel:{display:"none"},RGBwrap:{flex:"3",position:"relative"},RGBinput:{width:"70%",padding:"0px",paddingLeft:"30%",border:"none",outline:"none",background:"none",fontSize:"12px",color:"#333",height:"16px"},RGBlabel:{position:"absolute",top:"3px",left:"0px",lineHeight:"16px",textTransform:"uppercase",fontSize:"12px",color:"#999"}}}),i=function(e,t){e.r||e.g||e.b?r({r:e.r||n.r,g:e.g||n.g,b:e.b||n.b,source:"rgb"},t):r({hex:e.hex,source:"hex"},t)};return l.createElement("div",{style:o.fields,className:"flexbox-fix"},l.createElement("div",{style:o.active}),l.createElement(E,{style:{wrap:o.HEXwrap,input:o.HEXinput,label:o.HEXlabel},label:"hex",value:t,onChange:i}),l.createElement(E,{style:{wrap:o.RGBwrap,input:o.RGBinput,label:o.RGBlabel},label:"r",value:n.r,onChange:i}),l.createElement(E,{style:{wrap:o.RGBwrap,input:o.RGBinput,label:o.RGBlabel},label:"g",value:n.g,onChange:i}),l.createElement(E,{style:{wrap:o.RGBwrap,input:o.RGBinput,label:o.RGBlabel},label:"b",value:n.b,onChange:i}))},t2=function(e){var t=e.onChange,n=e.onSwatchHover,r=e.colors,o=e.hex,i=e.rgb,a=e.styles,c=void 0===a?{}:a,u=e.className,f=(0,s.ZP)(ev({default:{Compact:{background:"#f6f6f6",radius:"4px"},compact:{paddingTop:"5px",paddingLeft:"5px",boxSizing:"initial",width:"240px"},clear:{clear:"both"}}},c)),d=function(e,n){e.hex?tu(e.hex)&&t({hex:e.hex,source:"hex"},n):t(e,n)};return l.createElement(ey,{style:f.Compact,styles:c},l.createElement("div",{style:f.compact,className:"compact-picker "+(void 0===u?"":u)},l.createElement("div",null,(0,tS.Z)(r,function(e){return l.createElement(t0,{key:e,color:e,active:e.toLowerCase()===o,onClick:d,onSwatchHover:n})}),l.createElement("div",{style:f.clear})),l.createElement(t1,{hex:o,rgb:i,onChange:d})))};t2.propTypes={colors:A().arrayOf(A().string),styles:A().object},t2.defaultProps={colors:["#4D4D4D","#999999","#FFFFFF","#F44E3B","#FE9200","#FCDC00","#DBDF00","#A4DD00","#68CCCA","#73D8FF","#AEA1FF","#FDA1FF","#333333","#808080","#cccccc","#D33115","#E27300","#FCC400","#B0BC00","#68BC00","#16A5A5","#009CE0","#7B64FF","#FA28FF","#000000","#666666","#B3B3B3","#9F0500","#C45100","#FB9E00","#808900","#194D33","#0C797D","#0062B1","#653294","#AB149E"],styles:{}},tg(t2);var t5=(0,s.tz)(function(e){var t=e.hover,n=e.color,r=e.onClick,o=e.onSwatchHover,i={position:"relative",zIndex:"2",outline:"2px solid #fff",boxShadow:"0 0 5px 2px rgba(0,0,0,0.25)"},a=(0,s.ZP)({default:{swatch:{width:"25px",height:"25px",fontSize:"0"}},hover:{swatch:i}},{hover:t});return l.createElement("div",{style:a.swatch},l.createElement(tx,{color:n,onClick:r,onHover:o,focusStyle:i}))}),t3=function(e){var t=e.width,n=e.colors,r=e.onChange,o=e.onSwatchHover,i=e.triangle,a=e.styles,c=e.className,u=(0,s.ZP)(ev({default:{card:{width:t,background:"#fff",border:"1px solid rgba(0,0,0,0.2)",boxShadow:"0 3px 12px rgba(0,0,0,0.15)",borderRadius:"4px",position:"relative",padding:"5px",display:"flex",flexWrap:"wrap"},triangle:{position:"absolute",border:"7px solid transparent",borderBottomColor:"#fff"},triangleShadow:{position:"absolute",border:"8px solid 
transparent",borderBottomColor:"rgba(0,0,0,0.15)"}},"hide-triangle":{triangle:{display:"none"},triangleShadow:{display:"none"}},"top-left-triangle":{triangle:{top:"-14px",left:"10px"},triangleShadow:{top:"-16px",left:"9px"}},"top-right-triangle":{triangle:{top:"-14px",right:"10px"},triangleShadow:{top:"-16px",right:"9px"}},"bottom-left-triangle":{triangle:{top:"35px",left:"10px",transform:"rotate(180deg)"},triangleShadow:{top:"37px",left:"9px",transform:"rotate(180deg)"}},"bottom-right-triangle":{triangle:{top:"35px",right:"10px",transform:"rotate(180deg)"},triangleShadow:{top:"37px",right:"9px",transform:"rotate(180deg)"}}},void 0===a?{}:a),{"hide-triangle":"hide"===i,"top-left-triangle":"top-left"===i,"top-right-triangle":"top-right"===i,"bottom-left-triangle":"bottom-left"===i,"bottom-right-triangle":"bottom-right"===i}),f=function(e,t){return r({hex:e,source:"hex"},t)};return l.createElement("div",{style:u.card,className:"github-picker "+(void 0===c?"":c)},l.createElement("div",{style:u.triangleShadow}),l.createElement("div",{style:u.triangle}),(0,tS.Z)(n,function(e){return l.createElement(t5,{color:e,key:e,onClick:f,onSwatchHover:o})}))};t3.propTypes={width:A().oneOfType([A().string,A().number]),colors:A().arrayOf(A().string),triangle:A().oneOf(["hide","top-left","top-right","bottom-left","bottom-right"]),styles:A().object},t3.defaultProps={width:200,colors:["#B80000","#DB3E00","#FCCB00","#008B02","#006B76","#1273DE","#004DCF","#5300EB","#EB9694","#FAD0C3","#FEF3BD","#C1E1C5","#BEDADC","#C4DEF6","#BED3F3","#D4C4FB"],triangle:"top-left",styles:{}},tg(t3);var t6=Object.assign||function(e){for(var t=1;t.5});return l.createElement("div",{style:n.picker})},t7=function(){var e=(0,s.ZP)({default:{triangle:{width:0,height:0,borderStyle:"solid",borderWidth:"4px 0 4px 6px",borderColor:"transparent transparent transparent #fff",position:"absolute",top:"1px",left:"1px"},triangleBorder:{width:0,height:0,borderStyle:"solid",borderWidth:"5px 0 5px 8px",borderColor:"transparent transparent transparent #555"},left:{Extend:"triangleBorder",transform:"translate(-13px, -4px)"},leftInside:{Extend:"triangle",transform:"translate(-8px, -5px)"},right:{Extend:"triangleBorder",transform:"translate(20px, -14px) rotate(180deg)"},rightInside:{Extend:"triangle",transform:"translate(-8px, -5px)"}}});return l.createElement("div",{style:e.pointer},l.createElement("div",{style:e.left},l.createElement("div",{style:e.leftInside})),l.createElement("div",{style:e.right},l.createElement("div",{style:e.rightInside})))},ne=function(e){var t=e.onClick,n=e.label,r=e.children,o=e.active,i=(0,s.ZP)({default:{button:{backgroundImage:"linear-gradient(-180deg, #FFFFFF 0%, #E6E6E6 100%)",border:"1px solid #878787",borderRadius:"2px",height:"20px",boxShadow:"0 1px 0 0 #EAEAEA",fontSize:"14px",color:"#000",lineHeight:"20px",textAlign:"center",marginBottom:"10px",cursor:"pointer"}},active:{button:{boxShadow:"0 0 0 1px #878787"}}},{active:o});return l.createElement("div",{style:i.button,onClick:t},n||r)},nt=function(e){var t=e.rgb,n=e.currentColor,r=(0,s.ZP)({default:{swatches:{border:"1px solid #B3B3B3",borderBottom:"1px solid #F0F0F0",marginBottom:"2px",marginTop:"1px"},new:{height:"34px",background:"rgb("+t.r+","+t.g+", "+t.b+")",boxShadow:"inset 1px 0 0 #000, inset -1px 0 0 #000, inset 0 1px 0 #000"},current:{height:"34px",background:n,boxShadow:"inset 1px 0 0 #000, inset -1px 0 0 #000, inset 0 -1px 0 #000"},label:{fontSize:"14px",color:"#000",textAlign:"center"}}});return 
l.createElement("div",null,l.createElement("div",{style:r.label},"new"),l.createElement("div",{style:r.swatches},l.createElement("div",{style:r.new}),l.createElement("div",{style:r.current})),l.createElement("div",{style:r.label},"current"))},nn=function(){function e(e,t){for(var n=0;n100&&(e.a=100),e.a/=100,t({h:r.h,s:r.s,l:r.l,a:e.a,source:"rgb"},o))};return l.createElement("div",{style:a.fields,className:"flexbox-fix"},l.createElement("div",{style:a.double},l.createElement(E,{style:{input:a.input,label:a.label},label:"hex",value:o.replace("#",""),onChange:c})),l.createElement("div",{style:a.single},l.createElement(E,{style:{input:a.input,label:a.label},label:"r",value:n.r,onChange:c,dragLabel:"true",dragMax:"255"})),l.createElement("div",{style:a.single},l.createElement(E,{style:{input:a.input,label:a.label},label:"g",value:n.g,onChange:c,dragLabel:"true",dragMax:"255"})),l.createElement("div",{style:a.single},l.createElement(E,{style:{input:a.input,label:a.label},label:"b",value:n.b,onChange:c,dragLabel:"true",dragMax:"255"})),l.createElement("div",{style:a.alpha},l.createElement(E,{style:{input:a.input,label:a.label},label:"a",value:Math.round(100*n.a),onChange:c,dragLabel:"true",dragMax:"100"})))},ni=Object.assign||function(e){for(var t=1;tMath.abs(n.l-.8)&&.1>Math.abs(n.s-.5),onClick:t,first:!0})),l.createElement("div",{style:r.swatch},l.createElement(nc,{hsl:n,offset:".65",active:.1>Math.abs(n.l-.65)&&.1>Math.abs(n.s-.5),onClick:t})),l.createElement("div",{style:r.swatch},l.createElement(nc,{hsl:n,offset:".50",active:.1>Math.abs(n.l-.5)&&.1>Math.abs(n.s-.5),onClick:t})),l.createElement("div",{style:r.swatch},l.createElement(nc,{hsl:n,offset:".35",active:.1>Math.abs(n.l-.35)&&.1>Math.abs(n.s-.5),onClick:t})),l.createElement("div",{style:r.swatch},l.createElement(nc,{hsl:n,offset:".20",active:.1>Math.abs(n.l-.2)&&.1>Math.abs(n.s-.5),onClick:t,last:!0})),l.createElement("div",{style:r.clear}))},nf=function(e){var t=e.hsl,n=e.onChange,r=e.pointer,o=e.styles,i=e.className,a=(0,s.ZP)(ev({default:{hue:{height:"12px",position:"relative"},Hue:{radius:"2px"}}},void 0===o?{}:o));return l.createElement("div",{style:a.wrap||{},className:"slider-picker "+(void 0===i?"":i)},l.createElement("div",{style:a.hue},l.createElement(O,{style:a.Hue,hsl:t,pointer:r,onChange:n})),l.createElement("div",{style:a.swatches},l.createElement(nu,{hsl:t,onClick:n})))};nf.propTypes={styles:A().object},nf.defaultProps={pointer:function(){var e=(0,s.ZP)({default:{picker:{width:"14px",height:"14px",borderRadius:"6px",transform:"translate(-7px, -1px)",backgroundColor:"rgb(248, 248, 248)",boxShadow:"0 1px 4px 0 rgba(0, 0, 0, 0.37)"}}});return l.createElement("div",{style:e.picker})},styles:{}},tg(nf);var nd=n(29872),np=function(e){var t=e.color,n=e.onClick,r=e.onSwatchHover,o=e.first,i=e.last,a=e.active,c=(0,s.ZP)({default:{color:{width:"40px",height:"24px",cursor:"pointer",background:t,marginBottom:"1px"},check:{color:tf(t),marginLeft:"8px",display:"none"}},first:{color:{overflow:"hidden",borderRadius:"2px 2px 0 0"}},last:{color:{overflow:"hidden",borderRadius:"0 0 2px 2px"}},active:{check:{display:"block"}},"color-#FFFFFF":{color:{boxShadow:"inset 0 0 0 1px #ddd"},check:{color:"#333"}},transparent:{check:{color:"#333"}}},{first:o,last:i,active:a,"color-#FFFFFF":"#FFFFFF"===t,transparent:"transparent"===t});return l.createElement(tx,{color:t,style:c.color,onClick:void 0===n?function(){}:n,onHover:r,focusStyle:{boxShadow:"0 0 4px "+t}},l.createElement("div",{style:c.check},l.createElement(nd.Z,null)))},nh=function(e){var 
t=e.onClick,n=e.onSwatchHover,r=e.group,o=e.active,i=(0,s.ZP)({default:{group:{paddingBottom:"10px",width:"40px",float:"left",marginRight:"10px"}}});return l.createElement("div",{style:i.group},(0,tS.Z)(r,function(e,i){return l.createElement(np,{key:e,color:e,active:e.toLowerCase()===o,first:0===i,last:i===r.length-1,onClick:t,onSwatchHover:n})}))},ng=function(e){var t=e.width,n=e.height,r=e.onChange,o=e.onSwatchHover,i=e.colors,a=e.hex,c=e.styles,u=e.className,f=(0,s.ZP)(ev({default:{picker:{width:t,height:n},overflow:{height:n,overflowY:"scroll"},body:{padding:"16px 0 6px 16px"},clear:{clear:"both"}}},void 0===c?{}:c)),d=function(e,t){return r({hex:e,source:"hex"},t)};return l.createElement("div",{style:f.picker,className:"swatches-picker "+(void 0===u?"":u)},l.createElement(ey,null,l.createElement("div",{style:f.overflow},l.createElement("div",{style:f.body},(0,tS.Z)(i,function(e){return l.createElement(nh,{key:e.toString(),group:e,active:a,onClick:d,onSwatchHover:o})}),l.createElement("div",{style:f.clear})))))};ng.propTypes={width:A().oneOfType([A().string,A().number]),height:A().oneOfType([A().string,A().number]),colors:A().arrayOf(A().arrayOf(A().string)),styles:A().object},ng.defaultProps={width:320,height:240,colors:[[tO["900"],tO["700"],tO["500"],tO["300"],tO["100"]],[tC["900"],tC["700"],tC["500"],tC["300"],tC["100"]],[tA["900"],tA["700"],tA["500"],tA["300"],tA["100"]],[tN["900"],tN["700"],tN["500"],tN["300"],tN["100"]],[tR["900"],tR["700"],tR["500"],tR["300"],tR["100"]],[tT["900"],tT["700"],tT["500"],tT["300"],tT["100"]],[tP["900"],tP["700"],tP["500"],tP["300"],tP["100"]],[tM["900"],tM["700"],tM["500"],tM["300"],tM["100"]],[tj["900"],tj["700"],tj["500"],tj["300"],tj["100"]],["#194D33",tL["700"],tL["500"],tL["300"],tL["100"]],[tI["900"],tI["700"],tI["500"],tI["300"],tI["100"]],[tD["900"],tD["700"],tD["500"],tD["300"],tD["100"]],[tF["900"],tF["700"],tF["500"],tF["300"],tF["100"]],[tB["900"],tB["700"],tB["500"],tB["300"],tB["100"]],[tz["900"],tz["700"],tz["500"],tz["300"],tz["100"]],[t$["900"],t$["700"],t$["500"],t$["300"],t$["100"]],[tU["900"],tU["700"],tU["500"],tU["300"],tU["100"]],[tH["900"],tH["700"],tH["500"],tH["300"],tH["100"]],["#000000","#525252","#969696","#D9D9D9","#FFFFFF"]],styles:{}},tg(ng);var nm=function(e){var t=e.onChange,n=e.onSwatchHover,r=e.hex,o=e.colors,i=e.width,a=e.triangle,c=e.styles,u=e.className,f=(0,s.ZP)(ev({default:{card:{width:i,background:"#fff",border:"0 solid rgba(0,0,0,0.25)",boxShadow:"0 1px 4px rgba(0,0,0,0.25)",borderRadius:"4px",position:"relative"},body:{padding:"15px 9px 9px 15px"},label:{fontSize:"18px",color:"#fff"},triangle:{width:"0px",height:"0px",borderStyle:"solid",borderWidth:"0 9px 10px 9px",borderColor:"transparent transparent #fff transparent",position:"absolute"},triangleShadow:{width:"0px",height:"0px",borderStyle:"solid",borderWidth:"0 9px 10px 9px",borderColor:"transparent transparent rgba(0,0,0,.1) transparent",position:"absolute"},hash:{background:"#F0F0F0",height:"30px",width:"30px",borderRadius:"4px 0 0 4px",float:"left",color:"#98A1A4",display:"flex",alignItems:"center",justifyContent:"center"},input:{width:"100px",fontSize:"14px",color:"#666",border:"0px",outline:"none",height:"28px",boxShadow:"inset 0 0 0 1px #F0F0F0",boxSizing:"content-box",borderRadius:"0 4px 4px 0",float:"left",paddingLeft:"8px"},swatch:{width:"30px",height:"30px",float:"left",borderRadius:"4px",margin:"0 6px 6px 
0"},clear:{clear:"both"}},"hide-triangle":{triangle:{display:"none"},triangleShadow:{display:"none"}},"top-left-triangle":{triangle:{top:"-10px",left:"12px"},triangleShadow:{top:"-11px",left:"12px"}},"top-right-triangle":{triangle:{top:"-10px",right:"12px"},triangleShadow:{top:"-11px",right:"12px"}}},void 0===c?{}:c),{"hide-triangle":"hide"===a,"top-left-triangle":"top-left"===a,"top-right-triangle":"top-right"===a}),d=function(e,n){tu(e)&&t({hex:e,source:"hex"},n)};return l.createElement("div",{style:f.card,className:"twitter-picker "+(void 0===u?"":u)},l.createElement("div",{style:f.triangleShadow}),l.createElement("div",{style:f.triangle}),l.createElement("div",{style:f.body},(0,tS.Z)(o,function(e,t){return l.createElement(tx,{key:t,color:e,hex:e,style:f.swatch,onClick:d,onHover:n,focusStyle:{boxShadow:"0 0 4px "+e}})}),l.createElement("div",{style:f.hash},"#"),l.createElement(E,{label:null,style:{input:f.input},value:r.replace("#",""),onChange:d}),l.createElement("div",{style:f.clear})))};nm.propTypes={width:A().oneOfType([A().string,A().number]),triangle:A().oneOf(["hide","top-left","top-right"]),colors:A().arrayOf(A().string),styles:A().object},nm.defaultProps={width:276,colors:["#FF6900","#FCB900","#7BDCB5","#00D084","#8ED1FC","#0693E3","#ABB8C3","#EB144C","#F78DA7","#9900EF"],triangle:"top-left",styles:{}};var nb=tg(nm),nv=function(e){var t=(0,s.ZP)({default:{picker:{width:"20px",height:"20px",borderRadius:"22px",border:"2px #fff solid",transform:"translate(-12px, -13px)",background:"hsl("+Math.round(e.hsl.h)+", "+Math.round(100*e.hsl.s)+"%, "+Math.round(100*e.hsl.l)+"%)"}}});return l.createElement("div",{style:t.picker})};nv.propTypes={hsl:A().shape({h:A().number,s:A().number,l:A().number,a:A().number})},nv.defaultProps={hsl:{a:1,h:249.94,l:.2,s:.5}};var ny=function(e){var t=(0,s.ZP)({default:{picker:{width:"20px",height:"20px",borderRadius:"22px",transform:"translate(-10px, -7px)",background:"hsl("+Math.round(e.hsl.h)+", 100%, 50%)",border:"2px white solid"}}});return l.createElement("div",{style:t.picker})};ny.propTypes={hsl:A().shape({h:A().number,s:A().number,l:A().number,a:A().number})},ny.defaultProps={hsl:{a:1,h:249.94,l:.2,s:.5}};var nx=function(e){var t=e.onChange,n=e.rgb,r=e.hsl,o=e.hex,i=e.hsv,a=function(e,n){if(e.hex)tu(e.hex)&&t({hex:e.hex,source:"hex"},n);else if(e.rgb){var r=e.rgb.split(",");td(e.rgb,"rgb")&&t({r:r[0],g:r[1],b:r[2],a:1,source:"rgb"},n)}else if(e.hsv){var o=e.hsv.split(",");td(e.hsv,"hsv")&&(o[2]=o[2].replace("%",""),o[1]=o[1].replace("%",""),o[0]=o[0].replace("\xb0",""),1==o[1]?o[1]=.01:1==o[2]&&(o[2]=.01),t({h:Number(o[0]),s:Number(o[1]),v:Number(o[2]),source:"hsv"},n))}else if(e.hsl){var i=e.hsl.split(",");td(e.hsl,"hsl")&&(i[2]=i[2].replace("%",""),i[1]=i[1].replace("%",""),i[0]=i[0].replace("\xb0",""),1==d[1]?d[1]=.01:1==d[2]&&(d[2]=.01),t({h:Number(i[0]),s:Number(i[1]),v:Number(i[2]),source:"hsl"},n))}},c=(0,s.ZP)({default:{wrap:{display:"flex",height:"100px",marginTop:"4px"},fields:{width:"100%"},column:{paddingTop:"10px",display:"flex",justifyContent:"space-between"},double:{padding:"0px 4.4px",boxSizing:"border-box"},input:{width:"100%",height:"38px",boxSizing:"border-box",padding:"4px 10% 3px",textAlign:"center",border:"1px solid #dadce0",fontSize:"11px",textTransform:"lowercase",borderRadius:"5px",outline:"none",fontFamily:"Roboto,Arial,sans-serif"},input2:{height:"38px",width:"100%",border:"1px solid 
#dadce0",boxSizing:"border-box",fontSize:"11px",textTransform:"lowercase",borderRadius:"5px",outline:"none",paddingLeft:"10px",fontFamily:"Roboto,Arial,sans-serif"},label:{textAlign:"center",fontSize:"12px",background:"#fff",position:"absolute",textTransform:"uppercase",color:"#3c4043",width:"35px",top:"-6px",left:"0",right:"0",marginLeft:"auto",marginRight:"auto",fontFamily:"Roboto,Arial,sans-serif"},label2:{left:"10px",textAlign:"center",fontSize:"12px",background:"#fff",position:"absolute",textTransform:"uppercase",color:"#3c4043",width:"32px",top:"-6px",fontFamily:"Roboto,Arial,sans-serif"},single:{flexGrow:"1",margin:"0px 4.4px"}}}),u=n.r+", "+n.g+", "+n.b,f=Math.round(r.h)+"\xb0, "+Math.round(100*r.s)+"%, "+Math.round(100*r.l)+"%",d=Math.round(i.h)+"\xb0, "+Math.round(100*i.s)+"%, "+Math.round(100*i.v)+"%";return l.createElement("div",{style:c.wrap,className:"flexbox-fix"},l.createElement("div",{style:c.fields},l.createElement("div",{style:c.double},l.createElement(E,{style:{input:c.input,label:c.label},label:"hex",value:o,onChange:a})),l.createElement("div",{style:c.column},l.createElement("div",{style:c.single},l.createElement(E,{style:{input:c.input2,label:c.label2},label:"rgb",value:u,onChange:a})),l.createElement("div",{style:c.single},l.createElement(E,{style:{input:c.input2,label:c.label2},label:"hsv",value:d,onChange:a})),l.createElement("div",{style:c.single},l.createElement(E,{style:{input:c.input2,label:c.label2},label:"hsl",value:f,onChange:a})))))},nw=function(e){var t=e.width,n=e.onChange,r=e.rgb,o=e.hsl,i=e.hsv,a=e.hex,c=e.header,u=e.styles,f=e.className,d=(0,s.ZP)(ev({default:{picker:{width:t,background:"#fff",border:"1px solid #dfe1e5",boxSizing:"initial",display:"flex",flexWrap:"wrap",borderRadius:"8px 8px 0px 0px"},head:{height:"57px",width:"100%",paddingTop:"16px",paddingBottom:"16px",paddingLeft:"16px",fontSize:"20px",boxSizing:"border-box",fontFamily:"Roboto-Regular,HelveticaNeue,Arial,sans-serif"},saturation:{width:"70%",padding:"0px",position:"relative",overflow:"hidden"},swatch:{width:"30%",height:"228px",padding:"0px",background:"rgba("+r.r+", "+r.g+", "+r.b+", 1)",position:"relative",overflow:"hidden"},body:{margin:"auto",width:"95%"},controls:{display:"flex",boxSizing:"border-box",height:"52px",paddingTop:"22px"},color:{width:"32px"},hue:{height:"8px",position:"relative",margin:"0px 16px 0px 16px",width:"100%"},Hue:{radius:"2px"}}},void 0===u?{}:u));return l.createElement("div",{style:d.picker,className:"google-picker "+(void 0===f?"":f)},l.createElement("div",{style:d.head},c),l.createElement("div",{style:d.swatch}),l.createElement("div",{style:d.saturation},l.createElement(eD,{hsl:o,hsv:i,pointer:nv,onChange:n})),l.createElement("div",{style:d.body},l.createElement("div",{style:d.controls,className:"flexbox-fix"},l.createElement("div",{style:d.hue},l.createElement(O,{style:d.Hue,hsl:o,radius:"4px",pointer:ny,onChange:n}))),l.createElement(nx,{rgb:r,hsl:o,hex:a,hsv:i,onChange:n})))};nw.propTypes={width:A().oneOfType([A().string,A().number]),styles:A().object,header:A().string},nw.defaultProps={width:652,styles:{},header:"Color picker"},tg(nw)},58467:function(e,t,n){"use strict";function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}Object.defineProperty(t,"__esModule",{value:!0}),t.CopyToClipboard=void 0;var o=l(n(86006)),i=l(n(27652)),a=["text","onCopy","options","children"];function 
l(e){return e&&e.__esModule?e:{default:e}}function s(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable})),n.push.apply(n,r)}return n}function c(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(e,a),r=o.default.Children.only(t);return o.default.cloneElement(r,c(c({},n),{},{onClick:this.onClick}))}}],u(g.prototype,n),l&&u(g,l),Object.defineProperty(g,"prototype",{writable:!1}),g}(o.default.PureComponent);t.CopyToClipboard=g,h(g,"defaultProps",{onCopy:void 0,options:void 0})},10688:function(e,t,n){"use strict";var r=n(58467).CopyToClipboard;r.CopyToClipboard=r,e.exports=r},83393:function(e,t,n){"use strict";n.d(t,{Ybf:function(){return i},jRj:function(){return o}});var r=n(83270);function o(e){return(0,r.w_)({tag:"svg",attr:{viewBox:"0 0 24 24",fill:"none",stroke:"currentColor",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"},child:[{tag:"circle",attr:{cx:"11",cy:"11",r:"8"}},{tag:"line",attr:{x1:"21",y1:"21",x2:"16.65",y2:"16.65"}}]})(e)}function i(e){return(0,r.w_)({tag:"svg",attr:{viewBox:"0 0 24 24",fill:"none",stroke:"currentColor",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"},child:[{tag:"polyline",attr:{points:"3 6 5 6 21 6"}},{tag:"path",attr:{d:"M19 6v14a2 2 0 0 1-2 2H7a2 2 0 0 1-2-2V6m3 0V4a2 2 0 0 1 2-2h4a2 2 0 0 1 2 2v2"}},{tag:"line",attr:{x1:"10",y1:"11",x2:"10",y2:"17"}},{tag:"line",attr:{x1:"14",y1:"11",x2:"14",y2:"17"}}]})(e)}},83270:function(e,t,n){"use strict";n.d(t,{w_:function(){return s}});var r=n(86006),o={color:void 0,size:void 0,className:void 0,style:void 0,attr:void 0},i=r.createContext&&r.createContext(o),a=function(){return(a=Object.assign||function(e){for(var t,n=1,r=arguments.length;nt.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};function s(e){return function(t){return r.createElement(c,a({attr:a({},e.attr)},t),function e(t){return t&&t.map(function(t,n){return r.createElement(t.tag,a({key:n},t.attr),e(t.child))})}(e.child))}}function c(e){var t=function(t){var n,o=e.attr,i=e.size,s=e.title,c=l(e,["attr","size","title"]),u=i||t.size||"1em";return t.className&&(n=t.className),e.className&&(n=(n?n+" ":"")+e.className),r.createElement("svg",a({stroke:"currentColor",fill:"currentColor",strokeWidth:"0"},t.attr,o,c,{className:n,style:a(a({color:e.color||t.color},t.style),e.style),height:u,width:u,xmlns:"http://www.w3.org/2000/svg"}),s&&r.createElement("title",null,s),e.children)};return void 0!==i?r.createElement(i.Consumer,null,function(e){return t(e)}):t(o)}},29389:function(e,t){"use strict";/** - * @license React - * react-is.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var n,r=Symbol.for("react.element"),o=Symbol.for("react.portal"),i=Symbol.for("react.fragment"),a=Symbol.for("react.strict_mode"),l=Symbol.for("react.profiler"),s=Symbol.for("react.provider"),c=Symbol.for("react.context"),u=Symbol.for("react.server_context"),f=Symbol.for("react.forward_ref"),d=Symbol.for("react.suspense"),p=Symbol.for("react.suspense_list"),h=Symbol.for("react.memo"),g=Symbol.for("react.lazy"),m=Symbol.for("react.offscreen");function b(e){if("object"==typeof e&&null!==e){var t=e.$$typeof;switch(t){case r:switch(e=e.type){case i:case l:case a:case d:case p:return e;default:switch(e=e&&e.$$typeof){case u:case c:case f:case g:case h:case s:return e;default:return t}}case o:return t}}}n=Symbol.for("react.module.reference"),t.ContextConsumer=c,t.ContextProvider=s,t.Element=r,t.ForwardRef=f,t.Fragment=i,t.Lazy=g,t.Memo=h,t.Portal=o,t.Profiler=l,t.StrictMode=a,t.Suspense=d,t.SuspenseList=p,t.isAsyncMode=function(){return!1},t.isConcurrentMode=function(){return!1},t.isContextConsumer=function(e){return b(e)===c},t.isContextProvider=function(e){return b(e)===s},t.isElement=function(e){return"object"==typeof e&&null!==e&&e.$$typeof===r},t.isForwardRef=function(e){return b(e)===f},t.isFragment=function(e){return b(e)===i},t.isLazy=function(e){return b(e)===g},t.isMemo=function(e){return b(e)===h},t.isPortal=function(e){return b(e)===o},t.isProfiler=function(e){return b(e)===l},t.isStrictMode=function(e){return b(e)===a},t.isSuspense=function(e){return b(e)===d},t.isSuspenseList=function(e){return b(e)===p},t.isValidElementType=function(e){return"string"==typeof e||"function"==typeof e||e===i||e===l||e===a||e===d||e===p||e===m||"object"==typeof e&&null!==e&&(e.$$typeof===g||e.$$typeof===h||e.$$typeof===s||e.$$typeof===c||e.$$typeof===f||e.$$typeof===n||void 0!==e.getModuleId)},t.typeOf=b},59605:function(e,t,n){"use strict";e.exports=n(29389)},30458:function(e,t,n){"use strict";let r=n(86006),o=function(e){let t="";return"string"==typeof e?t=e:"number"==typeof e?t=e.toString():e instanceof Array?e.forEach(function(e){t+=o(e)}):(0,r.isValidElement)(e)&&(t+=o(e.props.children)),t};t.Z=o},61555:function(e,t,n){"use strict";n.d(t,{Av:function(){return a},pF:function(){return r},xv:function(){return i},zi:function(){return o}});var r="right-scroll-bar-position",o="width-before-scroll-bar",i="with-scroll-bars-hidden",a="--removed-body-scroll-bar-size"},90450:function(e,t,n){"use strict";n.d(t,{jp:function(){return d}});var r=n(86006),o=n(85481),i=n(61555),a={left:0,top:0,right:0,gap:0},l=function(e){return parseInt(e||"",10)||0},s=function(e){var t=window.getComputedStyle(document.body),n=t["padding"===e?"paddingLeft":"marginLeft"],r=t["padding"===e?"paddingTop":"marginTop"],o=t["padding"===e?"paddingRight":"marginRight"];return[l(n),l(r),l(o)]},c=function(e){if(void 0===e&&(e="margin"),"undefined"==typeof window)return a;var t=s(e),n=document.documentElement.clientWidth,r=window.innerWidth;return{left:t[0],top:t[1],right:t[2],gap:Math.max(0,r-n+t[2]-t[0])}},u=(0,o.Ws)(),f=function(e,t,n,r){var o=e.left,a=e.top,l=e.right,s=e.gap;return void 0===n&&(n="margin"),"\n .".concat(i.xv," {\n overflow: hidden ").concat(r,";\n padding-right: ").concat(s,"px ").concat(r,";\n }\n body {\n overflow: hidden ").concat(r,";\n overscroll-behavior: contain;\n ").concat([t&&"position: relative ".concat(r,";"),"margin"===n&&"\n padding-left: ".concat(o,"px;\n padding-top: ").concat(a,"px;\n padding-right: ").concat(l,"px;\n margin-left:0;\n margin-top:0;\n margin-right: ").concat(s,"px ").concat(r,";\n 
"),"padding"===n&&"padding-right: ".concat(s,"px ").concat(r,";")].filter(Boolean).join(""),"\n }\n \n .").concat(i.pF," {\n right: ").concat(s,"px ").concat(r,";\n }\n \n .").concat(i.zi," {\n margin-right: ").concat(s,"px ").concat(r,";\n }\n \n .").concat(i.pF," .").concat(i.pF," {\n right: 0 ").concat(r,";\n }\n \n .").concat(i.zi," .").concat(i.zi," {\n margin-right: 0 ").concat(r,";\n }\n \n body {\n ").concat(i.Av,": ").concat(s,"px;\n }\n")},d=function(e){var t=e.noRelative,n=e.noImportant,o=e.gapMode,i=void 0===o?"margin":o,a=r.useMemo(function(){return c(i)},[i]);return r.createElement(u,{styles:f(a,!t,i,n?"":"!important")})}},51859:function(e,t,n){"use strict";n.d(t,{ZP:function(){return t_}});var r,o,i=n(82685),a=n(3708),l=n(83161),s=n(99889),c=n(24245),u=n(35413);function f(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0?d[b]+" "+v:ec(v,/&\f/g,d[b])).trim())&&(s[m++]=y);return ew(e,t,n,0===o?eP:l,s,c,u)}function eF(e,t,n,r){return ew(e,t,n,eM,ed(e,0,r),ed(e,r+1,-1),r)}var eB=function(e,t,n){for(var r=0,o=0;r=o,o=ek(),38===r&&12===o&&(t[n]=1),!e_(o);)eS();return ed(ex,e,ev)},ez=function(e,t){var n=-1,r=44;do switch(e_(r)){case 0:38===r&&12===ek()&&(t[n]=1),e[n]+=eB(ev-1,t,n);break;case 2:e[n]+=eC(r);break;case 4:if(44===r){e[++n]=58===ek()?"&\f":"",t[n]=e[n].length;break}default:e[n]+=el(r)}while(r=eS());return e},e$=function(e,t){var n;return n=ez(eO(e),t),ex="",n},eU=new WeakMap,eH=function(e){if("rule"===e.type&&e.parent&&!(e.length<1)){for(var t=e.value,n=e.parent,r=e.column===n.column&&e.line===n.line;"rule"!==n.type;)if(!(n=n.parent))return;if((1!==e.props.length||58===t.charCodeAt(0)||eU.get(n))&&!r){eU.set(e,!0);for(var o=[],i=e$(t,o),a=n.props,l=0,s=0;l-1&&!e.return)switch(e.type){case eM:e.return=function e(t,n){switch(45^ef(t,0)?(((n<<2^ef(t,0))<<2^ef(t,1))<<2^ef(t,2))<<2^ef(t,3):0){case 5103:return eR+"print-"+t+t;case 5737:case 4201:case 3177:case 3433:case 1641:case 4457:case 2921:case 5572:case 6356:case 5844:case 3191:case 6645:case 3005:case 6391:case 5879:case 5623:case 6135:case 4599:case 4855:case 4215:case 6389:case 5109:case 5365:case 5621:case 3829:return eR+t+t;case 5349:case 4246:case 4810:case 6968:case 2756:return eR+t+eN+t+eA+t+t;case 6828:case 4268:return eR+t+eA+t+t;case 6165:return eR+t+eA+"flex-"+t+t;case 5187:return eR+t+ec(t,/(\w+).+(:[^]+)/,eR+"box-$1$2"+eA+"flex-$1$2")+t;case 5443:return eR+t+eA+"flex-item-"+ec(t,/flex-|-self/,"")+t;case 4675:return eR+t+eA+"flex-line-pack"+ec(t,/align-content|flex-|-self/,"")+t;case 5548:return eR+t+eA+ec(t,"shrink","negative")+t;case 5292:return eR+t+eA+ec(t,"basis","preferred-size")+t;case 6060:return eR+"box-"+ec(t,"-grow","")+eR+t+eA+ec(t,"grow","positive")+t;case 4554:return eR+ec(t,/([^-])(transform)/g,"$1"+eR+"$2")+t;case 6187:return ec(ec(ec(t,/(zoom-|grab)/,eR+"$1"),/(image-set)/,eR+"$1"),t,"")+t;case 5495:case 3959:return ec(t,/(image-set\([^]*)/,eR+"$1$`$1");case 4968:return ec(ec(t,/(.+:)(flex-)?(.*)/,eR+"box-pack:$3"+eA+"flex-pack:$3"),/s.+-b[^;]+/,"justify")+eR+t+t;case 4095:case 3583:case 4068:case 2532:return ec(t,/(.+)-inline(.+)/,eR+"$1$2")+t;case 8116:case 7059:case 5753:case 5535:case 5445:case 5701:case 4933:case 4677:case 5533:case 5789:case 5021:case 4765:if(ep(t)-1-n>6)switch(ef(t,n+1)){case 109:if(45!==ef(t,n+4))break;case 102:return ec(t,/(.+:)(.+)-([^]+)/,"$1"+eR+"$2-$3$1"+eN+(108==ef(t,n+3)?"$3":"$2-$3"))+t;case 115:return~eu(t,"stretch")?e(ec(t,"stretch","fill-available"),n)+t:t}break;case 4949:if(115!==ef(t,n+1))break;case 
6444:switch(ef(t,ep(t)-3-(~eu(t,"!important")&&10))){case 107:return ec(t,":",":"+eR)+t;case 101:return ec(t,/(.+:)([^;!]+)(;|!.+)?/,"$1"+eR+(45===ef(t,14)?"inline-":"")+"box$3$1"+eR+"$2$3$1"+eA+"$2box$3")+t}break;case 5936:switch(ef(t,n+11)){case 114:return eR+t+eA+ec(t,/[svh]\w+-[tblr]{2}/,"tb")+t;case 108:return eR+t+eA+ec(t,/[svh]\w+-[tblr]{2}/,"tb-rl")+t;case 45:return eR+t+eA+ec(t,/[svh]\w+-[tblr]{2}/,"lr")+t}return eR+t+eA+t+t}return t}(e.value,e.length);break;case ej:return eL([eE(e,{value:ec(e.value,"@","@"+eR)})],r);case eP:if(e.length)return e.props.map(function(t){var n;switch(n=t,(n=/(::plac\w+|:read-\w+)/.exec(n))?n[0]:n){case":read-only":case":read-write":return eL([eE(e,{props:[ec(t,/:(read-\w+)/,":"+eN+"$1")]})],r);case"::placeholder":return eL([eE(e,{props:[ec(t,/:(plac\w+)/,":"+eR+"input-$1")]}),eE(e,{props:[ec(t,/:(plac\w+)/,":"+eN+"$1")]}),eE(e,{props:[ec(t,/:(plac\w+)/,eA+"input-$1")]})],r)}return""}).join("")}}],eW=function(e){var t,n,r,o,i,a=e.key;if("css"===a){var l=document.querySelectorAll("style[data-emotion]:not([data-s])");Array.prototype.forEach.call(l,function(e){-1!==e.getAttribute("data-emotion").indexOf(" ")&&(document.head.appendChild(e),e.setAttribute("data-s",""))})}var s=e.stylisPlugins||eq,c={},u=[];o=e.container||document.head,Array.prototype.forEach.call(document.querySelectorAll('style[data-emotion^="'+a+' "]'),function(e){for(var t=e.getAttribute("data-emotion").split(" "),n=1;n2||e_(ey)>3?"":" "}(m);break;case 92:_+=function(e,t){for(var n;--t&&eS()&&!(ey<48)&&!(ey>102)&&(!(ey>57)||!(ey<65))&&(!(ey>70)||!(ey<97)););return n=ev+(t<6&&32==ek()&&32==eS()),ed(ex,e,n)}(ev-1,7);continue;case 47:switch(ek()){case 42:case 47:eh(ew(u=function(e,t){for(;eS();)if(e+ey===57)break;else if(e+ey===84&&47===ek())break;return"/*"+ed(ex,t,ev-1)+"*"+el(47===e?e:eS())}(eS(),ev),n,r,eT,el(ey),ed(u,2,-2),0),c);break;default:_+="/"}break;case 123*b:s[f++]=ep(_)*y;case 125*b:case 59:case 0:switch(x){case 0:case 125:v=0;case 59+d:-1==y&&(_=ec(_,/\f/g,"")),g>0&&ep(_)-p&&eh(g>32?eF(_+";",o,r,p-1):eF(ec(_," ","")+";",o,r,p-2),c);break;case 59:_+=";";default:if(eh(k=eD(_,n,r,f,d,i,s,w,E=[],S=[],p),a),123===x){if(0===d)e(_,n,k,k,E,a,p,s,S);else switch(99===h&&110===ef(_,3)?100:h){case 100:case 108:case 109:case 115:e(t,k,k,o&&eh(eD(t,k,k,0,0,i,s,w,i,E=[],p),S),i,S,p,s,o?E:S);break;default:e(_,k,k,k,[""],S,0,s,S)}}}f=d=g=0,b=y=1,w=_="",p=l;break;case 58:p=1+ep(_),g=m;default:if(b<1){if(123==x)--b;else if(125==x&&0==b++&&125==(ey=ev>0?ef(ex,--ev):0,em--,10===ey&&(em=1,eg--),ey))continue}switch(_+=el(x),x*b){case 38:y=d>0?1:(_+="\f",-1);break;case 44:s[f++]=(ep(_)-1)*y,y=1;break;case 64:45===ek()&&(_+=eC(eS())),h=ek(),d=p=ep(w=_+=function(e){for(;!e_(ek());)eS();return ed(ex,e,ev)}(ev)),x++;break;case 45:45===m&&2==ep(_)&&(b=0)}}return a}("",null,null,null,[""],t=eO(t=e),0,[0],t),ex="",n),f)},p={key:a,sheet:new ei({key:a,container:o,nonce:e.nonce,speedy:e.speedy,prepend:e.prepend,insertionPoint:e.insertionPoint}),nonce:e.nonce,inserted:c,registered:{},insert:function(e,t,n,r){i=n,d(e?e+"{"+t.styles+"}":t.styles),r&&(p.inserted[t.name]=!0)}};return 
p.sheet.hydrate(u),p},eV={animationIterationCount:1,aspectRatio:1,borderImageOutset:1,borderImageSlice:1,borderImageWidth:1,boxFlex:1,boxFlexGroup:1,boxOrdinalGroup:1,columnCount:1,columns:1,flex:1,flexGrow:1,flexPositive:1,flexShrink:1,flexNegative:1,flexOrder:1,gridRow:1,gridRowEnd:1,gridRowSpan:1,gridRowStart:1,gridColumn:1,gridColumnEnd:1,gridColumnSpan:1,gridColumnStart:1,msGridRow:1,msGridRowSpan:1,msGridColumn:1,msGridColumnSpan:1,fontWeight:1,lineHeight:1,opacity:1,order:1,orphans:1,tabSize:1,widows:1,zIndex:1,zoom:1,WebkitLineClamp:1,fillOpacity:1,floodOpacity:1,stopOpacity:1,strokeDasharray:1,strokeDashoffset:1,strokeMiterlimit:1,strokeOpacity:1,strokeWidth:1},eG=/[A-Z]|^ms/g,eK=/_EMO_([^_]+?)_([^]*?)_EMO_/g,eY=function(e){return 45===e.charCodeAt(1)},eX=function(e){return null!=e&&"boolean"!=typeof e},eJ=(r=Object.create(null),function(e){return void 0===r[e]&&(r[e]=eY(e)?e:e.replace(eG,"-$&").toLowerCase()),r[e]}),eQ=function(e,t){switch(e){case"animation":case"animationName":if("string"==typeof t)return t.replace(eK,function(e,t,n){return o={name:t,styles:n,next:o},t})}return 1===eV[e]||eY(e)||"number"!=typeof t||0===t?t:t+"px"};function e0(e,t,n){if(null==n)return"";if(void 0!==n.__emotion_styles)return n;switch(typeof n){case"boolean":return"";case"object":if(1===n.anim)return o={name:n.name,styles:n.styles,next:o},n.name;if(void 0!==n.styles){var r=n.next;if(void 0!==r)for(;void 0!==r;)o={name:r.name,styles:r.styles,next:o},r=r.next;return n.styles+";"}return function(e,t,n){var r="";if(Array.isArray(n))for(var o=0;o=4;++r,o-=4)t=(65535&(t=255&e.charCodeAt(r)|(255&e.charCodeAt(++r))<<8|(255&e.charCodeAt(++r))<<16|(255&e.charCodeAt(++r))<<24))*1540483477+((t>>>16)*59797<<16),t^=t>>>24,n=(65535&t)*1540483477+((t>>>16)*59797<<16)^(65535&n)*1540483477+((n>>>16)*59797<<16);switch(o){case 3:n^=(255&e.charCodeAt(r+2))<<16;case 2:n^=(255&e.charCodeAt(r+1))<<8;case 1:n^=255&e.charCodeAt(r),n=(65535&n)*1540483477+((n>>>16)*59797<<16)}return n^=n>>>13,(((n=(65535&n)*1540483477+((n>>>16)*59797<<16))^n>>>15)>>>0).toString(36)}(a)+c,styles:a,next:o}};function e5(e,t,n){var r="";return n.split(" ").forEach(function(n){void 0!==e[n]?t.push(e[n]+";"):r+=n+" "}),r}var e3=function(e,t,n){var r=e.key+"-"+t.name;!1===n&&void 0===e.registered[r]&&(e.registered[r]=t.styles)},e6=function(e,t,n){e3(e,t,n);var r=e.key+"-"+t.name;if(void 0===e.inserted[t.name]){var o=t;do e.insert(t===o?"."+r:"",o,e.sheet,!0),o=o.next;while(void 0!==o)}};function e4(e,t){if(void 0===e.inserted[t.name])return e.insert("",t,e.sheet,!0)}function e8(e,t,n){var r=[],o=e5(e,r,n);return r.length<2?n:o+t(r)}var e9=function e(t){for(var n="",r=0;r1&&void 0!==arguments[1]?arguments[1]:"white",n="background-color: ".concat(e,"; border-radius: 4px; padding: 2px 4px;");return t&&(n+=" color: ".concat(t,";")),[n,""]}function ti(e,t){for(var n,r,o=arguments.length,i=Array(o>2?o-2:0),a=2;at?(e.apply(void 0,i),n=l):(clearTimeout(r),r=tl()(function(){e.apply(void 0,i),n=U()()},Math.max(0,t-l+n)))}}(function(e){var t=i.current;t&&t(e)},t)},[t,i]),l=(0,v.useCallback)(function(e){e.timeStampLow=U()(),a(e)},[a]);return(0,v.useLayoutEffect)(function(){return o.addEventListener(n,l,{passive:!0}),l({target:o,type:n}),function(){return o.removeEventListener(n,l)}},[n,l,o]),!1};ts.defaultProps={debounce:200};var tc=n(44170),tu=n.n(tc);function tf(e,t){var n=tu()(t-e),r=Math.sqrt(Math.abs(t-e)),o=e+r*n;return n>0?Math.min(t,o):Math.max(t,o)}var td=function(e){var 
t=e.name,n=e.onEnd,r=e.target,o=e.value,i=(0,v.useRef)(),a=(0,v.useCallback)(function(e,t,o,l){var s=arguments.length>4&&void 0!==arguments[4]?arguments[4]:U()();("100%"===o||"number"==typeof o)&&(cancelAnimationFrame(i.current),i.current=requestAnimationFrame(function(){if(r){var i="100%"===o?r.scrollHeight-r.offsetHeight:o,c=function(e,t,n,r){for(var o=e,i=0;iMath.abs(i-c)&&(c=i),r[e]=c,i===c?n&&n(!0):a(e,t,o,l+1,s)}}))},[i,n,r]),l=(0,v.useCallback)(function(){cancelAnimationFrame(i.current),n&&n(!1)},[n]);return(0,v.useLayoutEffect)(function(){return(a(t,r[t],o,1),r)?(r.addEventListener("pointerdown",l,{passive:!0}),r.addEventListener("wheel",l,{passive:!0}),function(){r.removeEventListener("pointerdown",l),r.removeEventListener("wheel",l),cancelAnimationFrame(i.current)}):function(){return cancelAnimationFrame(i.current)}},[a,i,l,t,r,o]),!1};function tp(e){var t=p((0,v.useState)(e),2),n=t[0],r=t[1],o=(0,v.useRef)(),i=(0,v.useCallback)(function(e){"function"==typeof e?i(function(t){return e=e(t),o.current=e,e}):(o.current=e,i(e))},[o]);return o.current=n,[n,r,o]}function th(e,t){var n=W()(e);if(G()){var r=G()(e);t&&(r=Y()(r).call(r,function(t){return J()(e,t).enumerable})),n.push.apply(n,r)}return n}function tg(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{},n=t.force;return void 0!==n&&n?function(){for(var t=arguments.length,n=Array(t),r=0;r",{force:o})},[o]);a="top"===a?"top":"bottom";var u=(0,v.useRef)(0),f=(0,v.useRef)(i),d=p(tp("top"===a?0:"100%"),3),h=d[0],g=d[1],m=d[2],b=p(tp(null),3),S=b[0],_=b[1],O=b[2],C=(0,v.useRef)(0),A=(0,v.useRef)(0),N=(0,v.useRef)(0),R=p((0,v.useState)(!0),2),T=R[0],M=R[1],L=p((0,v.useState)(!0),2),D=L[0],B=L[1],$=p((0,v.useState)(!0),2),H=$[0],q=$[1],W=p((0,v.useState)(!1),2),V=W[0],G=W[1],K=p(tp(!0),3),Y=K[0],X=K[1],J=K[2],Q=(0,v.useRef)([]),ee=(0,v.useCallback)(function(e){var t=O.current;return Q.current.push(e),t&&e({scrollTop:t.scrollTop}),function(){var t=Q.current,n=I()(t).call(t,e);~n&&F()(t).call(t,n,1)}},[Q,O]),et=(0,v.useCallback)(function(){var e=m.current;c(function(){var t;return z()(t=["%cSpineTo%c: %conEnd%c is fired."]).call(t,P(to("magenta")),P(to("orange")),[{animateTo:e}])}),u.current=U()(),tv(e,a)||X(!1),g(null)},[m,c,u,a,g,X]),en=(0,v.useCallback)(function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.behavior,r=O.current;if("number"!=typeof e&&"100%"!==e)return console.warn('react-scroll-to-bottom: Arguments passed to scrollTo() must be either number or "100%".');c(function(){var t;return[z()(t=["%cscrollTo%c: Will scroll to %c".concat("number"==typeof e?e+"px":e.replace(/%/g,"%%"),"%c")]).call(t,P(to("lime","")),P(to("purple"))),{behavior:n,nextAnimateTo:e,target:r}]}),"auto"===n?(et(),r&&(r.scrollTop="100%"===e?r.scrollHeight-r.offsetHeight:e)):("smooth"!==n&&console.warn('react-scroll-to-bottom: Please set "behavior" when calling "scrollTo". In future versions, the default behavior will be changed from smooth scrolling to discrete scrolling to align with HTML Standard.'),g(e)),tv(e,a)&&(c(function(){var t;return[z()(t=["%cscrollTo%c: Scrolling to end, will set sticky to %ctrue%c."]).call(t,P(to("lime","")),P(to("purple"))),[{mode:a,nextAnimateTo:e}]]}),X(!0))},[c,et,a,g,X,O]),er=(0,v.useCallback)(function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.behavior;c(function(){var e;return z()(e=["%cscrollToBottom%c: Called"]).call(e,P(to("yellow","")))}),"smooth"!==t&&console.warn('react-scroll-to-bottom: Please set "behavior" when calling "scrollToBottom". 
In future versions, the default behavior will be changed from smooth scrolling to discrete scrolling to align with HTML Standard.'),en("100%",{behavior:t||"smooth"})},[c,en]),eo=(0,v.useCallback)(function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.behavior;c(function(){var e;return z()(e=["%cscrollToTop%c: Called"]).call(e,P(to("yellow","")))}),"smooth"!==t&&console.warn('react-scroll-to-bottom: Please set "behavior" when calling "scrollToTop". In future versions, the default behavior will be changed from smooth scrolling to discrete scrolling to align with HTML Standard.'),en(0,{behavior:t||"smooth"})},[c,en]),ei=(0,v.useCallback)(function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.behavior;c(function(){var e;return z()(e=["%cscrollToEnd%c: Called"]).call(e,P(to("yellow","")))}),"smooth"!==t&&console.warn('react-scroll-to-bottom: Please set "behavior" when calling "scrollToEnd". In future versions, the default behavior will be changed from smooth scrolling to discrete scrolling to align with HTML Standard.');var n={behavior:t||"smooth"};"top"===a?eo(n):er(n)},[c,a,er,eo]),ea=(0,v.useCallback)(function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.behavior;c(function(){var e;return z()(e=["%cscrollToStart%c: Called"]).call(e,P(to("yellow","")))}),"smooth"!==t&&console.warn('react-scroll-to-bottom: Please set "behavior" when calling "scrollToStart". In future versions, the default behavior will be changed from smooth scrolling to discrete scrolling to align with HTML Standard.');var n={behavior:t||"smooth"};"top"===a?er(n):eo(n)},[c,a,er,eo]),el=(0,v.useCallback)(function(){var e=O.current;if(e){if("auto"===f.current){c(function(){var e;return z()(e=["%ctarget changed%c: Initial scroll"]).call(e,P(to("blue")))}),e.scrollTop="top"===a?0:e.scrollHeight-e.offsetHeight,f.current=!1;return}var t,n=C.current,r=e.offsetHeight,o=e.scrollHeight,i=e.scrollTop,l="top"===a?0:Math.max(0,o-r-i),u=Math.max(0,n-i),d=s({maxValue:l,minValue:u,offsetHeight:r,scrollHeight:o,scrollTop:i}),p=Math.max(0,Math.min(l,d));t="top"===a||p!==l?i+p:"100%",c(function(){var e,a,s;return[z()(e=[z()(a=z()(s="%cscrollToSticky%c: Will animate from %c".concat(n,"px%c to %c")).call(s,"number"==typeof t?t+"px":t.replace(/%/g,"%%"),"%c (%c")).call(a,("100%"===t?l:t)+n,"px%c)")]).call(e,P(to("orange")),P(to("purple")),P(to("purple")),P(to("purple"))),{animateFrom:n,maxValue:l,minValue:u,nextAnimateTo:t,nextValue:p,offsetHeight:r,rawNextValue:d,scrollHeight:o,scrollTop:i}]}),en(t,{behavior:"smooth"})}},[C,c,a,s,en,O]),es=(0,v.useCallback)(function(e){var t,n=e.timeStampLow,r=m.current,o=O.current,i=null!==r;if(!(n<=u.current)&&o){var l=tb({mode:a,target:o}),s=l.atBottom,f=l.atEnd,d=l.atStart,p=l.atTop;M(s),B(f),G(d),q(p);var h=o.offsetHeight,g=o.scrollHeight,b=A.current,v=N.current,y=h!==b,x=g!==v;if(y&&(A.current=h),x&&(N.current=g),y||x)J.current&&(c(function(){var e;return[z()(e=["%conScroll%c: Size changed while sticky, calling %cscrollToSticky()%c"]).call(e,P(to("red")),P(to("orange")),[{offsetHeightChanged:y,scrollHeightChanged:x}]),{nextOffsetHeight:h,prevOffsetHeight:b,nextScrollHeight:g,prevScrollHeight:v}]}),el());else{var w=i&&tv(r,a)||f;J.current!==w&&(c(function(){var e,t,n,l;return[z()(e=["%conScroll%c: %csetSticky%c(%c".concat(w,"%c)")]).call(e,P(to("red")),P(to("red")),P(to("purple"))),z()(t=[z()(n=z()(l="(animating = %c".concat(i,"%c && isEnd = %c")).call(l,tv(r,a),"%c) || atEnd = 
%c")).call(n,f,"%c")]).call(t,P(to("purple")),P(to("purple")),P(to("purple")),[{animating:i,animateTo:r,atEnd:f,mode:a,offsetHeight:o.offsetHeight,scrollHeight:o.scrollHeight,sticky:J.current,nextSticky:w}])]}),X(w))}var E=o.scrollTop;Z()(t=Q.current).call(t,function(e){return e({scrollTop:E})})}},[m,c,u,a,A,N,Q,el,M,B,G,q,X,J,O]);(0,v.useEffect)(function(){if(S){var e,n,r=!1,o=(e=function(){var e=O.current,t=null!==m.current;J.current?tb({mode:a,target:e}).atEnd?r=!1:r?U()()-r>34&&(t||(C.current=e.scrollTop,c(function(){var e;return z()(e=["%cInterval check%c: Should sticky but not at end, calling %cscrollToSticky()%c to scroll"]).call(e,P(to("navy")),P(to("orange")))}),el()),r=!1):r=U()():e.scrollHeight<=e.offsetHeight&&!J.current&&(c(function(){var t;return[z()(t=["%cInterval check%c: Container is emptied, setting sticky back to %ctrue%c"]).call(t,P(to("navy")),P(to("purple"))),[{offsetHeight:e.offsetHeight,scrollHeight:e.scrollHeight,sticky:J.current}]]}),X(!0))},n=Math.max(17,t)||17,e(),j()(e,n));return function(){return clearInterval(o)}}},[m,t,c,a,el,X,J,S,O]);var ec=(0,v.useMemo)(function(){var e=tm[l]||(tm[l]=e7({key:"react-scroll-to-bottom--css-"+tt()().toString(26).substr(2,5).replace(/[0-9]/g,function(e){return String.fromCharCode(e.charCodeAt(0)+65)}),nonce:l}));return function(t){return e.css(t)+""}},[l]),eu=(0,v.useMemo)(function(){return{observeScrollPosition:ee,setTarget:_,styleToClassName:ec}},[ee,_,ec]),ef=(0,v.useMemo)(function(){return{atBottom:T,atEnd:D,atStart:V,atTop:H,mode:a}},[T,D,V,H,a]),ed=(0,v.useMemo)(function(){var e=null!==h;return{animating:e,animatingToEnd:e&&tv(h,a),sticky:Y}},[h,a,Y]),ep=(0,v.useMemo)(function(){return tg(tg({},ef),ed)},[ef,ed]),eh=(0,v.useMemo)(function(){return{scrollTo:en,scrollToBottom:er,scrollToEnd:ei,scrollToStart:ea,scrollToTop:eo}},[en,er,ei,ea,eo]);return(0,v.useEffect)(function(){if(S){var e=function(){N.current=S.scrollHeight};return S.addEventListener("focus",e,{capture:!0,passive:!0}),function(){return S.removeEventListener("focus",e)}}},[S]),c(function(){var e;return[z()(e=["%cRender%c: Render"]).call(e,P(to("cyan",""))),{animateTo:h,animating:null!==h,sticky:Y,target:S}]}),v.createElement(k.Provider,{value:eu},v.createElement(y.Provider,{value:eh},v.createElement(E.Provider,{value:ep},v.createElement(x.Provider,{value:ef},v.createElement(w.Provider,{value:ed},n,S&&v.createElement(ts,{debounce:r,name:"scroll",onEvent:es,target:S}),S&&null!==h&&v.createElement(td,{name:"scrollTop",onEnd:et,target:S,value:h}))))))};ty.defaultProps={checkInterval:100,children:void 0,debounce:17,debug:void 0,initialScrollBehavior:"smooth",mode:void 0,nonce:void 0,scroller:function(){return 1/0}},ty.propTypes={checkInterval:b().number,children:b().any,debounce:b().number,debug:b().bool,initialScrollBehavior:b().oneOf(["auto","smooth"]),mode:b().oneOf(["bottom","top"]),nonce:b().string,scroller:b().func};var tx={height:"100%",overflowY:"auto",width:"100%"},tw=function(e){var t=e.children,n=e.className,r=(0,v.useContext)(k).setTarget,o=_()(tx);return v.createElement("div",{className:g()(o,(n||"")+""),ref:r},t)};tw.defaultProps={children:void 0,className:void 0},tw.propTypes={children:b().any,className:b().string};var tE={position:"relative"},tS=function(e){var t=e.children,n=e.className,r=e.followButtonClassName,o=e.scrollViewClassName,i=_()(tE);return v.createElement("div",{className:g()(i,(n||"")+"")},v.createElement(tw,{className:(o||"")+""},t),v.createElement(C,{className:(r||"")+""}))};tS.defaultProps={children:void 0,className:void 
0,followButtonClassName:void 0,scrollViewClassName:void 0},tS.propTypes={children:b().any,className:b().string,followButtonClassName:b().string,scrollViewClassName:b().string};var tk=function(e){var t=e.checkInterval,n=e.children,r=e.className,o=e.debounce,i=e.debug,a=e.followButtonClassName,l=e.initialScrollBehavior,s=e.mode,c=e.nonce,u=e.scroller,f=e.scrollViewClassName;return v.createElement(ty,{checkInterval:t,debounce:o,debug:i,initialScrollBehavior:l,mode:s,nonce:c,scroller:u},v.createElement(tS,{className:r,followButtonClassName:a,scrollViewClassName:f},n))};tk.defaultProps={checkInterval:void 0,children:void 0,className:void 0,debounce:void 0,debug:void 0,followButtonClassName:void 0,initialScrollBehavior:"smooth",mode:void 0,nonce:void 0,scroller:void 0,scrollViewClassName:void 0},tk.propTypes={checkInterval:b().number,children:b().any,className:b().string,debounce:b().number,debug:b().bool,followButtonClassName:b().string,initialScrollBehavior:b().oneOf(["auto","smooth"]),mode:b().oneOf(["bottom","top"]),nonce:b().string,scroller:b().func,scrollViewClassName:b().string};var t_=tk;!function(e,t){try{var r=n.g.document;if(void 0!==r&&r.createElement&&r.head&&r.head.appendChild){var o=r.querySelector('html meta[name="'.concat(encodeURI(e),'"]'))||r.createElement("meta");o.setAttribute("name",e),o.setAttribute("content",t),r.head.appendChild(o)}}catch(e){}}("react-scroll-to-bottom:version","4.2.0")},32580:function(e,t){var n;/*! - Copyright (c) 2018 Jed Watson. - Licensed under the MIT License (MIT), see - http://jedwatson.github.io/classnames -*/!function(){"use strict";var r={}.hasOwnProperty;function o(){for(var e=[],t=0;tt.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n},s=function(e,t,n){var r="react-spinners-".concat(e,"-").concat(n);if("undefined"==typeof window||!window.document)return r;var o=document.createElement("style");document.head.appendChild(o);var i=o.sheet,a="\n @keyframes ".concat(r," {\n ").concat(t,"\n }\n ");return i&&i.insertRule(a,0),r}("BeatLoader","50% {transform: scale(0.75);opacity: 0.2} 100% {transform: scale(1);opacity: 1}","beat"),c=function(e){var t=e.loading,n=e.color,o=void 0===n?"#000000":n,c=e.speedMultiplier,u=void 0===c?1:c,f=e.cssOverride,d=e.size,p=void 0===d?15:d,h=e.margin,g=void 0===h?2:h,m=l(e,["loading","color","speedMultiplier","cssOverride","size","margin"]),b=a({display:"inherit"},void 0===f?{}:f),v=function(e){return{display:"inline-block",backgroundColor:o,width:i(p),height:i(p),margin:i(g),borderRadius:"100%",animation:"".concat(s," ").concat(.7/u,"s ").concat(e%2?"0s":"".concat(.35/u,"s")," infinite linear"),animationFillMode:"both"}};return void 0===t||t?r.createElement("span",a({style:b},m),r.createElement("span",{style:v(1)}),r.createElement("span",{style:v(2)}),r.createElement("span",{style:v(3)})):null}},85481:function(e,t,n){"use strict";n.d(t,{Ws:function(){return l}});var r,o=n(86006),i=function(){var e=0,t=null;return{add:function(o){if(0==e&&(t=function(){if(!document)return null;var e=document.createElement("style");e.type="text/css";var t=r||n.nc;return t&&e.setAttribute("nonce",t),e}())){var 
i,a;(i=t).styleSheet?i.styleSheet.cssText=o:i.appendChild(document.createTextNode(o)),a=t,(document.head||document.getElementsByTagName("head")[0]).appendChild(a)}e++},remove:function(){--e||!t||(t.parentNode&&t.parentNode.removeChild(t),t=null)}}},a=function(){var e=i();return function(t,n){o.useEffect(function(){return e.add(t),function(){e.remove()}},[t&&n])}},l=function(){var e=a();return function(t){return e(t.styles,t.dynamic),null}}},35036:function(e,t,n){"use strict";n.d(t,{Z:function(){return w}});var r=n(40431),o=n(86006),i=o.useLayoutEffect,a=function(e){var t=o.useRef(e);return i(function(){t.current=e}),t},l=function(e,t){if("function"==typeof e){e(t);return}e.current=t},s=function(e,t){var n=(0,o.useRef)();return(0,o.useCallback)(function(r){e.current=r,n.current&&l(n.current,null),n.current=t,t&&l(t,r)},[t])},c={"min-height":"0","max-height":"none",height:"0",visibility:"hidden",overflow:"hidden",position:"absolute","z-index":"-1000",top:"0",right:"0"},u=function(e){Object.keys(c).forEach(function(t){e.style.setProperty(t,c[t],"important")})},f=null,d=function(e,t){var n=e.scrollHeight;return"border-box"===t.sizingStyle.boxSizing?n+t.borderSize:n-t.paddingSize},p=function(){},h=["borderBottomWidth","borderLeftWidth","borderRightWidth","borderTopWidth","boxSizing","fontFamily","fontSize","fontStyle","fontWeight","letterSpacing","lineHeight","paddingBottom","paddingLeft","paddingRight","paddingTop","tabSize","textIndent","textRendering","textTransform","width","wordBreak"],g=!!document.documentElement.currentStyle,m=function(e){var t=window.getComputedStyle(e);if(null===t)return null;var n=h.reduce(function(e,n){return e[n]=t[n],e},{}),r=n.boxSizing;if(""===r)return null;g&&"border-box"===r&&(n.width=parseFloat(n.width)+parseFloat(n.borderRightWidth)+parseFloat(n.borderLeftWidth)+parseFloat(n.paddingRight)+parseFloat(n.paddingLeft)+"px");var o=parseFloat(n.paddingBottom)+parseFloat(n.paddingTop),i=parseFloat(n.borderBottomWidth)+parseFloat(n.borderTopWidth);return{sizingStyle:n,paddingSize:o,borderSize:i}};function b(e,t,n){var r=a(n);o.useLayoutEffect(function(){var n=function(e){return r.current(e)};if(e)return e.addEventListener(t,n),function(){return e.removeEventListener(t,n)}},[])}var v=function(e){b(window,"resize",e)},y=function(e){b(document.fonts,"loadingdone",e)},x=["cacheMeasurements","maxRows","minRows","onChange","onHeightChange"],w=o.forwardRef(function(e,t){var n=e.cacheMeasurements,i=e.maxRows,a=e.minRows,l=e.onChange,c=void 0===l?p:l,h=e.onHeightChange,g=void 0===h?p:h,b=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,x),w=void 0!==b.value,E=o.useRef(null),S=s(E,t),k=o.useRef(0),_=o.useRef(),O=function(){var e,t,r,o,l,s,c,p,h,b,v,y=E.current,x=n&&_.current?_.current:m(y);if(x){_.current=x;var w=(e=y.value||y.placeholder||"x",void 0===(t=a)&&(t=1),void 0===(r=i)&&(r=1/0),f||((f=document.createElement("textarea")).setAttribute("tabindex","-1"),f.setAttribute("aria-hidden","true"),u(f)),null===f.parentNode&&document.body.appendChild(f),o=x.paddingSize,l=x.borderSize,c=(s=x.sizingStyle).boxSizing,Object.keys(s).forEach(function(e){f.style[e]=s[e]}),u(f),f.value=e,p=d(f,x),f.value=e,p=d(f,x),f.value="x",b=(h=f.scrollHeight-o)*t,"border-box"===c&&(b=b+o+l),p=Math.max(b,p),v=h*r,"border-box"===c&&(v=v+o+l),[p=Math.min(v,p),h]),S=w[0],O=w[1];k.current!==S&&(k.current=S,y.style.setProperty("height",S+"px","important"),g(S,{rowHeight:O}))}};return 
o.useLayoutEffect(O),v(O),y(O),o.createElement("textarea",(0,r.Z)({},b,{onChange:function(e){w||O(),c(e)},ref:S}))})},18160:function(e,t,n){"use strict";t.b=void 0;let r=n(9268),o=n(86006),i="undefined"==typeof window,a=!i&&(()=>{try{return"ontouchstart"in window||navigator.maxTouchPoints}catch(e){return!1}})(),l=!i&&(()=>{try{return window.CSS.supports("overflow-anchor: auto")}catch(e){return!1}})(),s=a&&!l,c={top:"top",bottom:"bottom",clientHeight:"clientHeight",scrollHeight:"scrollHeight",scrollTop:"scrollTop",overflowY:"overflowY",height:"height",minHeight:"minHeight",maxHeight:"maxHeight",marginTop:"marginTop"},u={top:"left",bottom:"right",scrollHeight:"scrollWidth",clientHeight:"clientWidth",scrollTop:"scrollLeft",overflowY:"overflowX",minHeight:"minWidth",height:"width",maxHeight:"maxWidth",marginTop:"marginLeft"},f=(e,t,n=1/0)=>Math.max(Math.min(t,n),e),d=(e,t,n)=>Math.ceil(Math.abs(e-t)/n),p=i?o.useEffect:o.useLayoutEffect,h=(e,t,n)=>{let r=[];for(let o=e;o{let i=n,a=e;for(;a&&a!==t;){if(o(a,i))return[a,i];r?(i++,a=a.nextSibling):(i--,a=a.previousSibling)}return[null,-1]},m=/auto|scroll/gi,b=(e,t)=>{if(!t||t===document.body||t===document.documentElement)return document.documentElement;let n=window.getComputedStyle(t);return m.test(n[e.overflowY])||m.test(n.overflow)?t:b(e,t.parentNode)},v=(e,t,n=0)=>({padding:0,margin:0,border:"none",visibility:"hidden",overflowAnchor:"none",[e.minHeight]:t,[e.height]:t,[e.maxHeight]:t,[e.marginTop]:n});t.b=(0,o.forwardRef)(({items:e=[],count:t,children:n,viewportRef:i,itemSize:m=0,itemMargin:y=-1,overscan:x=1,axis:w="y",initialIndex:E=-1,initialAlignToTop:S=!0,initialOffset:k=0,initialDelay:_=-1,initialPrerender:O=0,onViewportIndexesChange:C,overflowAnchor:A="auto",withCache:N=!0,scrollThreshold:R=0,renderSpacer:T=({ref:e,style:t})=>(0,r.jsx)("div",{ref:e,style:t},void 0),indexesShift:P=0,getItemBoundingClientRect:M=e=>e.getBoundingClientRect()},j)=>{let L;let I="y"===w?c:u,D="number"==typeof t,F=(D?t:e.length)-1,[[B,z],$]=(0,o.useState)(()=>[f(0,m),f(-1,y)]),U=f(0,B+z),H=f(0,Math.ceil(x*U)),[Z,q]=(0,o.useState)([E-O,E+O]),W=(0,o.useRef)(null),V=(0,o.useRef)(-1),G=(0,o.useRef)(null),K=(0,o.useRef)(null),Y=(0,o.useRef)(!1),X=(0,o.useRef)(P),J=(0,o.useRef)([]),Q=(0,o.useRef)(E>=0?{index:E,alignToTop:S,offset:k,delay:_,prerender:O}:null),ee=(0,o.useRef)(null),et=(0,o.useRef)(0),en=(0,o.useRef)([-1,-1]),er=(0,o.useRef)(null),[eo,ei]=(0,o.useMemo)(()=>{Z[0]=f(0,Z[0],F),Z[1]=f(Z[0],Z[1],F);let e=P-X.current;X.current=P;let t=G.current;return t&&e&&(Z[0]=f(0,Z[0]+e,F),Z[1]=f(Z[0],Z[1]+e,F),W.current=t.nextSibling,V.current=Z[0],Y.current=!0),Z},[P,Z,F]),ea=(0,o.useMemo)(()=>v(I,(N?J.current:[]).slice(0,eo).reduce((e,t)=>e+(t-B),eo*U),et.current),[I,N,eo,U,B]),el=(0,o.useMemo)(()=>v(I,(N?J.current:[]).slice(ei+1,F+1).reduce((e,t)=>e+(t-B),U*(F-ei))),[I,N,ei,F,U,B]),es=(0,o.useMemo)(()=>{let e=null;return()=>{if(i)return i.current===document.body?document.documentElement:i.current;if(e&&e.isConnected)return e;let t=G.current;return t?e=b(I,t.parentNode):null}},[I,i]),ec=(0,o.useRef)(()=>{}),eu=(0,o.useRef)(()=>({index:-1,offset:0}));return p(()=>{ec.current=()=>{let e=es(),t=G.current,n=K.current;if(!e||!t||!n)return;let 
r=t.nextSibling,o=n.previousSibling,i=e.getBoundingClientRect(),a=t.getBoundingClientRect(),l=n.getBoundingClientRect(),c={[I.top]:e===document.documentElement?0:i[I.top],[I.bottom]:e===document.documentElement?document.documentElement[I.clientHeight]:i[I.bottom]},u={[I.top]:c[I.top]-H,[I.bottom]:c[I.bottom]+H};if(et.current<0&&a[I.top]-et.current>=u[I.top]||et.current>0&&a[I.top]>=u[I.top]||et.current&&Q.current){t.style[I.marginTop]="0px",e.style[I.overflowY]="hidden",e[I.scrollTop]+=-et.current,e.style[I.overflowY]="",et.current=0;return}if(0===B||-1===z){let e=0;if(g({fromElement:r,toElement:n,fromIndex:eo,compare:t=>(e+=M(t)[I.height],!1)}),!e)return;let t=ei-eo+1,o=0===B?Math.ceil(e/t):B,i=-1===z?Math.ceil((l[I.top]-a[I.bottom]-e)/t):z;$([o,i]);return}if(ee.current)return;if(Q.current){let t=f(0,Q.current.index,F);if(tei){q([t-Q.current.prerender,t+Q.current.prerender]);return}let[o]=g({fromElement:r,toElement:n,fromIndex:eo,compare:(e,n)=>n===t});if(!o)return;let{alignToTop:i,offset:a,delay:l}=Q.current;Q.current=null;let u=()=>{let t=M(o),n=i?t[I.top]-c[I.top]+a:t[I.bottom]-c[I.top]-e[I.clientHeight]+a;e[I.scrollTop]+=n,ee.current=null},d=l<0&&s?30:l;if(d>0){ee.current=setTimeout(u,d);return}u();return}if(null===er.current)er.current=e.scrollTop;else if(er.current!==e.scrollTop){let t=Math.abs(e.scrollTop-er.current);if(er.current=e.scrollTop,R>0&&t>R)return}let p=r===n?n:r.nextSibling,h=o===t?t:o.previousSibling,m=Math.ceil((l[I.top]-a[I.bottom])/(ei+1-eo)),b=a[I.bottom]>u[I.bottom],v=l[I.top]u[I.top],x=!b&&!v&&l[I.top]u[I.bottom],E=!b&&!v&&(p===n?l:M(p))[I.top]M(e)[I.bottom]<=u[I.bottom]});-1!==e&&(k=e+1)}if(E){let[,e]=g({fromElement:r,toElement:n,fromIndex:eo,compare:e=>M(e)[I.top]>=u[I.top]});-1!==e&&(S=e-1)}if(C){let[,e]=g({fromElement:r,toElement:n,fromIndex:eo,compare:e=>M(e)[I.bottom]>c[I.top]});-1===e&&(e=eo);let[,i]=g({fromElement:o,toElement:t,fromIndex:ei,asc:!1,compare:e=>M(e)[I.top]=S)W.current=r,V.current=eo;else{let[e,t]=g({fromElement:r,toElement:n,fromIndex:eo,compare:(e,t)=>{if(t===S)return!0;let n=M(e);return n[I.height]!==B&&(J.current[t]=n[I.height]),!1}});e?(W.current=e,V.current=t):(W.current=o,V.current=ei)}}q([S,k])}},eu.current=()=>{let e=es(),t=G.current,n=K.current,r=-1,o=0;if(!e||!t||!n)return{index:r,offset:o};let i=t.nextSibling,a=e.getBoundingClientRect(),l={[I.top]:e===document.documentElement?0:a[I.top],[I.bottom]:e===document.documentElement?document.documentElement[I.clientHeight]:a[I.bottom]};return g({fromElement:i,toElement:n,fromIndex:eo,compare:(e,t)=>{let n=M(e);return r=t,o=l[I.top]-n[I.top],n[I.bottom]>l[I.top]}}),{index:r,offset:o}}}),W.current&&es()&&G.current&&(L=M(W.current)[I.top]-(es()===document.documentElement?0:es().getBoundingClientRect()[I.top])),p(()=>{W.current=null;let e=V.current,t=Y.current;V.current=-1,Y.current=!1;let n=es(),r=G.current,o=K.current;if(-1===e||!n||!r||!o||void 0===L||l&&"none"!==A&&!t)return;let i=null;if(e>=eo&&e<=ei){let[t]=g({fromElement:r.nextSibling,toElement:o,fromIndex:eo,compare:(t,n)=>n===e});t&&(i=M(t)[I.top])}else ee+(t-B),e*U):e<=F&&(i=o.getBoundingClientRect()[I.top]+(N?J.current:[]).slice(ei+1,e).reduce((e,t)=>e+(t-B),U*(e-1-ei)));if(null===i)return;let s=i-(n===document.documentElement?0:n.getBoundingClientRect()[I.top])-L;if(s){if(a){et.current-=s,r.style[I.marginTop]=`${et.current}px`;return}n[I.scrollTop]+=s}},[eo]),p(()=>{let e;let t=()=>{e=requestAnimationFrame(t),ec.current()};return 
t(),()=>{cancelAnimationFrame(e),ee.current&&clearTimeout(ee.current)}},[]),(0,o.useImperativeHandle)(j,()=>({scrollToIndex:({index:e=-1,alignToTop:t=!0,offset:n=0,delay:r=-1,prerender:o=0})=>{Q.current={index:e,alignToTop:t,offset:n,delay:r,prerender:o},ec.current()},getScrollPosition:()=>eu.current()}),[]),(0,r.jsxs)(o.Fragment,{children:[T({ref:G,style:ea,type:"top"}),(!!t||!!e.length)&&h(eo,ei+1,D?n:t=>n(e[t],t,e)),T({ref:K,style:el,type:"bottom"})]},void 0)})},99231:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.autoprefix=void 0;var r,o=(r=n(17766))&&r.__esModule?r:{default:r},i=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:"span";return function(n){function r(){!function(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}(this,r);for(var n,l,s,c=arguments.length,u=Array(c),f=0;f1&&void 0!==arguments[1]?arguments[1]:"span";return function(n){function r(){!function(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}(this,r);for(var n,l,s,c=arguments.length,u=Array(c),f=0;f0&&void 0!==arguments[0]?arguments[0]:[],n=[];return(0,a.default)(t,function(t){Array.isArray(t)?e(t).map(function(e){return n.push(e)}):(0,i.default)(t)?(0,o.default)(t,function(e,t){!0===e&&n.push(t),n.push(t+"-"+e)}):(0,r.default)(t)&&n.push(t)}),n};t.default=s},25319:function(e,t,n){"use strict";t.tz=void 0;var r=c(n(83378)),o=c(n(26189)),i=c(n(99231)),a=c(n(79071)),l=c(n(84913)),s=c(n(71906));function c(e){return e&&e.__esModule?e:{default:e}}a.default,t.tz=a.default,l.default,s.default,t.ZP=function(e){for(var t=arguments.length,n=Array(t>1?t-1:0),a=1;a1)||void 0===arguments[1]||arguments[1];n[e]=t};return 0===e&&r("first-child"),e===t-1&&r("last-child"),(0===e||e%2==0)&&r("even"),1===Math.abs(e%2)&&r("odd"),r("nth-child",e),n}},26189:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.mergeClasses=void 0;var r=a(n(17766)),o=a(n(48797)),i=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:[],n=e.default&&(0,o.default)(e.default)||{};return t.map(function(t){var o=e[t];return o&&(0,r.default)(o,function(e,t){n[t]||(n[t]={}),n[t]=i({},n[t],o[t])}),t}),n};t.default=l},72093:function(e,t,n){var r=n(24645);function o(e,t){var n,o,i,a=null;if(!e||"string"!=typeof e)return a;for(var l=r(e),s="function"==typeof t,c=0,u=l.length;c - * @license MIT - */e.exports=function(e){return null!=e&&null!=e.constructor&&"function"==typeof e.constructor.isBuffer&&e.constructor.isBuffer(e)}},83940:function(e,t,n){"use strict";n.d(t,{q:function(){return o}});var r=n(86006);function o(e,t){var n,o,i;return n=t||null,o=function(t){return e.forEach(function(e){return"function"==typeof e?e(t):e&&(e.current=t),e})},(i=(0,r.useState)(function(){return{value:n,callback:o,facade:{get current(){return i.value},set current(value){var e=i.value;e!==value&&(i.value=value,i.callback(value,e))}}}})[0]).callback=o,i.facade}},11503:function(e,t,n){"use strict";n.d(t,{L:function(){return a}});var r=n(78466),o=n(86006),i=function(e){var t=e.sideCar,n=(0,r._T)(e,["sideCar"]);if(!t)throw Error("Sidecar: please provide `sideCar` property to import the right car");var i=t.read();if(!i)throw Error("Sidecar medium not found");return o.createElement(i,(0,r.pi)({},n))};function a(e,t){return e.useMedium(t),i}i.isSideCarExport=!0},37445:function(e,t,n){"use strict";n.d(t,{_:function(){return i}});var r=n(78466);function o(e){return e}function i(e){void 0===e&&(e={});var t,n,i,a=(void 
0===t&&(t=o),n=[],i=!1,{read:function(){if(i)throw Error("Sidecar: could not `read` from an `assigned` medium. `read` could be used only with `useMedium`.");return n.length?n[n.length-1]:null},useMedium:function(e){var r=t(e,i);return n.push(r),function(){n=n.filter(function(e){return e!==r})}},assignSyncMedium:function(e){for(i=!0;n.length;){var t=n;n=[],t.forEach(e)}n={push:function(t){return e(t)},filter:function(){return n}}},assignMedium:function(e){i=!0;var t=[];if(n.length){var r=n;n=[],r.forEach(e),t=n}var o=function(){var n=t;t=[],n.forEach(e)},a=function(){return Promise.resolve().then(o)};a(),n={push:function(e){t.push(e),a()},filter:function(e){return t=t.filter(e),n}}}});return a.options=(0,r.pi)({async:!0,ssr:!1},e),a}},98727:function(e,t,n){"use strict";/** - * @license React - * use-sync-external-store-shim.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var r=n(86006),o="function"==typeof Object.is?Object.is:function(e,t){return e===t&&(0!==e||1/e==1/t)||e!=e&&t!=t},i=r.useState,a=r.useEffect,l=r.useLayoutEffect,s=r.useDebugValue;function c(e){var t=e.getSnapshot;e=e.value;try{var n=t();return!o(e,n)}catch(e){return!0}}var u="undefined"==typeof window||void 0===window.document||void 0===window.document.createElement?function(e,t){return t()}:function(e,t){var n=t(),r=i({inst:{value:n,getSnapshot:t}}),o=r[0].inst,u=r[1];return l(function(){o.value=n,o.getSnapshot=t,c(o)&&u({inst:o})},[e,n,t]),a(function(){return c(o)&&u({inst:o}),e(function(){c(o)&&u({inst:o})})},[e]),s(n),n};t.useSyncExternalStore=void 0!==r.useSyncExternalStore?r.useSyncExternalStore:u},94464:function(e,t,n){"use strict";/** - * @license React - * use-sync-external-store-shim/with-selector.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var r=n(86006),o=n(3276),i="function"==typeof Object.is?Object.is:function(e,t){return e===t&&(0!==e||1/e==1/t)||e!=e&&t!=t},a=o.useSyncExternalStore,l=r.useRef,s=r.useEffect,c=r.useMemo,u=r.useDebugValue;t.useSyncExternalStoreWithSelector=function(e,t,n,r,o){var f=l(null);if(null===f.current){var d={hasValue:!1,value:null};f.current=d}else d=f.current;f=c(function(){function e(e){if(!s){if(s=!0,a=e,e=r(e),void 0!==o&&d.hasValue){var t=d.value;if(o(t,e))return l=t}return l=e}if(t=l,i(a,e))return t;var n=r(e);return void 0!==o&&o(t,n)?t:(a=e,l=n)}var a,l,s=!1,c=void 0===n?null:n;return[function(){return e(t())},null===c?void 0:function(){return e(c())}]},[t,n,r,o]);var p=a(e,f[0],f[1]);return s(function(){d.hasValue=!0,d.value=p},[p]),u(p),p}},3276:function(e,t,n){"use strict";e.exports=n(98727)},97737:function(e,t,n){"use strict";e.exports=n(94464)},86462:function(e,t,n){"use strict";let r;n.d(t,{Z:function(){return c}});let o="undefined"!=typeof crypto&&crypto.randomUUID&&crypto.randomUUID.bind(crypto);var i={randomUUID:o};let a=new Uint8Array(16);function l(){if(!r&&!(r="undefined"!=typeof crypto&&crypto.getRandomValues&&crypto.getRandomValues.bind(crypto)))throw Error("crypto.getRandomValues() not supported. 
See https://github.com/uuidjs/uuid#getrandomvalues-not-supported");return r(a)}let s=[];for(let e=0;e<256;++e)s.push((e+256).toString(16).slice(1));var c=function(e,t,n){if(i.randomUUID&&!t&&!e)return i.randomUUID();e=e||{};let r=e.random||(e.rng||l)();if(r[6]=15&r[6]|64,r[8]=63&r[8]|128,t){n=n||0;for(let e=0;e<16;++e)t[n+e]=r[e];return t}return function(e,t=0){return(s[e[t+0]]+s[e[t+1]]+s[e[t+2]]+s[e[t+3]]+"-"+s[e[t+4]]+s[e[t+5]]+"-"+s[e[t+6]]+s[e[t+7]]+"-"+s[e[t+8]]+s[e[t+9]]+"-"+s[e[t+10]]+s[e[t+11]]+s[e[t+12]]+s[e[t+13]]+s[e[t+14]]+s[e[t+15]]).toLowerCase()}(r)}},16394:function(e){/*! - * Determine if an object is a Buffer - * - * @author Feross Aboukhadijeh - * @license MIT - */e.exports=function(e){return null!=e&&null!=e.constructor&&"function"==typeof e.constructor.isBuffer&&e.constructor.isBuffer(e)}},75478:function(e){e.exports={area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0}},56509:function(e,t,n){"use strict";function r(e,t){return Array(t+1).join(e)}n.r(t);var o,i,a=["ADDRESS","ARTICLE","ASIDE","AUDIO","BLOCKQUOTE","BODY","CANVAS","CENTER","DD","DIR","DIV","DL","DT","FIELDSET","FIGCAPTION","FIGURE","FOOTER","FORM","FRAMESET","H1","H2","H3","H4","H5","H6","HEADER","HGROUP","HR","HTML","ISINDEX","LI","MAIN","MENU","NAV","NOFRAMES","NOSCRIPT","OL","OUTPUT","P","PRE","SECTION","TABLE","TBODY","TD","TFOOT","TH","THEAD","TR","UL"];function l(e){return f(e,a)}var s=["AREA","BASE","BR","COL","COMMAND","EMBED","HR","IMG","INPUT","KEYGEN","LINK","META","PARAM","SOURCE","TRACK","WBR"];function c(e){return f(e,s)}var u=["A","TABLE","THEAD","TBODY","TFOOT","TH","TD","IFRAME","SCRIPT","AUDIO","VIDEO"];function f(e,t){return t.indexOf(e.nodeName)>=0}function d(e,t){return e.getElementsByTagName&&t.some(function(t){return e.getElementsByTagName(t).length})}var p={};function h(e){return e?e.replace(/(\n+\s*)+/g,"\n"):""}function g(e){for(var t in this.options=e,this._keep=[],this._remove=[],this.blankRule={replacement:e.blankReplacement},this.keepReplacement=e.keepReplacement,this.defaultRule={replacement:e.defaultReplacement},this.array=[],e.rules)this.array.push(e.rules[t])}function m(e,t,n){for(var r=0;r-1)return!0}else if("function"==typeof r){if(r.call(e,t,n))return!0}else throw TypeError("`filter` needs to be a string, array, or function")}(o,t,n))return o}}function b(e){var t=e.nextSibling||e.parentNode;return e.parentNode.removeChild(e),t}function v(e,t,n){return e&&e.parentNode===t||n(t)?t.nextSibling||t.parentNode:t.firstChild||t.nextSibling||t.parentNode}p.paragraph={filter:"p",replacement:function(e){return"\n\n"+e+"\n\n"}},p.lineBreak={filter:"br",replacement:function(e,t,n){return n.br+"\n"}},p.heading={filter:["h1","h2","h3","h4","h5","h6"],replacement:function(e,t,n){var o=Number(t.nodeName.charAt(1));if("setext"!==n.headingStyle||!(o<3))return"\n\n"+r("#",o)+" "+e+"\n\n";var i=r(1===o?"=":"-",e.length);return"\n\n"+e+"\n"+i+"\n\n"}},p.blockquote={filter:"blockquote",replacement:function(e){return"\n\n"+(e=(e=e.replace(/^\n+|\n+$/g,"")).replace(/^/gm,"> "))+"\n\n"}},p.list={filter:["ul","ol"],replacement:function(e,t){var n=t.parentNode;return"LI"===n.nodeName&&n.lastElementChild===t?"\n"+e:"\n\n"+e+"\n\n"}},p.listItem={filter:"li",replacement:function(e,t,n){e=e.replace(/^\n+/,"").replace(/\n+$/,"\n").replace(/\n/gm,"\n ");var r=n.bulletListMarker+" ",o=t.parentNode;if("OL"===o.nodeName){var i=o.getAttribute("start"),a=Array.prototype.indexOf.call(o.children,t);r=(i?Number(i)+a:a+1)+". 
"}return r+e+(t.nextSibling&&!/\n$/.test(e)?"\n":"")}},p.indentedCodeBlock={filter:function(e,t){return"indented"===t.codeBlockStyle&&"PRE"===e.nodeName&&e.firstChild&&"CODE"===e.firstChild.nodeName},replacement:function(e,t,n){return"\n\n "+t.firstChild.textContent.replace(/\n/g,"\n ")+"\n\n"}},p.fencedCodeBlock={filter:function(e,t){return"fenced"===t.codeBlockStyle&&"PRE"===e.nodeName&&e.firstChild&&"CODE"===e.firstChild.nodeName},replacement:function(e,t,n){for(var o,i=((t.firstChild.getAttribute("class")||"").match(/language-(\S+)/)||[null,""])[1],a=t.firstChild.textContent,l=n.fence.charAt(0),s=3,c=RegExp("^"+l+"{3,}","gm");o=c.exec(a);)o[0].length>=s&&(s=o[0].length+1);var u=r(l,s);return"\n\n"+u+i+"\n"+a.replace(/\n$/,"")+"\n"+u+"\n\n"}},p.horizontalRule={filter:"hr",replacement:function(e,t,n){return"\n\n"+n.hr+"\n\n"}},p.inlineLink={filter:function(e,t){return"inlined"===t.linkStyle&&"A"===e.nodeName&&e.getAttribute("href")},replacement:function(e,t){var n=t.getAttribute("href"),r=h(t.getAttribute("title"));return r&&(r=' "'+r+'"'),"["+e+"]("+n+r+")"}},p.referenceLink={filter:function(e,t){return"referenced"===t.linkStyle&&"A"===e.nodeName&&e.getAttribute("href")},replacement:function(e,t,n){var r,o,i=t.getAttribute("href"),a=h(t.getAttribute("title"));switch(a&&(a=' "'+a+'"'),n.linkReferenceStyle){case"collapsed":r="["+e+"][]",o="["+e+"]: "+i+a;break;case"shortcut":r="["+e+"]",o="["+e+"]: "+i+a;break;default:var l=this.references.length+1;r="["+e+"]["+l+"]",o="["+l+"]: "+i+a}return this.references.push(o),r},references:[],append:function(e){var t="";return this.references.length&&(t="\n\n"+this.references.join("\n")+"\n\n",this.references=[]),t}},p.emphasis={filter:["em","i"],replacement:function(e,t,n){return e.trim()?n.emDelimiter+e+n.emDelimiter:""}},p.strong={filter:["strong","b"],replacement:function(e,t,n){return e.trim()?n.strongDelimiter+e+n.strongDelimiter:""}},p.code={filter:function(e){var t=e.previousSibling||e.nextSibling,n="PRE"===e.parentNode.nodeName&&!t;return"CODE"===e.nodeName&&!n},replacement:function(e){if(!e)return"";e=e.replace(/\r?\n|\r/g," ");for(var t=/^`|^ .*?[^ ].* $|`$/.test(e)?" ":"",n="`",r=e.match(/`+/gm)||[];-1!==r.indexOf(n);)n+="`";return n+t+e+t+n}},p.image={filter:"img",replacement:function(e,t){var n=h(t.getAttribute("alt")),r=t.getAttribute("src")||"",o=h(t.getAttribute("title"));return r?"!["+n+"]("+r+(o?' 
"'+o+'"':"")+")":""}},g.prototype={add:function(e,t){this.array.unshift(t)},keep:function(e){this._keep.unshift({filter:e,replacement:this.keepReplacement})},remove:function(e){this._remove.unshift({filter:e,replacement:function(){return""}})},forNode:function(e){var t;return e.isBlank?this.blankRule:(t=m(this.array,e,this.options))||(t=m(this._keep,e,this.options))||(t=m(this._remove,e,this.options))?t:this.defaultRule},forEach:function(e){for(var t=0;t'+e+"","text/html").getElementById("turndown-root"):e.cloneNode(!0),isBlock:l,isVoid:c,isPre:t.preformattedCode?E:null}),n}function E(e){return"PRE"===e.nodeName||"CODE"===e.nodeName}function S(e,t){return e.isBlock=l(e),e.isCode="CODE"===e.nodeName||e.parentNode.isCode,e.isBlank=!c(e)&&!f(e,u)&&/^\s*$/i.test(e.textContent)&&!d(e,s)&&!d(e,u),e.flankingWhitespace=function(e,t){if(e.isBlock||t.preformattedCode&&e.isCode)return{leading:"",trailing:""};var n,r={leading:(n=e.textContent.match(/^(([ \t\r\n]*)(\s*))(?:(?=\S)[\s\S]*\S)?((\s*?)([ \t\r\n]*))$/))[1],leadingAscii:n[2],leadingNonAscii:n[3],trailing:n[4],trailingNonAscii:n[5],trailingAscii:n[6]};return r.leadingAscii&&k("left",e,t)&&(r.leading=r.leadingNonAscii),r.trailingAscii&&k("right",e,t)&&(r.trailing=r.trailingNonAscii),{leading:r.leading,trailing:r.trailing}}(e,t),e}function k(e,t,n){var r,o,i;return"left"===e?(r=t.previousSibling,o=/ $/):(r=t.nextSibling,o=/^ /),r&&(3===r.nodeType?i=o.test(r.nodeValue):n.preformattedCode&&"CODE"===r.nodeName?i=!1:1!==r.nodeType||l(r)||(i=o.test(r.textContent))),i}var _=Array.prototype.reduce;function O(e){if(!(this instanceof O))return new O(e);this.options=function(e){for(var t=1;t0&&"\n"===e[t-1];)t--;return e.substring(0,t)}(e),r=t.replace(/^\n*/,""),o=Math.max(e.length-n.length,t.length-r.length);return n+"\n\n".substring(0,o)+r}O.prototype={turndown:function(e){if(!(null!=e&&("string"==typeof e||e.nodeType&&(1===e.nodeType||9===e.nodeType||11===e.nodeType))))throw TypeError(e+" is not a string, or an element/document/fragment node.");return""===e?"":A.call(this,C.call(this,new w(e,this.options)))},use:function(e){if(Array.isArray(e))for(var t=0;t/g,">").replace(/"/g,""").replace(/'/g,"'")}function r(e,...t){let n=Object.create(null);for(let t in e)n[t]=e[t];return t.forEach(function(e){for(let t in e)n[t]=e[t]}),n}let o=e=>!!e.scope,i=(e,{prefix:t})=>{if(e.startsWith("language:"))return e.replace("language:","language-");if(e.includes(".")){let n=e.split(".");return[`${t}${n.shift()}`,...n.map((e,t)=>`${e}${"_".repeat(t+1)}`)].join(" ")}return`${t}${e}`};class a{constructor(e,t){this.buffer="",this.classPrefix=t.classPrefix,e.walk(this)}addText(e){this.buffer+=n(e)}openNode(e){if(!o(e))return;let t=i(e.scope,{prefix:this.classPrefix});this.span(t)}closeNode(e){o(e)&&(this.buffer+="")}value(){return this.buffer}span(e){this.buffer+=``}}let l=(e={})=>{let t={children:[]};return Object.assign(t,e),t};class s{constructor(){this.rootNode=l(),this.stack=[this.rootNode]}get top(){return this.stack[this.stack.length-1]}get root(){return this.rootNode}add(e){this.top.children.push(e)}openNode(e){let t=l({scope:e});this.add(t),this.stack.push(t)}closeNode(){if(this.stack.length>1)return this.stack.pop()}closeAllNodes(){for(;this.closeNode(););}toJSON(){return JSON.stringify(this.rootNode,null,4)}walk(e){return this.constructor._walk(e,this.rootNode)}static _walk(e,t){return"string"==typeof t?e.addText(t):t.children&&(e.openNode(t),t.children.forEach(t=>this._walk(e,t)),e.closeNode(t)),e}static _collapse(e){"string"!=typeof 
e&&e.children&&(e.children.every(e=>"string"==typeof e)?e.children=[e.children.join("")]:e.children.forEach(e=>{s._collapse(e)}))}}class c extends s{constructor(e){super(),this.options=e}addText(e){""!==e&&this.add(e)}startScope(e){this.openNode(e)}endScope(){this.closeNode()}__addSublanguage(e,t){let n=e.root;t&&(n.scope=`language:${t}`),this.add(n)}toHTML(){let e=new a(this,this.options);return e.value()}finalize(){return this.closeAllNodes(),!0}}function u(e){return e?"string"==typeof e?e:e.source:null}function f(e){return h("(?=",e,")")}function d(e){return h("(?:",e,")*")}function p(e){return h("(?:",e,")?")}function h(...e){let t=e.map(e=>u(e)).join("");return t}function g(...e){let t=function(e){let t=e[e.length-1];return"object"==typeof t&&t.constructor===Object?(e.splice(e.length-1,1),t):{}}(e),n="("+(t.capture?"":"?:")+e.map(e=>u(e)).join("|")+")";return n}function m(e){return RegExp(e.toString()+"|").exec("").length-1}let b=/\[(?:[^\\\]]|\\.)*\]|\(\??|\\([1-9][0-9]*)|\\./;function v(e,{joinWith:t}){let n=0;return e.map(e=>{n+=1;let t=n,r=u(e),o="";for(;r.length>0;){let e=b.exec(r);if(!e){o+=r;break}o+=r.substring(0,e.index),r=r.substring(e.index+e[0].length),"\\"===e[0][0]&&e[1]?o+="\\"+String(Number(e[1])+t):(o+=e[0],"("===e[0]&&n++)}return o}).map(e=>`(${e})`).join(t)}let y="[a-zA-Z]\\w*",x="[a-zA-Z_]\\w*",w="\\b\\d+(\\.\\d+)?",E="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",S="\\b(0b[01]+)",k={begin:"\\\\[\\s\\S]",relevance:0},_=function(e,t,n={}){let o=r({scope:"comment",begin:e,end:t,contains:[]},n);o.contains.push({scope:"doctag",begin:"[ ]*(?=(TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):)",end:/(TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):/,excludeBegin:!0,relevance:0});let i=g("I","a","is","so","us","to","at","if","in","it","on",/[A-Za-z]+['](d|ve|re|ll|t|s|n)/,/[A-Za-z]+[-][a-z]+/,/[A-Za-z][a-z]{2,}/);return o.contains.push({begin:h(/[ ]+/,"(",i,/[.]?[:]?([.][ ]|[ ])/,"){3}")}),o},O=_("//","$"),C=_("/\\*","\\*/"),A=_("#","$");var N=Object.freeze({__proto__:null,MATCH_NOTHING_RE:/\b\B/,IDENT_RE:y,UNDERSCORE_IDENT_RE:x,NUMBER_RE:w,C_NUMBER_RE:E,BINARY_NUMBER_RE:S,RE_STARTERS_RE:"!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",SHEBANG:(e={})=>{let t=/^#![ ]*\//;return e.binary&&(e.begin=h(t,/.*\b/,e.binary,/\b.*/)),r({scope:"meta",begin:t,end:/$/,relevance:0,"on:begin":(e,t)=>{0!==e.index&&t.ignoreMatch()}},e)},BACKSLASH_ESCAPE:k,APOS_STRING_MODE:{scope:"string",begin:"'",end:"'",illegal:"\\n",contains:[k]},QUOTE_STRING_MODE:{scope:"string",begin:'"',end:'"',illegal:"\\n",contains:[k]},PHRASAL_WORDS_MODE:{begin:/\b(a|an|the|are|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|they|like|more)\b/},COMMENT:_,C_LINE_COMMENT_MODE:O,C_BLOCK_COMMENT_MODE:C,HASH_COMMENT_MODE:A,NUMBER_MODE:{scope:"number",begin:w,relevance:0},C_NUMBER_MODE:{scope:"number",begin:E,relevance:0},BINARY_NUMBER_MODE:{scope:"number",begin:S,relevance:0},REGEXP_MODE:{begin:/(?=\/[^/\n]*\/)/,contains:[{scope:"regexp",begin:/\//,end:/\/[gimuy]*/,illegal:/\n/,contains:[k,{begin:/\[/,end:/\]/,relevance:0,contains:[k]}]}]},TITLE_MODE:{scope:"title",begin:y,relevance:0},UNDERSCORE_TITLE_MODE:{scope:"title",begin:x,relevance:0},METHOD_GUARD:{begin:"\\.\\s*"+x,relevance:0},END_SAME_AS_BEGIN:function(e){return Object.assign(e,{"on:begin":(e,t)=>{t.data._beginMatch=e[1]},"on:end":(e,t)=>{t.data._beginMatch!==e[1]&&t.ignoreMatch()}})}});function 
R(e,t){let n=e.input[e.index-1];"."===n&&t.ignoreMatch()}function T(e,t){void 0!==e.className&&(e.scope=e.className,delete e.className)}function P(e,t){t&&e.beginKeywords&&(e.begin="\\b("+e.beginKeywords.split(" ").join("|")+")(?!\\.)(?=\\b|\\s)",e.__beforeBegin=R,e.keywords=e.keywords||e.beginKeywords,delete e.beginKeywords,void 0===e.relevance&&(e.relevance=0))}function M(e,t){Array.isArray(e.illegal)&&(e.illegal=g(...e.illegal))}function j(e,t){if(e.match){if(e.begin||e.end)throw Error("begin & end are not supported with match");e.begin=e.match,delete e.match}}function L(e,t){void 0===e.relevance&&(e.relevance=1)}let I=(e,t)=>{if(!e.beforeMatch)return;if(e.starts)throw Error("beforeMatch cannot be used with starts");let n=Object.assign({},e);Object.keys(e).forEach(t=>{delete e[t]}),e.keywords=n.keywords,e.begin=h(n.beforeMatch,f(n.begin)),e.starts={relevance:0,contains:[Object.assign(n,{endsParent:!0})]},e.relevance=0,delete n.beforeMatch},D=["of","and","for","in","not","or","if","then","parent","list","value"],F={},B=e=>{console.error(e)},z=(e,...t)=>{console.log(`WARN: ${e}`,...t)},$=(e,t)=>{F[`${e}/${t}`]||(console.log(`Deprecated as of ${e}. ${t}`),F[`${e}/${t}`]=!0)},U=Error();function H(e,t,{key:n}){let r=0,o=e[n],i={},a={};for(let e=1;e<=t.length;e++)a[e+r]=o[e],i[e+r]=!0,r+=m(t[e-1]);e[n]=a,e[n]._emit=i,e[n]._multi=!0}function Z(e){e.scope&&"object"==typeof e.scope&&null!==e.scope&&(e.beginScope=e.scope,delete e.scope),"string"==typeof e.beginScope&&(e.beginScope={_wrap:e.beginScope}),"string"==typeof e.endScope&&(e.endScope={_wrap:e.endScope}),function(e){if(Array.isArray(e.begin)){if(e.skip||e.excludeBegin||e.returnBegin)throw B("skip, excludeBegin, returnBegin not compatible with beginScope: {}"),U;if("object"!=typeof e.beginScope||null===e.beginScope)throw B("beginScope must be object"),U;H(e,e.begin,{key:"beginScope"}),e.begin=v(e.begin,{joinWith:""})}}(e),function(e){if(Array.isArray(e.end)){if(e.skip||e.excludeEnd||e.returnEnd)throw B("skip, excludeEnd, returnEnd not compatible with endScope: {}"),U;if("object"!=typeof e.endScope||null===e.endScope)throw B("endScope must be object"),U;H(e,e.end,{key:"endScope"}),e.end=v(e.end,{joinWith:""})}}(e)}class q extends Error{constructor(e,t){super(e),this.name="HTMLInjectionError",this.html=t}}let W=Symbol("nomatch"),V=function(e){let o=Object.create(null),i=Object.create(null),a=[],l=!0,s="Could not find the language '{}', did you forget to load/include a language module?",b={disableAutodetect:!0,name:"Plain text",contains:[]},y={ignoreUnescapedHTML:!1,throwUnescapedHTML:!1,noHighlightRe:/^(no-?highlight)$/i,languageDetectRe:/\blang(?:uage)?-([\w-]+)\b/i,classPrefix:"hljs-",cssSelector:"pre code",languages:null,__emitter:c};function x(e){return y.noHighlightRe.test(e)}function w(e,t,n){let r="",o="";"object"==typeof t?(r=e,n=t.ignoreIllegals,o=t.language):($("10.7.0","highlight(lang, code, ...args) has been deprecated."),$("10.7.0","Please use highlight(code, options) instead.\nhttps://github.com/highlightjs/highlight.js/issues/2277"),o=e,r=t),void 0===n&&(n=!0);let i={code:r,language:o};F("before:highlight",i);let a=i.result?i.result:E(i.language,i.code,n);return a.code=i.code,F("after:highlight",a),a}function E(e,i,a,c){let f=Object.create(null);function d(){if(!A.keywords){R.addText(F);return}let e=0;A.keywordPatternRe.lastIndex=0;let t=A.keywordPatternRe.exec(F),n="";for(;t;){n+=F.substring(e,t.index);let 
r=k.case_insensitive?t[0].toLowerCase():t[0],o=A.keywords[r];if(o){let[e,i]=o;if(R.addText(n),n="",f[r]=(f[r]||0)+1,f[r]<=7&&(z+=i),e.startsWith("_"))n+=t[0];else{let n=k.classNameAliases[e]||e;h(t[0],n)}}else n+=t[0];e=A.keywordPatternRe.lastIndex,t=A.keywordPatternRe.exec(F)}n+=F.substring(e),R.addText(n)}function p(){null!=A.subLanguage?function(){if(""===F)return;let e=null;if("string"==typeof A.subLanguage){if(!o[A.subLanguage]){R.addText(F);return}e=E(A.subLanguage,F,!0,N[A.subLanguage]),N[A.subLanguage]=e._top}else e=S(F,A.subLanguage.length?A.subLanguage:null);A.relevance>0&&(z+=e.relevance),R.__addSublanguage(e._emitter,e.language)}():d(),F=""}function h(e,t){""!==e&&(R.startScope(t),R.addText(e),R.endScope())}function g(e,t){let n=1,r=t.length-1;for(;n<=r;){if(!e._emit[n]){n++;continue}let r=k.classNameAliases[e[n]]||e[n],o=t[n];r?h(o,r):(F=o,d(),F=""),n++}}function b(e,t){return e.scope&&"string"==typeof e.scope&&R.openNode(k.classNameAliases[e.scope]||e.scope),e.beginScope&&(e.beginScope._wrap?(h(F,k.classNameAliases[e.beginScope._wrap]||e.beginScope._wrap),F=""):e.beginScope._multi&&(g(e.beginScope,t),F="")),A=Object.create(e,{parent:{value:A}})}let x={};function w(n,r){let o=r&&r[0];if(F+=n,null==o)return p(),0;if("begin"===x.type&&"end"===r.type&&x.index===r.index&&""===o){if(F+=i.slice(r.index,r.index+1),!l){let t=Error(`0 width match regex (${e})`);throw t.languageName=e,t.badRule=x.rule,t}return 1}if(x=r,"begin"===r.type)return function(e){let n=e[0],r=e.rule,o=new t(r),i=[r.__beforeBegin,r["on:begin"]];for(let t of i)if(t&&(t(e,o),o.isMatchIgnored))return 0===A.matcher.regexIndex?(F+=n[0],1):(H=!0,0);return r.skip?F+=n:(r.excludeBegin&&(F+=n),p(),r.returnBegin||r.excludeBegin||(F=n)),b(r,e),r.returnBegin?0:n.length}(r);if("illegal"!==r.type||a){if("end"===r.type){let e=function(e){let n=e[0],r=i.substring(e.index),o=function e(n,r,o){let i=function(e,t){let n=e&&e.exec(t);return n&&0===n.index}(n.endRe,o);if(i){if(n["on:end"]){let e=new t(n);n["on:end"](r,e),e.isMatchIgnored&&(i=!1)}if(i){for(;n.endsParent&&n.parent;)n=n.parent;return n}}if(n.endsWithParent)return e(n.parent,r,o)}(A,e,r);if(!o)return W;let a=A;A.endScope&&A.endScope._wrap?(p(),h(n,A.endScope._wrap)):A.endScope&&A.endScope._multi?(p(),g(A.endScope,e)):a.skip?F+=n:(a.returnEnd||a.excludeEnd||(F+=n),p(),a.excludeEnd&&(F=n));do A.scope&&R.closeNode(),A.skip||A.subLanguage||(z+=A.relevance),A=A.parent;while(A!==o.parent);return o.starts&&b(o.starts,e),a.returnEnd?0:n.length}(r);if(e!==W)return e}}else{let e=Error('Illegal lexeme "'+o+'" for mode "'+(A.scope||"")+'"');throw e.mode=A,e}if("illegal"===r.type&&""===o)return 1;if(U>1e5&&U>3*r.index){let e=Error("potential infinite loop, way more iterations than matches");throw e}return F+=o,o.length}let k=C(e);if(!k)throw B(s.replace("{}",e)),Error('Unknown language: "'+e+'"');let _=function(e){function t(t,n){return RegExp(u(t),"m"+(e.case_insensitive?"i":"")+(e.unicodeRegex?"u":"")+(n?"g":""))}class n{constructor(){this.matchIndexes={},this.regexes=[],this.matchAt=1,this.position=0}addRule(e,t){t.position=this.position++,this.matchIndexes[this.matchAt]=t,this.regexes.push([t,e]),this.matchAt+=m(e)+1}compile(){0===this.regexes.length&&(this.exec=()=>null);let e=this.regexes.map(e=>e[1]);this.matcherRe=t(v(e,{joinWith:"|"}),!0),this.lastIndex=0}exec(e){this.matcherRe.lastIndex=this.lastIndex;let t=this.matcherRe.exec(e);if(!t)return null;let n=t.findIndex((e,t)=>t>0&&void 0!==e),r=this.matchIndexes[n];return t.splice(0,n),Object.assign(t,r)}}class 
o{constructor(){this.rules=[],this.multiRegexes=[],this.count=0,this.lastIndex=0,this.regexIndex=0}getMatcher(e){if(this.multiRegexes[e])return this.multiRegexes[e];let t=new n;return this.rules.slice(e).forEach(([e,n])=>t.addRule(e,n)),t.compile(),this.multiRegexes[e]=t,t}resumingScanAtSamePosition(){return 0!==this.regexIndex}considerAll(){this.regexIndex=0}addRule(e,t){this.rules.push([e,t]),"begin"===t.type&&this.count++}exec(e){let t=this.getMatcher(this.regexIndex);t.lastIndex=this.lastIndex;let n=t.exec(e);if(this.resumingScanAtSamePosition()){if(n&&n.index===this.lastIndex);else{let t=this.getMatcher(0);t.lastIndex=this.lastIndex+1,n=t.exec(e)}}return n&&(this.regexIndex+=n.position+1,this.regexIndex===this.count&&this.considerAll()),n}}if(e.compilerExtensions||(e.compilerExtensions=[]),e.contains&&e.contains.includes("self"))throw Error("ERR: contains `self` is not supported at the top-level of a language. See documentation.");return e.classNameAliases=r(e.classNameAliases||{}),function n(i,a){if(i.isCompiled)return i;[T,j,Z,I].forEach(e=>e(i,a)),e.compilerExtensions.forEach(e=>e(i,a)),i.__beforeBegin=null,[P,M,L].forEach(e=>e(i,a)),i.isCompiled=!0;let l=null;return"object"==typeof i.keywords&&i.keywords.$pattern&&(i.keywords=Object.assign({},i.keywords),l=i.keywords.$pattern,delete i.keywords.$pattern),l=l||/\w+/,i.keywords&&(i.keywords=function e(t,n,r="keyword"){let o=Object.create(null);return"string"==typeof t?i(r,t.split(" ")):Array.isArray(t)?i(r,t):Object.keys(t).forEach(function(r){Object.assign(o,e(t[r],n,r))}),o;function i(e,t){n&&(t=t.map(e=>e.toLowerCase())),t.forEach(function(t){var n,r;let i=t.split("|");o[i[0]]=[e,(n=i[0],(r=i[1])?Number(r):D.includes(n.toLowerCase())?0:1)]})}}(i.keywords,e.case_insensitive)),i.keywordPatternRe=t(l,!0),a&&(i.begin||(i.begin=/\B|\b/),i.beginRe=t(i.begin),i.end||i.endsWithParent||(i.end=/\B|\b/),i.end&&(i.endRe=t(i.end)),i.terminatorEnd=u(i.end)||"",i.endsWithParent&&a.terminatorEnd&&(i.terminatorEnd+=(i.end?"|":"")+a.terminatorEnd)),i.illegal&&(i.illegalRe=t(i.illegal)),i.contains||(i.contains=[]),i.contains=[].concat(...i.contains.map(function(e){var t;return((t="self"===e?i:e).variants&&!t.cachedVariants&&(t.cachedVariants=t.variants.map(function(e){return r(t,{variants:null},e)})),t.cachedVariants)?t.cachedVariants:!function e(t){return!!t&&(t.endsWithParent||e(t.starts))}(t)?Object.isFrozen(t)?r(t):t:r(t,{starts:t.starts?r(t.starts):null})})),i.contains.forEach(function(e){n(e,i)}),i.starts&&n(i.starts,a),i.matcher=function(e){let t=new o;return e.contains.forEach(e=>t.addRule(e.begin,{rule:e,type:"begin"})),e.terminatorEnd&&t.addRule(e.terminatorEnd,{type:"end"}),e.illegal&&t.addRule(e.illegal,{type:"illegal"}),t}(i),i}(e)}(k),O="",A=c||_,N={},R=new y.__emitter(y);!function(){let e=[];for(let t=A;t!==k;t=t.parent)t.scope&&e.unshift(t.scope);e.forEach(e=>R.openNode(e))}();let F="",z=0,$=0,U=0,H=!1;try{if(k.__emitTokens)k.__emitTokens(i,R);else{for(A.matcher.considerAll();;){U++,H?H=!1:A.matcher.considerAll(),A.matcher.lastIndex=$;let e=A.matcher.exec(i);if(!e)break;let t=i.substring($,e.index),n=w(t,e);$=e.index+n}w(i.substring($))}return R.finalize(),O=R.toHTML(),{language:e,value:O,relevance:z,illegal:!1,_emitter:R,_top:A}}catch(t){if(t.message&&t.message.includes("Illegal"))return{language:e,value:n(i),illegal:!0,relevance:0,_illegalBy:{message:t.message,index:$,context:i.slice($-100,$+100),mode:t.mode,resultSoFar:O},_emitter:R};if(l)return{language:e,value:n(i),illegal:!1,relevance:0,errorRaised:t,_emitter:R,_top:A};throw 
t}}function S(e,t){t=t||y.languages||Object.keys(o);let r=function(e){let t={value:n(e),illegal:!1,relevance:0,_top:b,_emitter:new y.__emitter(y)};return t._emitter.addText(e),t}(e),i=t.filter(C).filter(R).map(t=>E(t,e,!1));i.unshift(r);let a=i.sort((e,t)=>{if(e.relevance!==t.relevance)return t.relevance-e.relevance;if(e.language&&t.language){if(C(e.language).supersetOf===t.language)return 1;if(C(t.language).supersetOf===e.language)return -1}return 0}),[l,s]=a;return l.secondBest=s,l}function k(e){let t=null,n=function(e){let t=e.className+" ";t+=e.parentNode?e.parentNode.className:"";let n=y.languageDetectRe.exec(t);if(n){let t=C(n[1]);return t||(z(s.replace("{}",n[1])),z("Falling back to no-highlight mode for this block.",e)),t?n[1]:"no-highlight"}return t.split(/\s+/).find(e=>x(e)||C(e))}(e);if(x(n))return;if(F("before:highlightElement",{el:e,language:n}),e.children.length>0&&(y.ignoreUnescapedHTML||(console.warn("One of your code blocks includes unescaped HTML. This is a potentially serious security risk."),console.warn("https://github.com/highlightjs/highlight.js/wiki/security"),console.warn("The element with unescaped HTML:"),console.warn(e)),y.throwUnescapedHTML)){let t=new q("One of your code blocks includes unescaped HTML.",e.innerHTML);throw t}t=e;let r=t.textContent,o=n?w(r,{language:n,ignoreIllegals:!0}):S(r);e.innerHTML=o.value,function(e,t,n){let r=t&&i[t]||n;e.classList.add("hljs"),e.classList.add(`language-${r}`)}(e,n,o.language),e.result={language:o.language,re:o.relevance,relevance:o.relevance},o.secondBest&&(e.secondBest={language:o.secondBest.language,relevance:o.secondBest.relevance}),F("after:highlightElement",{el:e,result:o,text:r})}let _=!1;function O(){if("loading"===document.readyState){_=!0;return}let e=document.querySelectorAll(y.cssSelector);e.forEach(k)}function C(e){return o[e=(e||"").toLowerCase()]||o[i[e]]}function A(e,{languageName:t}){"string"==typeof e&&(e=[e]),e.forEach(e=>{i[e.toLowerCase()]=t})}function R(e){let t=C(e);return t&&!t.disableAutodetect}function F(e,t){a.forEach(function(n){n[e]&&n[e](t)})}for(let t in"undefined"!=typeof window&&window.addEventListener&&window.addEventListener("DOMContentLoaded",function(){_&&O()},!1),Object.assign(e,{highlight:w,highlightAuto:S,highlightAll:O,highlightElement:k,highlightBlock:function(e){return $("10.7.0","highlightBlock will be removed entirely in v12.0"),$("10.7.0","Please use highlightElement now."),k(e)},configure:function(e){y=r(y,e)},initHighlighting:()=>{O(),$("10.6.0","initHighlighting() deprecated. Use highlightAll() now.")},initHighlightingOnLoad:function(){O(),$("10.6.0","initHighlightingOnLoad() deprecated. 
Use highlightAll() now.")},registerLanguage:function(t,n){let r=null;try{r=n(e)}catch(e){if(B("Language definition for '{}' could not be registered.".replace("{}",t)),l)B(e);else throw e;r=b}r.name||(r.name=t),o[t]=r,r.rawDefinition=n.bind(null,e),r.aliases&&A(r.aliases,{languageName:t})},unregisterLanguage:function(e){for(let t of(delete o[e],Object.keys(i)))i[t]===e&&delete i[t]},listLanguages:function(){return Object.keys(o)},getLanguage:C,registerAliases:A,autoDetection:R,inherit:r,addPlugin:function(e){var t;(t=e)["before:highlightBlock"]&&!t["before:highlightElement"]&&(t["before:highlightElement"]=e=>{t["before:highlightBlock"](Object.assign({block:e.el},e))}),t["after:highlightBlock"]&&!t["after:highlightElement"]&&(t["after:highlightElement"]=e=>{t["after:highlightBlock"](Object.assign({block:e.el},e))}),a.push(e)},removePlugin:function(e){let t=a.indexOf(e);-1!==t&&a.splice(t,1)}}),e.debugMode=function(){l=!1},e.safeMode=function(){l=!0},e.versionString="11.8.0",e.regex={concat:h,lookahead:f,either:g,optional:p,anyNumberOfTimes:d},N)"object"==typeof N[t]&&function e(t){return t instanceof Map?t.clear=t.delete=t.set=function(){throw Error("map is read-only")}:t instanceof Set&&(t.add=t.clear=t.delete=function(){throw Error("set is read-only")}),Object.freeze(t),Object.getOwnPropertyNames(t).forEach(n=>{let r=t[n],o=typeof r;"object"!==o&&"function"!==o||Object.isFrozen(r)||e(r)}),t}(N[t]);return Object.assign(e,N),e},G=V({});G.newInstance=()=>V({}),e.exports=G,G.HighlightJS=G,G.default=G},86351:function(e,t,n){"use strict";function r(e){if(Array.isArray(e))return e}n.d(t,{Z:function(){return r}})},18050:function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}n.d(t,{Z:function(){return r}})},49449:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(58774);function o(e,t){for(var n=0;ne.length)&&(t=e.length);for(var n=0,r=Array(t);n{let{placement:r="bottom",strategy:o="absolute",middleware:i=[],platform:a}=n,s=i.filter(Boolean),c=await (null==a.isRTL?void 0:a.isRTL(t)),u=await a.getElementRects({reference:e,floating:t,strategy:o}),{x:f,y:d}=l(u,r,c),p=r,h={},g=0;for(let n=0;n({name:"arrow",options:e,async fn(t){let{x:n,y:i,placement:l,rects:s,platform:f,elements:d}=t,{element:g,padding:m=0}=c(e,t)||{};if(null==g)return{};let b=u(m),v={x:n,y:i},y=a(l),x=o(y),w=await f.getDimensions(g),E="y"===y,S=E?"clientHeight":"clientWidth",k=s.reference[x]+s.reference[y]-v[y]-s.floating[x],_=v[y]-s.reference[y],O=await (null==f.getOffsetParent?void 0:f.getOffsetParent(g)),C=O?O[S]:0;C&&await (null==f.isElement?void 0:f.isElement(O))||(C=d.floating[S]||s.floating[x]);let A=C/2-w[x]/2-1,N=p(b[E?"top":"left"],A),R=p(b[E?"bottom":"right"],A),T=C-w[x]-R,P=C/2-w[x]/2+(k/2-_/2),M=h(N,p(P,T)),j=null!=r(l)&&P!=M&&s.reference[x]/2-(Pe.concat(t,t+"-start",t+"-end"),[]),{left:"right",right:"left",bottom:"top",top:"bottom"});function v(e){return e.replace(/left|right|bottom|top/g,e=>b[e])}let y={start:"end",end:"start"};function x(e){return e.replace(/start|end/g,e=>y[e])}let w=function(e){return void 0===e&&(e={}),{name:"flip",options:e,async fn(t){var n,l,s,u;let{placement:f,middlewareData:p,rects:h,initialPlacement:g,platform:m,elements:b}=t,{mainAxis:y=!0,crossAxis:w=!0,fallbackPlacements:E,fallbackStrategy:S="bestFit",fallbackAxisSideDirection:k="none",flipAlignment:_=!0,...O}=c(e,t),C=i(f),A=i(g)===g,N=await (null==m.isRTL?void 0:m.isRTL(b.floating)),R=E||(A||!_?[v(g)]:function(e){let 
t=v(e);return[x(e),t,x(t)]}(g));E||"none"===k||R.push(...function(e,t,n,o){let a=r(e),l=function(e,t,n){let r=["left","right"],o=["right","left"];switch(e){case"top":case"bottom":return n?t?o:r:t?r:o;case"left":case"right":return t?["top","bottom"]:["bottom","top"];default:return[]}}(i(e),"start"===n,o);return a&&(l=l.map(e=>e+"-"+a),t&&(l=l.concat(l.map(x)))),l}(g,_,k,N));let T=[g,...R],P=await d(t,O),M=[],j=(null==(n=p.flip)?void 0:n.overflows)||[];if(y&&M.push(P[C]),w){let{main:e,cross:t}=function(e,t,n){void 0===n&&(n=!1);let i=r(e),l=a(e),s=o(l),c="x"===l?i===(n?"end":"start")?"right":"left":"start"===i?"bottom":"top";return t.reference[s]>t.floating[s]&&(c=v(c)),{main:c,cross:v(c)}}(f,h,N);M.push(P[e],P[t])}if(j=[...j,{placement:f,overflows:M}],!M.every(e=>e<=0)){let e=((null==(l=p.flip)?void 0:l.index)||0)+1,t=T[e];if(t)return{data:{index:e,overflows:j},reset:{placement:t}};let n=null==(s=j.filter(e=>e.overflows[0]<=0).sort((e,t)=>e.overflows[1]-t.overflows[1])[0])?void 0:s.placement;if(!n)switch(S){case"bestFit":{let e=null==(u=j.map(e=>[e.placement,e.overflows.filter(e=>e>0).reduce((e,t)=>e+t,0)]).sort((e,t)=>e[1]-t[1])[0])?void 0:u[0];e&&(n=e);break}case"initialPlacement":n=g}if(f!==n)return{reset:{placement:n}}}return{}}}};function E(e,t){return{top:e.top-t.height,right:e.right-t.width,bottom:e.bottom-t.height,left:e.left-t.width}}function S(e){return m.some(t=>e[t]>=0)}let k=function(e){return void 0===e&&(e={}),{name:"hide",options:e,async fn(t){let{rects:n}=t,{strategy:r="referenceHidden",...o}=c(e,t);switch(r){case"referenceHidden":{let e=E(await d(t,{...o,elementContext:"reference"}),n.reference);return{data:{referenceHiddenOffsets:e,referenceHidden:S(e)}}}case"escaped":{let e=E(await d(t,{...o,altBoundary:!0}),n.floating);return{data:{escapedOffsets:e,escaped:S(e)}}}default:return{}}}}},_=function(e){return void 0===e&&(e=0),{name:"offset",options:e,async fn(t){let{x:n,y:o}=t,l=await async function(e,t){let{placement:n,platform:o,elements:l}=e,s=await (null==o.isRTL?void 0:o.isRTL(l.floating)),u=i(n),f=r(n),d="x"===a(n),p=["left","top"].includes(u)?-1:1,h=s&&d?-1:1,g=c(t,e),{mainAxis:m,crossAxis:b,alignmentAxis:v}="number"==typeof g?{mainAxis:g,crossAxis:0,alignmentAxis:null}:{mainAxis:0,crossAxis:0,alignmentAxis:null,...g};return f&&"number"==typeof v&&(b="end"===f?-1*v:v),d?{x:b*h,y:m*p}:{x:m*p,y:b*h}}(t,e);return{x:n+l.x,y:o+l.y,data:l}}}};function O(e){return"x"===e?"y":"x"}let C=function(e){return void 0===e&&(e={}),{name:"shift",options:e,async fn(t){let{x:n,y:r,placement:o}=t,{mainAxis:l=!0,crossAxis:s=!1,limiter:u={fn:e=>{let{x:t,y:n}=e;return{x:t,y:n}}},...f}=c(e,t),g={x:n,y:r},m=await d(t,f),b=a(i(o)),v=O(b),y=g[b],x=g[v];if(l){let e="y"===b?"bottom":"right";y=h(y+m["y"===b?"top":"left"],p(y,y-m[e]))}s&&(x=h(x+m["y"===v?"top":"left"],p(x,x-m["y"===v?"bottom":"right"])));let w=u.fn({...t,[b]:y,[v]:x});return{...w,data:{x:w.x-n,y:w.y-r}}}}},A=function(e){return void 0===e&&(e={}),{options:e,fn(t){let{x:n,y:r,placement:o,rects:l,middlewareData:s}=t,{offset:u=0,mainAxis:f=!0,crossAxis:d=!0}=c(e,t),p={x:n,y:r},h=a(o),g=O(h),m=p[h],b=p[g],v=c(u,t),y="number"==typeof v?{mainAxis:v,crossAxis:0}:{mainAxis:0,crossAxis:0,...v};if(f){let e="y"===h?"height":"width",t=l.reference[h]-l.floating[e]+y.mainAxis,n=l.reference[h]+l.reference[e]-y.mainAxis;mn&&(m=n)}if(d){var x,w;let e="y"===h?"width":"height",t=["top","left"].includes(i(o)),n=l.reference[g]-l.floating[e]+(t&&(null==(x=s.offset)?void 
0:x[g])||0)+(t?0:y.crossAxis),r=l.reference[g]+l.reference[e]+(t?0:(null==(w=s.offset)?void 0:w[g])||0)-(t?y.crossAxis:0);br&&(b=r)}return{[h]:m,[g]:b}}}},N=function(e){return void 0===e&&(e={}),{name:"size",options:e,async fn(t){let n,o;let{placement:l,rects:s,platform:u,elements:f}=t,{apply:g=()=>{},...m}=c(e,t),b=await d(t,m),v=i(l),y=r(l),x="x"===a(l),{width:w,height:E}=s.floating;"top"===v||"bottom"===v?(n=v,o=y===(await (null==u.isRTL?void 0:u.isRTL(f.floating))?"start":"end")?"left":"right"):(o=v,n="end"===y?"top":"bottom");let S=E-b[n],k=w-b[o],_=!t.middlewareData.shift,O=S,C=k;if(x){let e=w-b.left-b.right;C=y||_?p(k,e):e}else{let e=E-b.top-b.bottom;O=y||_?p(S,e):e}if(_&&!y){let e=h(b.left,0),t=h(b.right,0),n=h(b.top,0),r=h(b.bottom,0);x?C=w-2*(0!==e||0!==t?e+t:h(b.left,b.right)):O=E-2*(0!==n||0!==r?n+r:h(b.top,b.bottom))}await g({...t,availableWidth:C,availableHeight:O});let A=await u.getDimensions(f.floating);return w!==A.width||E!==A.height?{reset:{rects:!0}}:{}}}}},41778:function(e,t,n){"use strict";n.d(t,{Kx:function(){return R},Me:function(){return L},oo:function(){return I}});var r=n(21828);function o(e){var t;return(null==e||null==(t=e.ownerDocument)?void 0:t.defaultView)||window}function i(e){return o(e).getComputedStyle(e)}function a(e){return e instanceof o(e).Node}function l(e){return a(e)?(e.nodeName||"").toLowerCase():"#document"}function s(e){return e instanceof HTMLElement||e instanceof o(e).HTMLElement}function c(e){return"undefined"!=typeof ShadowRoot&&(e instanceof o(e).ShadowRoot||e instanceof ShadowRoot)}function u(e){let{overflow:t,overflowX:n,overflowY:r,display:o}=i(e);return/auto|scroll|overlay|hidden|clip/.test(t+r+n)&&!["inline","contents"].includes(o)}function f(e){let t=d(),n=i(e);return"none"!==n.transform||"none"!==n.perspective||!!n.containerType&&"normal"!==n.containerType||!t&&!!n.backdropFilter&&"none"!==n.backdropFilter||!t&&!!n.filter&&"none"!==n.filter||["transform","perspective","filter"].some(e=>(n.willChange||"").includes(e))||["paint","layout","strict","content"].some(e=>(n.contain||"").includes(e))}function d(){return!("undefined"==typeof CSS||!CSS.supports)&&CSS.supports("-webkit-backdrop-filter","none")}function p(e){return["html","body","#document"].includes(l(e))}let h=Math.min,g=Math.max,m=Math.round,b=Math.floor,v=e=>({x:e,y:e});function y(e){let t=i(e),n=parseFloat(t.width)||0,r=parseFloat(t.height)||0,o=s(e),a=o?e.offsetWidth:n,l=o?e.offsetHeight:r,c=m(n)!==a||m(r)!==l;return c&&(n=a,r=l),{width:n,height:r,$:c}}function x(e){return e instanceof Element||e instanceof o(e).Element}function w(e){return x(e)?e:e.contextElement}function E(e){let t=w(e);if(!s(t))return v(1);let n=t.getBoundingClientRect(),{width:r,height:o,$:i}=y(t),a=(i?m(n.width):n.width)/r,l=(i?m(n.height):n.height)/o;return a&&Number.isFinite(a)||(a=1),l&&Number.isFinite(l)||(l=1),{x:a,y:l}}let S=v(0);function k(e){let t=o(e);return d()&&t.visualViewport?{x:t.visualViewport.offsetLeft,y:t.visualViewport.offsetTop}:S}function _(e,t,n,i){var a;void 0===t&&(t=!1),void 0===n&&(n=!1);let l=e.getBoundingClientRect(),s=w(e),c=v(1);t&&(i?x(i)&&(c=E(i)):c=E(e));let u=(void 0===(a=n)&&(a=!1),!(!i||a&&i!==o(s))&&a)?k(s):v(0),f=(l.left+u.x)/c.x,d=(l.top+u.y)/c.y,p=l.width/c.x,h=l.height/c.y;if(s){let e=o(s),t=i&&x(i)?o(i):i,n=e.frameElement;for(;n&&i&&t!==e;){let 
e=E(n),t=n.getBoundingClientRect(),r=getComputedStyle(n),i=t.left+(n.clientLeft+parseFloat(r.paddingLeft))*e.x,a=t.top+(n.clientTop+parseFloat(r.paddingTop))*e.y;f*=e.x,d*=e.y,p*=e.x,h*=e.y,f+=i,d+=a,n=o(n).frameElement}}return(0,r.JB)({width:p,height:h,x:f,y:d})}function O(e){return x(e)?{scrollLeft:e.scrollLeft,scrollTop:e.scrollTop}:{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function C(e){var t;return null==(t=(a(e)?e.ownerDocument:e.document)||window.document)?void 0:t.documentElement}function A(e){return _(C(e)).left+O(e).scrollLeft}function N(e){if("html"===l(e))return e;let t=e.assignedSlot||e.parentNode||c(e)&&e.host||C(e);return c(t)?t.host:t}function R(e,t){var n;void 0===t&&(t=[]);let r=function e(t){let n=N(t);return p(n)?t.ownerDocument?t.ownerDocument.body:t.body:s(n)&&u(n)?n:e(n)}(e),i=r===(null==(n=e.ownerDocument)?void 0:n.body),a=o(r);return i?t.concat(a,a.visualViewport||[],u(r)?r:[]):t.concat(r,R(r))}function T(e,t,n){let a;if("viewport"===t)a=function(e,t){let n=o(e),r=C(e),i=n.visualViewport,a=r.clientWidth,l=r.clientHeight,s=0,c=0;if(i){a=i.width,l=i.height;let e=d();(!e||e&&"fixed"===t)&&(s=i.offsetLeft,c=i.offsetTop)}return{width:a,height:l,x:s,y:c}}(e,n);else if("document"===t)a=function(e){let t=C(e),n=O(e),r=e.ownerDocument.body,o=g(t.scrollWidth,t.clientWidth,r.scrollWidth,r.clientWidth),a=g(t.scrollHeight,t.clientHeight,r.scrollHeight,r.clientHeight),l=-n.scrollLeft+A(e),s=-n.scrollTop;return"rtl"===i(r).direction&&(l+=g(t.clientWidth,r.clientWidth)-o),{width:o,height:a,x:l,y:s}}(C(e));else if(x(t))a=function(e,t){let n=_(e,!0,"fixed"===t),r=n.top+e.clientTop,o=n.left+e.clientLeft,i=s(e)?E(e):v(1);return{width:e.clientWidth*i.x,height:e.clientHeight*i.y,x:o*i.x,y:r*i.y}}(t,n);else{let n=k(e);a={...t,x:t.x-n.x,y:t.y-n.y}}return(0,r.JB)(a)}function P(e,t){return s(e)&&"fixed"!==i(e).position?t?t(e):e.offsetParent:null}function M(e,t){let n=o(e);if(!s(e))return n;let r=P(e,t);for(;r&&["table","td","th"].includes(l(r))&&"static"===i(r).position;)r=P(r,t);return r&&("html"===l(r)||"body"===l(r)&&"static"===i(r).position&&!f(r))?n:r||function(e){let t=N(e);for(;s(t)&&!p(t);){if(f(t))return t;t=N(t)}return null}(e)||n}let j={convertOffsetParentRelativeRectToViewportRelativeRect:function(e){let{rect:t,offsetParent:n,strategy:r}=e,o=s(n),i=C(n);if(n===i)return t;let a={scrollLeft:0,scrollTop:0},c=v(1),f=v(0);if((o||!o&&"fixed"!==r)&&(("body"!==l(n)||u(i))&&(a=O(n)),s(n))){let e=_(n);c=E(n),f.x=e.x+n.clientLeft,f.y=e.y+n.clientTop}return{width:t.width*c.x,height:t.height*c.y,x:t.x*c.x-a.scrollLeft*c.x+f.x,y:t.y*c.y-a.scrollTop*c.y+f.y}},getDocumentElement:C,getClippingRect:function(e){let{element:t,boundary:n,rootBoundary:r,strategy:o}=e,a=[..."clippingAncestors"===n?function(e,t){let n=t.get(e);if(n)return n;let r=R(e).filter(e=>x(e)&&"body"!==l(e)),o=null,a="fixed"===i(e).position,s=a?N(e):e;for(;x(s)&&!p(s);){let t=i(s),n=f(s);n||"fixed"!==t.position||(o=null),(a?!n&&!o:!n&&"static"===t.position&&o&&["absolute","fixed"].includes(o.position)||u(s)&&!n&&function e(t,n){let r=N(t);return!(r===n||!x(r)||p(r))&&("fixed"===i(r).position||e(r,n))}(e,s))?r=r.filter(e=>e!==s):o=t,s=N(s)}return t.set(e,r),r}(t,this._c):[].concat(n),r],s=a[0],c=a.reduce((e,n)=>{let r=T(t,n,o);return e.top=g(r.top,e.top),e.right=h(r.right,e.right),e.bottom=h(r.bottom,e.bottom),e.left=g(r.left,e.left),e},T(t,s,o));return{width:c.right-c.left,height:c.bottom-c.top,x:c.left,y:c.top}},getOffsetParent:M,getElementRects:async 
function(e){let{reference:t,floating:n,strategy:r}=e,o=this.getOffsetParent||M,i=this.getDimensions;return{reference:function(e,t,n){let r=s(t),o=C(t),i="fixed"===n,a=_(e,!0,i,t),c={scrollLeft:0,scrollTop:0},f=v(0);if(r||!r&&!i){if(("body"!==l(t)||u(o))&&(c=O(t)),s(t)){let e=_(t,!0,i,t);f.x=e.x+t.clientLeft,f.y=e.y+t.clientTop}else o&&(f.x=A(o))}return{x:a.left+c.scrollLeft-f.x,y:a.top+c.scrollTop-f.y,width:a.width,height:a.height}}(t,await o(n),r),floating:{x:0,y:0,...await i(n)}}},getClientRects:function(e){return Array.from(e.getClientRects())},getDimensions:function(e){return y(e)},getScale:E,isElement:x,isRTL:function(e){return"rtl"===getComputedStyle(e).direction}};function L(e,t,n,r){void 0===r&&(r={});let{ancestorScroll:o=!0,ancestorResize:i=!0,elementResize:a="function"==typeof ResizeObserver,layoutShift:l="function"==typeof IntersectionObserver,animationFrame:s=!1}=r,c=w(e),u=o||i?[...c?R(c):[],...R(t)]:[];u.forEach(e=>{o&&e.addEventListener("scroll",n,{passive:!0}),i&&e.addEventListener("resize",n)});let f=c&&l?function(e,t){let n,r=null,o=C(e);function i(){clearTimeout(n),r&&r.disconnect(),r=null}return function a(l,s){void 0===l&&(l=!1),void 0===s&&(s=1),i();let{left:c,top:u,width:f,height:d}=e.getBoundingClientRect();if(l||t(),!f||!d)return;let p={rootMargin:-b(u)+"px "+-b(o.clientWidth-(c+f))+"px "+-b(o.clientHeight-(u+d))+"px "+-b(c)+"px",threshold:g(0,h(1,s))||1},m=!0;function v(e){let t=e[0].intersectionRatio;if(t!==s){if(!m)return a();t?a(!1,t):n=setTimeout(()=>{a(!1,1e-7)},100)}m=!1}try{r=new IntersectionObserver(v,{...p,root:o.ownerDocument})}catch(e){r=new IntersectionObserver(v,p)}r.observe(e)}(!0),i}(c,n):null,d,p=-1,m=null;a&&(m=new ResizeObserver(e=>{let[r]=e;r&&r.target===c&&m&&(m.unobserve(t),cancelAnimationFrame(p),p=requestAnimationFrame(()=>{m&&m.observe(t)})),n()}),c&&!s&&m.observe(c),m.observe(t));let v=s?_(e):null;return s&&function t(){let r=_(e);v&&(r.x!==v.x||r.y!==v.y||r.width!==v.width||r.height!==v.height)&&n(),v=r,d=requestAnimationFrame(t)}(),n(),()=>{u.forEach(e=>{o&&e.removeEventListener("scroll",n),i&&e.removeEventListener("resize",n)}),f&&f(),m&&m.disconnect(),m=null,s&&cancelAnimationFrame(d)}}let I=(e,t,n)=>{let o=new Map,i={platform:j,...n},a={...i.platform,_c:o};return(0,r.oo)(e,t,{...i,platform:a})}},4058:function(e,t,n){"use strict";n.d(t,{d:function(){return f},f:function(){return u}});var r=n(86006),o=n(53858),i=n(42810),a=n(60961),l=n(68496),s=n(3562);let c=(0,r.createContext)(null);function u(){let[e,t]=(0,r.useState)([]);return[e.length>0?e.join(" "):void 0,(0,r.useMemo)(()=>function(e){let n=(0,s.z)(e=>(t(t=>[...t,e]),()=>t(t=>{let n=t.slice(),r=n.indexOf(e);return -1!==r&&n.splice(r,1),n}))),o=(0,r.useMemo)(()=>({register:n,slot:e.slot,name:e.name,props:e.props}),[n,e.slot,e.name,e.props]);return r.createElement(c.Provider,{value:o},e.children)},[t])]}let f=Object.assign((0,i.yV)(function(e,t){let n=(0,o.M)(),{id:s=`headlessui-description-${n}`,...u}=e,f=function e(){let t=(0,r.useContext)(c);if(null===t){let t=Error("You used a component, but it is not inside a relevant parent.");throw Error.captureStackTrace&&Error.captureStackTrace(t,e),t}return t}(),d=(0,l.T)(t);(0,a.e)(()=>f.register(s),[s,f.register]);let p={ref:d,...f.props,id:s};return(0,i.sY)({ourProps:p,theirProps:u,slot:f.slot||{},defaultTag:"p",name:f.name||"Description"})}),{})},22940:function(e,t,n){"use strict";let r,o;n.d(t,{V:function(){return eb}});var 
i,a,l,s,c,u,f=n(86006),d=n.t(f,2),p=n(59325),h=n(42810),g=n(68496),m=n(68277),b=n(24373),v=n(53858),y=n(11405),x=n(45106),w=n(32243),E=n(3562),S=n(58257),k=((i=k||{})[i.Forwards=0]="Forwards",i[i.Backwards=1]="Backwards",i),_=n(58260),O=n(29101),C=n(1485);function A(e,t,n,r){let o=(0,C.E)(n);(0,f.useEffect)(()=>{function n(e){o.current(e)}return(e=null!=e?e:window).addEventListener(t,n,r),()=>e.removeEventListener(t,n,r)},[e,t,r])}var N=n(10670);function R(e,t){let n=(0,f.useRef)([]),r=(0,E.z)(e);(0,f.useEffect)(()=>{let e=[...n.current];for(let[o,i]of t.entries())if(n.current[o]!==i){let o=r(t,e);return n.current=t,o}},[r,...t])}var T=n(48807);function P(e){let t=(0,E.z)(e),n=(0,f.useRef)(!1);(0,f.useEffect)(()=>(n.current=!1,()=>{n.current=!0,(0,N.Y)(()=>{n.current&&t()})}),[t])}function M(e){if(!e)return new Set;if("function"==typeof e)return new Set(e());let t=new Set;for(let n of e.current)n.current instanceof HTMLElement&&t.add(n.current);return t}var j=((a=j||{})[a.None=1]="None",a[a.InitialFocus=2]="InitialFocus",a[a.TabLock=4]="TabLock",a[a.FocusLock=8]="FocusLock",a[a.RestoreFocus=16]="RestoreFocus",a[a.All=30]="All",a);let L=Object.assign((0,h.yV)(function(e,t){let n,r=(0,f.useRef)(null),o=(0,g.T)(r,t),{initialFocus:i,containers:a,features:l=30,...s}=e;(0,y.H)()||(l=1);let c=(0,O.i)(r);!function({ownerDocument:e},t){let n=function(e=!0){let t=(0,f.useRef)(I.slice());return R(([e],[n])=>{!0===n&&!1===e&&(0,N.Y)(()=>{t.current.splice(0)}),!1===n&&!0===e&&(t.current=I.slice())},[e,I,t]),(0,E.z)(()=>{var e;return null!=(e=t.current.find(e=>null!=e&&e.isConnected))?e:null})}(t);R(()=>{t||(null==e?void 0:e.activeElement)===(null==e?void 0:e.body)&&(0,w.C5)(n())},[t]),P(()=>{t&&(0,w.C5)(n())})}({ownerDocument:c},!!(16&l));let u=function({ownerDocument:e,container:t,initialFocus:n},r){let o=(0,f.useRef)(null),i=(0,_.t)();return R(()=>{if(!r)return;let a=t.current;a&&(0,N.Y)(()=>{if(!i.current)return;let t=null==e?void 0:e.activeElement;if(null!=n&&n.current){if((null==n?void 0:n.current)===t){o.current=t;return}}else if(a.contains(t)){o.current=t;return}null!=n&&n.current?(0,w.C5)(n.current):(0,w.jA)(a,w.TO.First)===w.fE.Error&&console.warn("There are no focusable elements inside the "),o.current=null==e?void 0:e.activeElement})},[r]),o}({ownerDocument:c,container:r,initialFocus:i},!!(2&l));!function({ownerDocument:e,container:t,containers:n,previousActiveElement:r},o){let i=(0,_.t)();A(null==e?void 0:e.defaultView,"focus",e=>{if(!o||!i.current)return;let a=M(n);t.current instanceof HTMLElement&&a.add(t.current);let l=r.current;if(!l)return;let s=e.target;s&&s instanceof HTMLElement?D(a,s)?(r.current=s,(0,w.C5)(s)):(e.preventDefault(),e.stopPropagation(),(0,w.C5)(l)):(0,w.C5)(r.current)},!0)}({ownerDocument:c,container:r,containers:a,previousActiveElement:u},!!(8&l));let d=(n=(0,f.useRef)(0),(0,S.s)("keydown",e=>{"Tab"===e.key&&(n.current=e.shiftKey?1:0)},!0),n),m=(0,E.z)(e=>{let t=r.current;t&&(0,p.E)(d.current,{[k.Forwards]:()=>{(0,w.jA)(t,w.TO.First,{skipElements:[e.relatedTarget]})},[k.Backwards]:()=>{(0,w.jA)(t,w.TO.Last,{skipElements:[e.relatedTarget]})}})}),b=(0,T.G)(),v=(0,f.useRef)(!1);return f.createElement(f.Fragment,null,!!(4&l)&&f.createElement(x._,{as:"button",type:"button","data-headlessui-focus-guard":!0,onFocus:m,features:x.A.Focusable}),(0,h.sY)({ourProps:{ref:o,onKeyDown(e){"Tab"==e.key&&(v.current=!0,b.requestAnimationFrame(()=>{v.current=!1}))},onBlur(e){let t=M(a);r.current instanceof HTMLElement&&t.add(r.current);let n=e.relatedTarget;n instanceof 
HTMLElement&&"true"!==n.dataset.headlessuiFocusGuard&&(D(t,n)||(v.current?(0,w.jA)(r.current,(0,p.E)(d.current,{[k.Forwards]:()=>w.TO.Next,[k.Backwards]:()=>w.TO.Previous})|w.TO.WrapAround,{relativeTo:e.target}):e.target instanceof HTMLElement&&(0,w.C5)(e.target)))}},theirProps:s,defaultTag:"div",name:"FocusTrap"}),!!(4&l)&&f.createElement(x._,{as:"button",type:"button","data-headlessui-focus-guard":!0,onFocus:m,features:x.A.Focusable}))}),{features:j}),I=[];function D(e,t){for(let n of e)if(n.contains(t))return!0;return!1}!function(e){function t(){"loading"!==document.readyState&&(e(),document.removeEventListener("DOMContentLoaded",t))}"undefined"!=typeof window&&"undefined"!=typeof document&&(document.addEventListener("DOMContentLoaded",t),t())}(()=>{function e(e){e.target instanceof HTMLElement&&e.target!==document.body&&I[0]!==e.target&&(I.unshift(e.target),(I=I.filter(e=>null!=e&&e.isConnected)).splice(10))}window.addEventListener("click",e,{capture:!0}),window.addEventListener("mousedown",e,{capture:!0}),window.addEventListener("focus",e,{capture:!0}),document.body.addEventListener("click",e,{capture:!0}),document.body.addEventListener("mousedown",e,{capture:!0}),document.body.addEventListener("focus",e,{capture:!0})});var F=n(8431),B=n(60961);let z=(0,f.createContext)(!1);function $(e){return f.createElement(z.Provider,{value:e.force},e.children)}var U=n(30028);let H=f.Fragment,Z=f.Fragment,q=(0,f.createContext)(null),W=(0,f.createContext)(null),V=Object.assign((0,h.yV)(function(e,t){let n=(0,f.useRef)(null),r=(0,g.T)((0,g.h)(e=>{n.current=e}),t),o=(0,O.i)(n),i=function(e){let t=(0,f.useContext)(z),n=(0,f.useContext)(q),r=(0,O.i)(e),[o,i]=(0,f.useState)(()=>{if(!t&&null!==n||U.O.isServer)return null;let e=null==r?void 0:r.getElementById("headlessui-portal-root");if(e)return e;if(null===r)return null;let o=r.createElement("div");return o.setAttribute("id","headlessui-portal-root"),r.body.appendChild(o)});return(0,f.useEffect)(()=>{null!==o&&(null!=r&&r.body.contains(o)||null==r||r.body.appendChild(o))},[o,r]),(0,f.useEffect)(()=>{t||null!==n&&i(n.current)},[n,i,t]),o}(n),[a]=(0,f.useState)(()=>{var e;return U.O.isServer?null:null!=(e=null==o?void 0:o.createElement("div"))?e:null}),l=(0,f.useContext)(W),s=(0,y.H)();return(0,B.e)(()=>{!i||!a||i.contains(a)||(a.setAttribute("data-headlessui-portal",""),i.appendChild(a))},[i,a]),(0,B.e)(()=>{if(a&&l)return l.register(a)},[l,a]),P(()=>{var e;i&&a&&(a instanceof Node&&i.contains(a)&&i.removeChild(a),i.childNodes.length<=0&&(null==(e=i.parentElement)||e.removeChild(i)))}),s&&i&&a?(0,F.createPortal)((0,h.sY)({ourProps:{ref:r},theirProps:e,defaultTag:H,name:"Portal"}),a):null}),{Group:(0,h.yV)(function(e,t){let{target:n,...r}=e,o={ref:(0,g.T)(t)};return f.createElement(q.Provider,{value:n},(0,h.sY)({ourProps:o,theirProps:r,defaultTag:Z,name:"Popover.Group"}))})});var G=n(4058),K=n(10546);let Y=(0,f.createContext)(()=>{});Y.displayName="StackContext";var X=((l=X||{})[l.Add=0]="Add",l[l.Remove=1]="Remove",l);function J({children:e,onUpdate:t,type:n,element:r,enabled:o}){let i=(0,f.useContext)(Y),a=(0,E.z)((...e)=>{null==t||t(...e),i(...e)});return(0,B.e)(()=>{let e=void 0===o||!0===o;return e&&a(0,n,r),()=>{e&&a(1,n,r)}},[a,n,r,o]),f.createElement(Y.Provider,{value:a},e)}var Q=n(45880);let{useState:ee,useEffect:et,useLayoutEffect:en,useDebugValue:er}=d;"undefined"!=typeof window&&void 0!==window.document&&window.document.createElement;let eo=d.useSyncExternalStore;var ei=n(70650);let ea=(s={PUSH(e,t){var n;let 
r=null!=(n=this.get(e))?n:{doc:e,count:0,d:(0,ei.k)(),meta:new Set};return r.count++,r.meta.add(t),this.set(e,r),this},POP(e,t){let n=this.get(e);return n&&(n.count--,n.meta.delete(t)),this},SCROLL_PREVENT({doc:e,d:t,meta:n}){let r,o;let i={doc:e,d:t,meta:function(e){let t={};for(let n of e)Object.assign(t,n(t));return t}(n)},a=[/iPhone/gi.test(window.navigator.platform)||/Mac/gi.test(window.navigator.platform)&&window.navigator.maxTouchPoints>0?{before(){r=window.pageYOffset},after({doc:e,d:t,meta:n}){function o(e){return n.containers.flatMap(e=>e()).some(t=>t.contains(e))}t.style(e.body,"marginTop",`-${r}px`),window.scrollTo(0,0);let i=null;t.addEventListener(e,"click",t=>{if(t.target instanceof HTMLElement)try{let n=t.target.closest("a");if(!n)return;let{hash:r}=new URL(n.href),a=e.querySelector(r);a&&!o(a)&&(i=a)}catch{}},!0),t.addEventListener(e,"touchmove",e=>{e.target instanceof HTMLElement&&!o(e.target)&&e.preventDefault()},{passive:!1}),t.add(()=>{window.scrollTo(0,window.pageYOffset+r),i&&i.isConnected&&(i.scrollIntoView({block:"nearest"}),i=null)})}}:{},{before({doc:e}){var t;let n=e.documentElement;o=(null!=(t=e.defaultView)?t:window).innerWidth-n.clientWidth},after({doc:e,d:t}){let n=e.documentElement,r=o-(n.clientWidth-n.offsetWidth);t.style(n,"paddingRight",`${r}px`)}},{before({doc:e,d:t}){t.style(e.documentElement,"overflow","hidden")}}];a.forEach(({before:e})=>null==e?void 0:e(i)),a.forEach(({after:e})=>null==e?void 0:e(i))},SCROLL_ALLOW({d:e}){e.dispose()},TEARDOWN({doc:e}){this.delete(e)}},r=new Map,o=new Set,{getSnapshot:()=>r,subscribe:e=>(o.add(e),()=>o.delete(e)),dispatch(e,...t){let n=s[e].call(r,...t);n&&(r=n,o.forEach(e=>e()))}});ea.subscribe(()=>{let e=ea.getSnapshot(),t=new Map;for(let[n]of e)t.set(n,n.documentElement.style.overflow);for(let n of e.values()){let e="hidden"===t.get(n.doc),r=0!==n.count;(r&&!e||!r&&e)&&ea.dispatch(n.count>0?"SCROLL_PREVENT":"SCROLL_ALLOW",n),0===n.count&&ea.dispatch("TEARDOWN",n)}});let el=new Map,es=new Map;function ec(e,t=!0){(0,B.e)(()=>{var n;if(!t)return;let r="function"==typeof e?e():e.current;if(!r)return;let o=null!=(n=es.get(r))?n:0;return es.set(r,o+1),0!==o||(el.set(r,{"aria-hidden":r.getAttribute("aria-hidden"),inert:r.inert}),r.setAttribute("aria-hidden","true"),r.inert=!0),function(){var e;if(!r)return;let t=null!=(e=es.get(r))?e:1;if(1===t?es.delete(r):es.set(r,t-1),1!==t)return;let n=el.get(r);n&&(null===n["aria-hidden"]?r.removeAttribute("aria-hidden"):r.setAttribute("aria-hidden",n["aria-hidden"]),r.inert=n.inert,el.delete(r))}},[e,t])}var eu=((c=eu||{})[c.Open=0]="Open",c[c.Closed=1]="Closed",c),ef=((u=ef||{})[u.SetTitleId=0]="SetTitleId",u);let ed={0:(e,t)=>e.titleId===t.id?e:{...e,titleId:t.id}},ep=(0,f.createContext)(null);function eh(e){let t=(0,f.useContext)(ep);if(null===t){let t=Error(`<${e} /> is missing a parent component.`);throw Error.captureStackTrace&&Error.captureStackTrace(t,eh),t}return t}function eg(e,t){return(0,p.E)(t.type,ed,e,t)}ep.displayName="DialogContext";let em=h.AN.RenderStrategy|h.AN.Static,eb=Object.assign((0,h.yV)(function(e,t){var n;let r,o,i,a,l;let s=(0,v.M)(),{id:c=`headlessui-dialog-${s}`,open:u,onClose:d,initialFocus:b,__demoMode:w=!1,...S}=e,[k,_]=(0,f.useState)(0),C=(0,K.oJ)();void 0===u&&null!==C&&(u=(C&K.ZM.Open)===K.ZM.Open);let N=(0,f.useRef)(null),R=(0,g.T)(N,t),T=(0,O.i)(N),P=e.hasOwnProperty("open")||null!==C,M=e.hasOwnProperty("onClose");if(!P&&!M)throw Error("You have to provide an `open` and an `onClose` prop to the `Dialog` component.");if(!P)throw Error("You 
provided an `onClose` prop to the `Dialog`, but forgot an `open` prop.");if(!M)throw Error("You provided an `open` prop to the `Dialog`, but forgot an `onClose` prop.");if("boolean"!=typeof u)throw Error(`You provided an \`open\` prop to the \`Dialog\`, but the value is not a boolean. Received: ${u}`);if("function"!=typeof d)throw Error(`You provided an \`onClose\` prop to the \`Dialog\`, but the value is not a function. Received: ${d}`);let j=u?0:1,[I,D]=(0,f.useReducer)(eg,{titleId:null,descriptionId:null,panelRef:(0,f.createRef)()}),F=(0,E.z)(()=>d(!1)),z=(0,E.z)(e=>D({type:0,id:e})),U=!!(0,y.H)()&&!w&&0===j,H=k>1,Z=null!==(0,f.useContext)(ep),[q,Y]=(r=(0,f.useContext)(W),o=(0,f.useRef)([]),i=(0,E.z)(e=>(o.current.push(e),r&&r.register(e),()=>a(e))),a=(0,E.z)(e=>{let t=o.current.indexOf(e);-1!==t&&o.current.splice(t,1),r&&r.unregister(e)}),l=(0,f.useMemo)(()=>({register:i,unregister:a,portals:o}),[i,a,o]),[o,(0,f.useMemo)(()=>function({children:e}){return f.createElement(W.Provider,{value:l},e)},[l])]),{resolveContainers:ee,mainTreeNodeRef:et,MainTreeNode:en}=function({defaultContainers:e=[],portals:t}={}){let n=(0,f.useRef)(null),r=(0,O.i)(n),o=(0,E.z)(()=>{var o;let i=[];for(let t of e)null!==t&&(t instanceof HTMLElement?i.push(t):"current"in t&&t.current instanceof HTMLElement&&i.push(t.current));if(null!=t&&t.current)for(let e of t.current)i.push(e);for(let e of null!=(o=null==r?void 0:r.querySelectorAll("html > *, body > *"))?o:[])e!==document.body&&e!==document.head&&e instanceof HTMLElement&&"headlessui-portal-root"!==e.id&&(e.contains(n.current)||i.some(t=>e.contains(t))||i.push(e));return i});return{resolveContainers:o,contains:(0,E.z)(e=>o().some(t=>t.contains(e))),mainTreeNodeRef:n,MainTreeNode:(0,f.useMemo)(()=>function(){return f.createElement(x._,{features:x.A.Hidden,ref:n})},[n])}}({portals:q,defaultContainers:[null!=(n=I.panelRef.current)?n:N.current]}),er=H?"parent":"leaf",ei=null!==C&&(C&K.ZM.Closing)===K.ZM.Closing,el=!Z&&!ei&&U;ec((0,f.useCallback)(()=>{var e,t;return null!=(t=Array.from(null!=(e=null==T?void 0:T.querySelectorAll("body > *"))?e:[]).find(e=>"headlessui-portal-root"!==e.id&&e.contains(et.current)&&e instanceof HTMLElement))?t:null},[et]),el);let es=!!H||U;ec((0,f.useCallback)(()=>{var e,t;return null!=(t=Array.from(null!=(e=null==T?void 0:T.querySelectorAll("[data-headlessui-portal]"))?e:[]).find(e=>e.contains(et.current)&&e instanceof HTMLElement))?t:null},[et]),es);let eu=!(!U||H);(0,Q.O)(ee,F,eu);let ef=!(H||0!==j);A(null==T?void 0:T.defaultView,"keydown",e=>{ef&&(e.defaultPrevented||e.key===m.R.Escape&&(e.preventDefault(),e.stopPropagation(),F()))}),function(e,t,n=()=>[document.body]){var r;let o,i;r=e=>{var t;return{containers:[...null!=(t=e.containers)?t:[],n]}},o=eo(ea.subscribe,ea.getSnapshot,ea.getSnapshot),(i=e?o.get(e):void 0)&&i.count,(0,B.e)(()=>{if(!(!e||!t))return ea.dispatch("PUSH",e,r),()=>ea.dispatch("POP",e,r)},[t,e])}(T,!(ei||0!==j||Z),ee),(0,f.useEffect)(()=>{if(0!==j||!N.current)return;let e=new ResizeObserver(e=>{for(let t of e){let e=t.target.getBoundingClientRect();0===e.x&&0===e.y&&0===e.width&&0===e.height&&F()}});return e.observe(N.current),()=>e.disconnect()},[j,N,F]);let[ed,eh]=(0,G.f)(),eb=(0,f.useMemo)(()=>[{dialogState:j,close:F,setTitleId:z},I],[j,I,F,z]),ev=(0,f.useMemo)(()=>({open:0===j}),[j]),ey={ref:R,id:c,role:"dialog","aria-modal":0===j||void 0,"aria-labelledby":I.titleId,"aria-describedby":ed};return 
f.createElement(J,{type:"Dialog",enabled:0===j,element:N,onUpdate:(0,E.z)((e,t)=>{"Dialog"===t&&(0,p.E)(e,{[X.Add]:()=>_(e=>e+1),[X.Remove]:()=>_(e=>e-1)})})},f.createElement($,{force:!0},f.createElement(V,null,f.createElement(ep.Provider,{value:eb},f.createElement(V.Group,{target:N},f.createElement($,{force:!1},f.createElement(eh,{slot:ev,name:"Dialog.Description"},f.createElement(L,{initialFocus:b,containers:ee,features:U?(0,p.E)(er,{parent:L.features.RestoreFocus,leaf:L.features.All&~L.features.FocusLock}):L.features.None},f.createElement(Y,null,(0,h.sY)({ourProps:ey,theirProps:S,slot:ev,defaultTag:"div",features:em,visible:0===j,name:"Dialog"}))))))))),f.createElement(en,null))}),{Backdrop:(0,h.yV)(function(e,t){let n=(0,v.M)(),{id:r=`headlessui-dialog-backdrop-${n}`,...o}=e,[{dialogState:i},a]=eh("Dialog.Backdrop"),l=(0,g.T)(t);(0,f.useEffect)(()=>{if(null===a.panelRef.current)throw Error("A component is being used, but a component is missing.")},[a.panelRef]);let s=(0,f.useMemo)(()=>({open:0===i}),[i]);return f.createElement($,{force:!0},f.createElement(V,null,(0,h.sY)({ourProps:{ref:l,id:r,"aria-hidden":!0},theirProps:o,slot:s,defaultTag:"div",name:"Dialog.Backdrop"})))}),Panel:(0,h.yV)(function(e,t){let n=(0,v.M)(),{id:r=`headlessui-dialog-panel-${n}`,...o}=e,[{dialogState:i},a]=eh("Dialog.Panel"),l=(0,g.T)(t,a.panelRef),s=(0,f.useMemo)(()=>({open:0===i}),[i]),c=(0,E.z)(e=>{e.stopPropagation()});return(0,h.sY)({ourProps:{ref:l,id:r,onClick:c},theirProps:o,slot:s,defaultTag:"div",name:"Dialog.Panel"})}),Overlay:(0,h.yV)(function(e,t){let n=(0,v.M)(),{id:r=`headlessui-dialog-overlay-${n}`,...o}=e,[{dialogState:i,close:a}]=eh("Dialog.Overlay"),l=(0,g.T)(t),s=(0,E.z)(e=>{if(e.target===e.currentTarget){if((0,b.P)(e.currentTarget))return e.preventDefault();e.preventDefault(),e.stopPropagation(),a()}}),c=(0,f.useMemo)(()=>({open:0===i}),[i]);return(0,h.sY)({ourProps:{ref:l,id:r,"aria-hidden":!0,onClick:s},theirProps:o,slot:c,defaultTag:"div",name:"Dialog.Overlay"})}),Title:(0,h.yV)(function(e,t){let n=(0,v.M)(),{id:r=`headlessui-dialog-title-${n}`,...o}=e,[{dialogState:i,setTitleId:a}]=eh("Dialog.Title"),l=(0,g.T)(t);(0,f.useEffect)(()=>(a(r),()=>a(null)),[r,a]);let s=(0,f.useMemo)(()=>({open:0===i}),[i]);return(0,h.sY)({ourProps:{ref:l,id:r},theirProps:o,slot:s,defaultTag:"h2",name:"Dialog.Title"})}),Description:G.d})},68277:function(e,t,n){"use strict";n.d(t,{R:function(){return o}});var r,o=((r=o||{}).Space=" ",r.Enter="Enter",r.Escape="Escape",r.Backspace="Backspace",r.Delete="Delete",r.ArrowLeft="ArrowLeft",r.ArrowUp="ArrowUp",r.ArrowRight="ArrowRight",r.ArrowDown="ArrowDown",r.Home="Home",r.End="End",r.PageUp="PageUp",r.PageDown="PageDown",r.Tab="Tab",r)},3420:function(e,t,n){"use strict";n.d(t,{R:function(){return Z}});var r,o,i,a,l=n(86006),s=n(48807),c=n(53858),u=n(60961),f=n(1485);function d(e,t){let[n,r]=(0,l.useState)(e),o=(0,f.E)(e);return(0,u.e)(()=>r(o.current),[o,r,...t]),n}var 
p=n(68496),h=n(42810),g=n(59325),m=n(70650),b=n(68277),v=n(55216),y=n(24373),x=n(32243),w=n(10546),E=n(51795),S=n(45880),k=n(45106),_=n(65969),O=n(53432),C=n(3562),A=n(92490),N=n(23017),R=n(49421),T=((r=T||{})[r.Open=0]="Open",r[r.Closed=1]="Closed",r),P=((o=P||{})[o.Single=0]="Single",o[o.Multi=1]="Multi",o),M=((i=M||{})[i.Pointer=0]="Pointer",i[i.Other=1]="Other",i),j=((a=j||{})[a.OpenListbox=0]="OpenListbox",a[a.CloseListbox=1]="CloseListbox",a[a.GoToOption=2]="GoToOption",a[a.Search=3]="Search",a[a.ClearSearch=4]="ClearSearch",a[a.RegisterOption=5]="RegisterOption",a[a.UnregisterOption=6]="UnregisterOption",a[a.RegisterLabel=7]="RegisterLabel",a);function L(e,t=e=>e){let n=null!==e.activeOptionIndex?e.options[e.activeOptionIndex]:null,r=(0,x.z2)(t(e.options.slice()),e=>e.dataRef.current.domRef.current),o=n?r.indexOf(n):null;return -1===o&&(o=null),{options:r,activeOptionIndex:o}}let I={1:e=>e.dataRef.current.disabled||1===e.listboxState?e:{...e,activeOptionIndex:null,listboxState:1},0(e){if(e.dataRef.current.disabled||0===e.listboxState)return e;let t=e.activeOptionIndex,{isSelected:n}=e.dataRef.current,r=e.options.findIndex(e=>n(e.dataRef.current.value));return -1!==r&&(t=r),{...e,listboxState:0,activeOptionIndex:t}},2(e,t){var n;if(e.dataRef.current.disabled||1===e.listboxState)return e;let r=L(e),o=(0,v.d)(t,{resolveItems:()=>r.options,resolveActiveIndex:()=>r.activeOptionIndex,resolveId:e=>e.id,resolveDisabled:e=>e.dataRef.current.disabled});return{...e,...r,searchQuery:"",activeOptionIndex:o,activationTrigger:null!=(n=t.trigger)?n:1}},3:(e,t)=>{if(e.dataRef.current.disabled||1===e.listboxState)return e;let n=""!==e.searchQuery?0:1,r=e.searchQuery+t.value.toLowerCase(),o=(null!==e.activeOptionIndex?e.options.slice(e.activeOptionIndex+n).concat(e.options.slice(0,e.activeOptionIndex+n)):e.options).find(e=>{var t;return!e.dataRef.current.disabled&&(null==(t=e.dataRef.current.textValue)?void 0:t.startsWith(r))}),i=o?e.options.indexOf(o):-1;return -1===i||i===e.activeOptionIndex?{...e,searchQuery:r}:{...e,searchQuery:r,activeOptionIndex:i,activationTrigger:1}},4:e=>e.dataRef.current.disabled||1===e.listboxState||""===e.searchQuery?e:{...e,searchQuery:""},5:(e,t)=>{let n={id:t.id,dataRef:t.dataRef},r=L(e,e=>[...e,n]);return null===e.activeOptionIndex&&e.dataRef.current.isSelected(t.dataRef.current.value)&&(r.activeOptionIndex=r.options.indexOf(n)),{...e,...r}},6:(e,t)=>{let n=L(e,e=>{let n=e.findIndex(e=>e.id===t.id);return -1!==n&&e.splice(n,1),e});return{...e,...n,activationTrigger:1}},7:(e,t)=>({...e,labelId:t.id})},D=(0,l.createContext)(null);function F(e){let t=(0,l.useContext)(D);if(null===t){let t=Error(`<${e} /> is missing a parent component.`);throw Error.captureStackTrace&&Error.captureStackTrace(t,F),t}return t}D.displayName="ListboxActionsContext";let B=(0,l.createContext)(null);function z(e){let t=(0,l.useContext)(B);if(null===t){let t=Error(`<${e} /> is missing a parent component.`);throw Error.captureStackTrace&&Error.captureStackTrace(t,z),t}return t}function $(e,t){return(0,g.E)(t.type,I,e,t)}B.displayName="ListboxDataContext";let U=l.Fragment,H=h.AN.RenderStrategy|h.AN.Static,Z=Object.assign((0,h.yV)(function(e,t){let{value:n,defaultValue:r,form:o,name:i,onChange:a,by:c=(e,t)=>e===t,disabled:f=!1,horizontal:d=!1,multiple:m=!1,...b}=e,y=d?"horizontal":"vertical",E=(0,p.T)(t),[O=m?[]:void 
0,N]=(0,A.q)(n,a,r),[R,T]=(0,l.useReducer)($,{dataRef:(0,l.createRef)(),listboxState:1,options:[],searchQuery:"",labelId:null,activeOptionIndex:null,activationTrigger:1}),P=(0,l.useRef)({static:!1,hold:!1}),M=(0,l.useRef)(null),j=(0,l.useRef)(null),L=(0,l.useRef)(null),I=(0,C.z)("string"==typeof c?(e,t)=>(null==e?void 0:e[c])===(null==t?void 0:t[c]):c),F=(0,l.useCallback)(e=>(0,g.E)(z.mode,{1:()=>O.some(t=>I(t,e)),0:()=>I(O,e)}),[O]),z=(0,l.useMemo)(()=>({...R,value:O,disabled:f,mode:m?1:0,orientation:y,compare:I,isSelected:F,optionsPropsRef:P,labelRef:M,buttonRef:j,optionsRef:L}),[O,f,m,R]);(0,u.e)(()=>{R.dataRef.current=z},[z]),(0,S.O)([z.buttonRef,z.optionsRef],(e,t)=>{var n;T({type:1}),(0,x.sP)(t,x.tJ.Loose)||(e.preventDefault(),null==(n=z.buttonRef.current)||n.focus())},0===z.listboxState);let H=(0,l.useMemo)(()=>({open:0===z.listboxState,disabled:f,value:O}),[z,f,O]),Z=(0,C.z)(e=>{let t=z.options.find(t=>t.id===e);t&&X(t.dataRef.current.value)}),q=(0,C.z)(()=>{if(null!==z.activeOptionIndex){let{dataRef:e,id:t}=z.options[z.activeOptionIndex];X(e.current.value),T({type:2,focus:v.T.Specific,id:t})}}),W=(0,C.z)(()=>T({type:0})),V=(0,C.z)(()=>T({type:1})),G=(0,C.z)((e,t,n)=>e===v.T.Specific?T({type:2,focus:v.T.Specific,id:t,trigger:n}):T({type:2,focus:e,trigger:n})),K=(0,C.z)((e,t)=>(T({type:5,id:e,dataRef:t}),()=>T({type:6,id:e}))),Y=(0,C.z)(e=>(T({type:7,id:e}),()=>T({type:7,id:null}))),X=(0,C.z)(e=>(0,g.E)(z.mode,{0:()=>null==N?void 0:N(e),1(){let t=z.value.slice(),n=t.findIndex(t=>I(t,e));return -1===n?t.push(e):t.splice(n,1),null==N?void 0:N(t)}})),J=(0,C.z)(e=>T({type:3,value:e})),Q=(0,C.z)(()=>T({type:4})),ee=(0,l.useMemo)(()=>({onChange:X,registerOption:K,registerLabel:Y,goToOption:G,closeListbox:V,openListbox:W,selectActiveOption:q,selectOption:Z,search:J,clearSearch:Q}),[]),et=(0,l.useRef)(null),en=(0,s.G)();return(0,l.useEffect)(()=>{et.current&&void 0!==r&&en.addEventListener(et.current,"reset",()=>{X(r)})},[et,X]),l.createElement(D.Provider,{value:ee},l.createElement(B.Provider,{value:z},l.createElement(w.up,{value:(0,g.E)(z.listboxState,{0:w.ZM.Open,1:w.ZM.Closed})},null!=i&&null!=O&&(0,_.t)({[i]:O}).map(([e,t],n)=>l.createElement(k._,{features:k.A.Hidden,ref:0===n?e=>{var t;et.current=null!=(t=null==e?void 0:e.closest("form"))?t:null}:void 0,...(0,h.oA)({key:e,as:"input",type:"hidden",hidden:!0,readOnly:!0,form:o,name:e,value:t})})),(0,h.sY)({ourProps:{ref:E},theirProps:b,slot:H,defaultTag:U,name:"Listbox"}))))}),{Button:(0,h.yV)(function(e,t){var n;let r=(0,c.M)(),{id:o=`headlessui-listbox-button-${r}`,...i}=e,a=z("Listbox.Button"),u=F("Listbox.Button"),f=(0,p.T)(a.buttonRef,t),g=(0,s.G)(),m=(0,C.z)(e=>{switch(e.key){case b.R.Space:case b.R.Enter:case b.R.ArrowDown:e.preventDefault(),u.openListbox(),g.nextFrame(()=>{a.value||u.goToOption(v.T.First)});break;case b.R.ArrowUp:e.preventDefault(),u.openListbox(),g.nextFrame(()=>{a.value||u.goToOption(v.T.Last)})}}),x=(0,C.z)(e=>{e.key===b.R.Space&&e.preventDefault()}),w=(0,C.z)(e=>{if((0,y.P)(e.currentTarget))return e.preventDefault();0===a.listboxState?(u.closeListbox(),g.nextFrame(()=>{var e;return null==(e=a.buttonRef.current)?void 0:e.focus({preventScroll:!0})})):(e.preventDefault(),u.openListbox())}),S=d(()=>{if(a.labelId)return[a.labelId,o].join(" ")},[a.labelId,o]),k=(0,l.useMemo)(()=>({open:0===a.listboxState,disabled:a.disabled,value:a.value}),[a]),_={ref:f,id:o,type:(0,E.f)(e,a.buttonRef),"aria-haspopup":"listbox","aria-controls":null==(n=a.optionsRef.current)?void 0:n.id,"aria-expanded":a.disabled?void 
0:0===a.listboxState,"aria-labelledby":S,disabled:a.disabled,onKeyDown:m,onKeyUp:x,onClick:w};return(0,h.sY)({ourProps:_,theirProps:i,slot:k,defaultTag:"button",name:"Listbox.Button"})}),Label:(0,h.yV)(function(e,t){let n=(0,c.M)(),{id:r=`headlessui-listbox-label-${n}`,...o}=e,i=z("Listbox.Label"),a=F("Listbox.Label"),s=(0,p.T)(i.labelRef,t);(0,u.e)(()=>a.registerLabel(r),[r]);let f=(0,C.z)(()=>{var e;return null==(e=i.buttonRef.current)?void 0:e.focus({preventScroll:!0})}),d=(0,l.useMemo)(()=>({open:0===i.listboxState,disabled:i.disabled}),[i]);return(0,h.sY)({ourProps:{ref:s,id:r,onClick:f},theirProps:o,slot:d,defaultTag:"label",name:"Listbox.Label"})}),Options:(0,h.yV)(function(e,t){var n;let r=(0,c.M)(),{id:o=`headlessui-listbox-options-${r}`,...i}=e,a=z("Listbox.Options"),u=F("Listbox.Options"),f=(0,p.T)(a.optionsRef,t),y=(0,s.G)(),x=(0,s.G)(),E=(0,w.oJ)(),S=null!==E?(E&w.ZM.Open)===w.ZM.Open:0===a.listboxState;(0,l.useEffect)(()=>{var e;let t=a.optionsRef.current;t&&0===a.listboxState&&t!==(null==(e=(0,O.r)(t))?void 0:e.activeElement)&&t.focus({preventScroll:!0})},[a.listboxState,a.optionsRef]);let k=(0,C.z)(e=>{switch(x.dispose(),e.key){case b.R.Space:if(""!==a.searchQuery)return e.preventDefault(),e.stopPropagation(),u.search(e.key);case b.R.Enter:if(e.preventDefault(),e.stopPropagation(),null!==a.activeOptionIndex){let{dataRef:e}=a.options[a.activeOptionIndex];u.onChange(e.current.value)}0===a.mode&&(u.closeListbox(),(0,m.k)().nextFrame(()=>{var e;return null==(e=a.buttonRef.current)?void 0:e.focus({preventScroll:!0})}));break;case(0,g.E)(a.orientation,{vertical:b.R.ArrowDown,horizontal:b.R.ArrowRight}):return e.preventDefault(),e.stopPropagation(),u.goToOption(v.T.Next);case(0,g.E)(a.orientation,{vertical:b.R.ArrowUp,horizontal:b.R.ArrowLeft}):return e.preventDefault(),e.stopPropagation(),u.goToOption(v.T.Previous);case b.R.Home:case b.R.PageUp:return e.preventDefault(),e.stopPropagation(),u.goToOption(v.T.First);case b.R.End:case b.R.PageDown:return e.preventDefault(),e.stopPropagation(),u.goToOption(v.T.Last);case b.R.Escape:return e.preventDefault(),e.stopPropagation(),u.closeListbox(),y.nextFrame(()=>{var e;return null==(e=a.buttonRef.current)?void 0:e.focus({preventScroll:!0})});case b.R.Tab:e.preventDefault(),e.stopPropagation();break;default:1===e.key.length&&(u.search(e.key),x.setTimeout(()=>u.clearSearch(),350))}}),_=d(()=>{var e,t,n;return null!=(n=null==(e=a.labelRef.current)?void 0:e.id)?n:null==(t=a.buttonRef.current)?void 0:t.id},[a.labelRef.current,a.buttonRef.current]),A=(0,l.useMemo)(()=>({open:0===a.listboxState}),[a]),N={"aria-activedescendant":null===a.activeOptionIndex||null==(n=a.options[a.activeOptionIndex])?void 0:n.id,"aria-multiselectable":1===a.mode||void 0,"aria-labelledby":_,"aria-orientation":a.orientation,id:o,onKeyDown:k,role:"listbox",tabIndex:0,ref:f};return(0,h.sY)({ourProps:N,theirProps:i,slot:A,defaultTag:"ul",features:H,visible:S,name:"Listbox.Options"})}),Option:(0,h.yV)(function(e,t){let n=(0,c.M)(),{id:r=`headlessui-listbox-option-${n}`,disabled:o=!1,value:i,...a}=e,s=z("Listbox.Option"),d=F("Listbox.Option"),g=null!==s.activeOptionIndex&&s.options[s.activeOptionIndex].id===r,b=s.isSelected(i),y=(0,l.useRef)(null),x=(0,R.x)(y),w=(0,f.E)({disabled:o,value:i,domRef:y,get textValue(){return x()}}),E=(0,p.T)(t,y);(0,u.e)(()=>{if(0!==s.listboxState||!g||0===s.activationTrigger)return;let e=(0,m.k)();return e.requestAnimationFrame(()=>{var e,t;null==(t=null==(e=y.current)?void 
0:e.scrollIntoView)||t.call(e,{block:"nearest"})}),e.dispose},[y,g,s.listboxState,s.activationTrigger,s.activeOptionIndex]),(0,u.e)(()=>d.registerOption(r,w),[w,r]);let S=(0,C.z)(e=>{if(o)return e.preventDefault();d.onChange(i),0===s.mode&&(d.closeListbox(),(0,m.k)().nextFrame(()=>{var e;return null==(e=s.buttonRef.current)?void 0:e.focus({preventScroll:!0})}))}),k=(0,C.z)(()=>{if(o)return d.goToOption(v.T.Nothing);d.goToOption(v.T.Specific,r)}),_=(0,N.g)(),O=(0,C.z)(e=>_.update(e)),A=(0,C.z)(e=>{_.wasMoved(e)&&(o||g||d.goToOption(v.T.Specific,r,0))}),T=(0,C.z)(e=>{_.wasMoved(e)&&(o||g&&d.goToOption(v.T.Nothing))}),P=(0,l.useMemo)(()=>({active:g,selected:b,disabled:o}),[g,b,o]);return(0,h.sY)({ourProps:{id:r,ref:E,role:"option",tabIndex:!0===o?void 0:-1,"aria-disabled":!0===o||void 0,"aria-selected":b,disabled:void 0,onClick:S,onFocus:k,onPointerEnter:O,onMouseEnter:O,onPointerMove:A,onMouseMove:A,onPointerLeave:T,onMouseLeave:T},theirProps:a,slot:P,defaultTag:"li",name:"Listbox.Option"})})})},40102:function(e,t,n){"use strict";n.d(t,{v:function(){return D}});var r,o,i,a=n(86006),l=n(59325),s=n(42810),c=n(70650),u=n(48807),f=n(60961),d=n(68496),p=n(53858),h=n(68277),g=n(55216),m=n(24373),b=n(32243),v=n(45880),y=n(53432),x=n(10546),w=n(51795),E=n(29101),S=n(3562),k=n(23017),_=n(49421),O=((r=O||{})[r.Open=0]="Open",r[r.Closed=1]="Closed",r),C=((o=C||{})[o.Pointer=0]="Pointer",o[o.Other=1]="Other",o),A=((i=A||{})[i.OpenMenu=0]="OpenMenu",i[i.CloseMenu=1]="CloseMenu",i[i.GoToItem=2]="GoToItem",i[i.Search=3]="Search",i[i.ClearSearch=4]="ClearSearch",i[i.RegisterItem=5]="RegisterItem",i[i.UnregisterItem=6]="UnregisterItem",i);function N(e,t=e=>e){let n=null!==e.activeItemIndex?e.items[e.activeItemIndex]:null,r=(0,b.z2)(t(e.items.slice()),e=>e.dataRef.current.domRef.current),o=n?r.indexOf(n):null;return -1===o&&(o=null),{items:r,activeItemIndex:o}}let R={1:e=>1===e.menuState?e:{...e,activeItemIndex:null,menuState:1},0:e=>0===e.menuState?e:{...e,__demoMode:!1,menuState:0},2:(e,t)=>{var n;let r=N(e),o=(0,g.d)(t,{resolveItems:()=>r.items,resolveActiveIndex:()=>r.activeItemIndex,resolveId:e=>e.id,resolveDisabled:e=>e.dataRef.current.disabled});return{...e,...r,searchQuery:"",activeItemIndex:o,activationTrigger:null!=(n=t.trigger)?n:1}},3:(e,t)=>{let n=""!==e.searchQuery?0:1,r=e.searchQuery+t.value.toLowerCase(),o=(null!==e.activeItemIndex?e.items.slice(e.activeItemIndex+n).concat(e.items.slice(0,e.activeItemIndex+n)):e.items).find(e=>{var t;return(null==(t=e.dataRef.current.textValue)?void 0:t.startsWith(r))&&!e.dataRef.current.disabled}),i=o?e.items.indexOf(o):-1;return -1===i||i===e.activeItemIndex?{...e,searchQuery:r}:{...e,searchQuery:r,activeItemIndex:i,activationTrigger:1}},4:e=>""===e.searchQuery?e:{...e,searchQuery:"",searchActiveItemIndex:null},5:(e,t)=>{let n=N(e,e=>[...e,{id:t.id,dataRef:t.dataRef}]);return{...e,...n}},6:(e,t)=>{let n=N(e,e=>{let n=e.findIndex(e=>e.id===t.id);return -1!==n&&e.splice(n,1),e});return{...e,...n,activationTrigger:1}}},T=(0,a.createContext)(null);function P(e){let t=(0,a.useContext)(T);if(null===t){let t=Error(`<${e} /> is missing a parent component.`);throw Error.captureStackTrace&&Error.captureStackTrace(t,P),t}return t}function M(e,t){return(0,l.E)(t.type,R,e,t)}T.displayName="MenuContext";let 
j=a.Fragment,L=s.AN.RenderStrategy|s.AN.Static,I=a.Fragment,D=Object.assign((0,s.yV)(function(e,t){let{__demoMode:n=!1,...r}=e,o=(0,a.useReducer)(M,{__demoMode:n,menuState:n?0:1,buttonRef:(0,a.createRef)(),itemsRef:(0,a.createRef)(),items:[],searchQuery:"",activeItemIndex:null,activationTrigger:1}),[{menuState:i,itemsRef:c,buttonRef:u},f]=o,p=(0,d.T)(t);(0,v.O)([u,c],(e,t)=>{var n;f({type:1}),(0,b.sP)(t,b.tJ.Loose)||(e.preventDefault(),null==(n=u.current)||n.focus())},0===i);let h=(0,S.z)(()=>{f({type:1})}),g=(0,a.useMemo)(()=>({open:0===i,close:h}),[i,h]);return a.createElement(T.Provider,{value:o},a.createElement(x.up,{value:(0,l.E)(i,{0:x.ZM.Open,1:x.ZM.Closed})},(0,s.sY)({ourProps:{ref:p},theirProps:r,slot:g,defaultTag:j,name:"Menu"})))}),{Button:(0,s.yV)(function(e,t){var n;let r=(0,p.M)(),{id:o=`headlessui-menu-button-${r}`,...i}=e,[l,c]=P("Menu.Button"),f=(0,d.T)(l.buttonRef,t),b=(0,u.G)(),v=(0,S.z)(e=>{switch(e.key){case h.R.Space:case h.R.Enter:case h.R.ArrowDown:e.preventDefault(),e.stopPropagation(),c({type:0}),b.nextFrame(()=>c({type:2,focus:g.T.First}));break;case h.R.ArrowUp:e.preventDefault(),e.stopPropagation(),c({type:0}),b.nextFrame(()=>c({type:2,focus:g.T.Last}))}}),y=(0,S.z)(e=>{e.key===h.R.Space&&e.preventDefault()}),x=(0,S.z)(t=>{if((0,m.P)(t.currentTarget))return t.preventDefault();e.disabled||(0===l.menuState?(c({type:1}),b.nextFrame(()=>{var e;return null==(e=l.buttonRef.current)?void 0:e.focus({preventScroll:!0})})):(t.preventDefault(),c({type:0})))}),E=(0,a.useMemo)(()=>({open:0===l.menuState}),[l]),k={ref:f,id:o,type:(0,w.f)(e,l.buttonRef),"aria-haspopup":"menu","aria-controls":null==(n=l.itemsRef.current)?void 0:n.id,"aria-expanded":e.disabled?void 0:0===l.menuState,onKeyDown:v,onKeyUp:y,onClick:x};return(0,s.sY)({ourProps:k,theirProps:i,slot:E,defaultTag:"button",name:"Menu.Button"})}),Items:(0,s.yV)(function(e,t){var n,r;let o=(0,p.M)(),{id:i=`headlessui-menu-items-${o}`,...l}=e,[m,v]=P("Menu.Items"),w=(0,d.T)(m.itemsRef,t),k=(0,E.i)(m.itemsRef),_=(0,u.G)(),O=(0,x.oJ)(),C=null!==O?(O&x.ZM.Open)===x.ZM.Open:0===m.menuState;(0,a.useEffect)(()=>{let e=m.itemsRef.current;e&&0===m.menuState&&e!==(null==k?void 0:k.activeElement)&&e.focus({preventScroll:!0})},[m.menuState,m.itemsRef,k]),function({container:e,accept:t,walk:n,enabled:r=!0}){let o=(0,a.useRef)(t),i=(0,a.useRef)(n);(0,a.useEffect)(()=>{o.current=t,i.current=n},[t,n]),(0,f.e)(()=>{if(!e||!r)return;let t=(0,y.r)(e);if(!t)return;let n=o.current,a=i.current,l=Object.assign(e=>n(e),{acceptNode:n}),s=t.createTreeWalker(e,NodeFilter.SHOW_ELEMENT,l,!1);for(;s.nextNode();)a(s.currentNode)},[e,r,o,i])}({container:m.itemsRef.current,enabled:0===m.menuState,accept:e=>"menuitem"===e.getAttribute("role")?NodeFilter.FILTER_REJECT:e.hasAttribute("role")?NodeFilter.FILTER_SKIP:NodeFilter.FILTER_ACCEPT,walk(e){e.setAttribute("role","none")}});let A=(0,S.z)(e=>{var t,n;switch(_.dispose(),e.key){case h.R.Space:if(""!==m.searchQuery)return e.preventDefault(),e.stopPropagation(),v({type:3,value:e.key});case h.R.Enter:if(e.preventDefault(),e.stopPropagation(),v({type:1}),null!==m.activeItemIndex){let{dataRef:e}=m.items[m.activeItemIndex];null==(n=null==(t=e.current)?void 0:t.domRef.current)||n.click()}(0,b.wI)(m.buttonRef.current);break;case h.R.ArrowDown:return e.preventDefault(),e.stopPropagation(),v({type:2,focus:g.T.Next});case h.R.ArrowUp:return e.preventDefault(),e.stopPropagation(),v({type:2,focus:g.T.Previous});case h.R.Home:case h.R.PageUp:return e.preventDefault(),e.stopPropagation(),v({type:2,focus:g.T.First});case 
h.R.End:case h.R.PageDown:return e.preventDefault(),e.stopPropagation(),v({type:2,focus:g.T.Last});case h.R.Escape:e.preventDefault(),e.stopPropagation(),v({type:1}),(0,c.k)().nextFrame(()=>{var e;return null==(e=m.buttonRef.current)?void 0:e.focus({preventScroll:!0})});break;case h.R.Tab:e.preventDefault(),e.stopPropagation(),v({type:1}),(0,c.k)().nextFrame(()=>{(0,b.EO)(m.buttonRef.current,e.shiftKey?b.TO.Previous:b.TO.Next)});break;default:1===e.key.length&&(v({type:3,value:e.key}),_.setTimeout(()=>v({type:4}),350))}}),N=(0,S.z)(e=>{e.key===h.R.Space&&e.preventDefault()}),R=(0,a.useMemo)(()=>({open:0===m.menuState}),[m]),T={"aria-activedescendant":null===m.activeItemIndex||null==(n=m.items[m.activeItemIndex])?void 0:n.id,"aria-labelledby":null==(r=m.buttonRef.current)?void 0:r.id,id:i,onKeyDown:A,onKeyUp:N,role:"menu",tabIndex:0,ref:w};return(0,s.sY)({ourProps:T,theirProps:l,slot:R,defaultTag:"div",features:L,visible:C,name:"Menu.Items"})}),Item:(0,s.yV)(function(e,t){let n=(0,p.M)(),{id:r=`headlessui-menu-item-${n}`,disabled:o=!1,...i}=e,[l,u]=P("Menu.Item"),h=null!==l.activeItemIndex&&l.items[l.activeItemIndex].id===r,m=(0,a.useRef)(null),v=(0,d.T)(t,m);(0,f.e)(()=>{if(l.__demoMode||0!==l.menuState||!h||0===l.activationTrigger)return;let e=(0,c.k)();return e.requestAnimationFrame(()=>{var e,t;null==(t=null==(e=m.current)?void 0:e.scrollIntoView)||t.call(e,{block:"nearest"})}),e.dispose},[l.__demoMode,m,h,l.menuState,l.activationTrigger,l.activeItemIndex]);let y=(0,_.x)(m),x=(0,a.useRef)({disabled:o,domRef:m,get textValue(){return y()}});(0,f.e)(()=>{x.current.disabled=o},[x,o]),(0,f.e)(()=>(u({type:5,id:r,dataRef:x}),()=>u({type:6,id:r})),[x,r]);let w=(0,S.z)(()=>{u({type:1})}),E=(0,S.z)(e=>{if(o)return e.preventDefault();u({type:1}),(0,b.wI)(l.buttonRef.current)}),O=(0,S.z)(()=>{if(o)return u({type:2,focus:g.T.Nothing});u({type:2,focus:g.T.Specific,id:r})}),C=(0,k.g)(),A=(0,S.z)(e=>C.update(e)),N=(0,S.z)(e=>{C.wasMoved(e)&&(o||h||u({type:2,focus:g.T.Specific,id:r,trigger:0}))}),R=(0,S.z)(e=>{C.wasMoved(e)&&(o||h&&u({type:2,focus:g.T.Nothing}))}),T=(0,a.useMemo)(()=>({active:h,disabled:o,close:w}),[h,o,w]);return(0,s.sY)({ourProps:{id:r,ref:v,role:"menuitem",tabIndex:!0===o?void 0:-1,"aria-disabled":!0===o||void 0,disabled:void 0,onClick:E,onFocus:O,onPointerEnter:A,onMouseEnter:A,onPointerMove:N,onMouseMove:N,onPointerLeave:R,onMouseLeave:R},theirProps:i,slot:T,defaultTag:I,name:"Menu.Item"})})})},34199:function(e,t,n){"use strict";n.d(t,{r:function(){return w}});var r=n(86006),o=n(42810),i=n(53858),a=n(68277),l=n(24373),s=n(60961),c=n(68496),u=n(3562);let f=(0,r.createContext)(null),d=Object.assign((0,o.yV)(function(e,t){let n=(0,i.M)(),{id:a=`headlessui-label-${n}`,passive:l=!1,...u}=e,d=function e(){let t=(0,r.useContext)(f);if(null===t){let t=Error("You used a

Download File: https://gohhs.com/2uz5Tu



- -mp3 and .wav audiotracks can be converted to any instrument, bank, or synth format with unprecedented accuracy. The instrument's manual is compatible with Windows, Mac, and Linux operating systems. - -[Palliative interventional treatment of gastrointestinal bleeding]. - -Gastrointestinal bleeding is a common problem in patients with malignant diseases. Due to the high morbidity and mortality related to gastrointestinal bleeding, patients and their families often demand palliative care. In this review, the focus is put on the palliation of gastrointestinal bleeding in patients with advanced malignancies. The possible role of endoscopic techniques in palliation of gastrointestinal bleeding is discussed. A combination of endoscopic procedures such as placement of self-expanding metal stents, endoscopic ultrasound and brachytherapy can be useful in relieving obstructions in the gastrointestinal tract. Other interventional procedures including local drug therapy and transarterial chemoembolization can be used to treat tumor and/or ectopic tissue. All these techniques have to be combined with a continuous effort to treat the underlying malignancies with curative intent./* -*- mode: C++ ; c-file-style: "stroustrup" -*- ***************************** - - * Qwt Widget Library - - * Copyright (C) 1997 Josef Wilgen - - * Copyright (C) 2002 Uwe Rathmann - - * - - * This library is free software; you can redistribute it and/or - - * modify it under the terms of the Qwt License, Version 1.0 - - *****************************************************************************/ - -#ifndef QWT_SINGLE_VALUE_RANGE_H - -#define QWT_SINGLE_VALUE_RANGE_H - -#include "qwt_global.h" - -#include "qwt_abstract_symbol.h" - -class QwtPlot; - -class QwtPlotCurve; - -class QwtScaleEngine; - -class QwtRasterData; - -class QwtPointArray; - -class QwtValueList; - -class QwtSingleSymbol; - -class QwtSingleSeries; - -/*! - - \brief A value range for a QwtPlotCurve - - The QwtSingleValueRange object stores the min and max value for - - the single value series. In case of a multiple value series the value - - range stores 4fefd39f24
-
-
-

diff --git a/spaces/inamXcontru/PoeticTTS/Crysis 2 Serial Key Crack BEST Free 11.md b/spaces/inamXcontru/PoeticTTS/Crysis 2 Serial Key Crack BEST Free 11.md deleted file mode 100644 index 8294692e4049bc625a9d08e9908ff243e9f0f903..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Crysis 2 Serial Key Crack BEST Free 11.md +++ /dev/null @@ -1,98 +0,0 @@ -
-

Crysis 2 Serial Key Crack Free 11: How to Download and Play the Game

- -

If you are a fan of first-person shooter games, you might have heard of Crysis 2, a sci-fi game that lets you fight against aliens in a devastated New York City. The game features stunning graphics, realistic physics, and a customizable nanosuit that gives you different abilities such as stealth, armor, and power.

-

crysis 2 serial key crack free 11


Download: https://gohhs.com/2uz4uZ



- -

However, if you want to play the game on your PC, you might encounter some problems with the activation serial. The game requires a valid serial key to run, but some users have reported that their keys are already in use or invalid. This can be frustrating, especially if you have bought the game legally.

- -

Fortunately, there is a solution to this problem. You can download a crack file that bypasses the activation serial and lets you play the game without any hassle. A crack file is a modified version of the game executable that removes the need for a serial key. You can find many crack files online, but some of them might not work properly or contain viruses.

- -

That's why we have prepared this guide for you. We will show you how to download and use a reliable and safe crack file for Crysis 2 serial key crack free 11. Follow these steps carefully and you will be able to enjoy the game in no time.

- -

Step 1: Download the Crack File

- -

The first thing you need to do is to download the crack file from a trusted source. We recommend using this link: https://www.mediafire.com/file/924323... This is a verified and tested crack file that works for Crysis 2 serial key crack free 11. It is also free of viruses and malware.

-

- -

Once you click on the link, you will be redirected to a download page. Click on the green button that says "Download" and wait for the file to be downloaded to your computer. The file size is about 20 MB, so it should not take long.

- -

Step 2: Extract the Crack File

- -

After downloading the crack file, you need to extract it using a program like WinRAR or 7-Zip. You can download these programs for free from their official websites. To extract the file, right-click on it and select "Extract here" or "Extract to Crysis_2_Crack". You will get a folder named "Crysis_2_Crack" that contains two files: "Crysis2.exe" and "Crysis2Launcher.exe". These are the crack files that you need to use.

- -

Step 3: Copy and Paste the Crack Files

- -

Now that you have extracted the crack files, you need to copy and paste them into your game folder. To do this, locate your game folder on your computer. It should be something like "C:\Program Files (x86)\Electronic Arts\Crytek\Crysis 2". If you have installed the game in a different location, find it there.

- -

Once you have found your game folder, open it and look for two files named "Crysis2.exe" and "Crysis2Launcher.exe". These are the original game files that you need to replace with the crack files. To do this, right-click on each file and select "Rename". Add ".bak" at the end of each file name, so they become "Crysis2.exe.bak" and "Crysis2Launcher.exe.bak". This way, you can keep a backup of the original files in case something goes wrong.

- -

After renaming the original files, copy and paste the crack files from the "Crysis_2_Crack" folder into your game folder. You should now have two new files named "Crysis2.exe" and "Crysis2Launcher.exe" in your game folder.

- -

Step 4: Run the Game

- -

You are almost done. The last thing you need to do is to run the game using the crack files. To do this, double-click on "Crysis2Launcher.exe" in your game folder. This will launch the game without asking for a serial key. You can now enjoy playing Crysis 2 serial key crack free 11 on your PC.

- -

Note: If you want to play the game online or update it to the latest version, you might need to download another crack file that works for multiplayer and patches. You can find these files online as well, but make sure they are compatible with your game version and safe to use.

- -

Conclusion

- -

Crysis 2 is an amazing game that offers a thrilling and immersive experience. However, if you have problems with the activation serial, you might not be able to play it on your PC. That's why we have shown you how to download and use a crack file that bypasses the serial key requirement and lets you play Crysis 2 serial key crack free 11 without any hassle.

- -

We hope this guide was helpful and easy to follow. If you have any questions or comments, feel free to leave them below. We would love to hear from you.

-

How to Fix Common Problems with Crysis 2 Serial Key Crack Free 11

- -

While playing Crysis 2 serial key crack free 11, you might encounter some problems that can affect your gaming experience. These problems can range from minor glitches to major errors that prevent the game from running. Here are some of the common problems that users have reported and how to fix them:

- -
    -
  • The game crashes or freezes. This can be caused by various factors, such as incompatible drivers, corrupted files, hardware that falls short of the system requirements, or malware. To fix this problem, you should update your drivers, verify your game files, lower your graphics settings, or scan your computer for viruses.
  • -
  • The game does not start or shows a black screen. This can be caused by a missing or invalid serial key, a faulty crack file, or a conflict with other programs. To fix this problem, you should make sure you have entered a valid serial key, download a new crack file from the link above, or disable any antivirus or firewall programs that might interfere with the game.
  • -
  • The game has poor performance or low FPS. This can be caused by high graphics settings, outdated hardware, or background processes. To fix this problem, you should lower your graphics settings, upgrade your hardware, or close any unnecessary programs that might consume your CPU or RAM.
  • -
- -

These are just some of the common problems that users have faced while playing Crysis 2 serial key crack free 11. If you have any other problems or questions, you can check out online forums or support pages that offer more solutions and tips.

- -

What Others Are Saying About Crysis 2 Serial Key Crack Free 11

- -

Crysis 2 is a game that has received a lot of praise and criticism from users and critics alike. Some people love it for its graphics, gameplay, and story, while others hate it for its bugs, DRM, and lack of originality. Here are some of the reviews and comments that users have posted online about the game:

- -
-

"Crysis 2 is one of the best games I have ever played. The graphics are amazing, the gameplay is smooth and fun, and the story is engaging and immersive. I love how you can use the nanosuit to adapt to any situation and create your own tactics. The multiplayer mode is also very enjoyable and competitive. I highly recommend this game to anyone who likes sci-fi shooters."

-- A user from Steam -
- -
-

"Crysis 2 is a huge disappointment. The game is full of bugs and glitches that ruin the experience. The game also requires a serial key to run, which is annoying and unnecessary. The game is also very linear and boring compared to the first Crysis game. The nanosuit abilities are limited and unbalanced, and the enemies are dumb and repetitive. The multiplayer mode is also lame and laggy. I do not recommend this game to anyone who likes quality games."

-- A user from Metacritic -
- -

These are just some of the opinions that users have expressed about Crysis 2 serial key crack free 11. As you can see, the game has its pros and cons, and it is up to you to decide whether you like it or not.

-

Where to Download Crysis 2 Serial Key Crack Free 11

- -

If you want to play Crysis 2 serial key crack free 11, you need to download the game and the crack file from a reliable source. There are many websites that offer these files, but some of them might be fake, outdated, or infected with viruses. Therefore, you should be careful and choose a trusted source that provides high-quality and safe files.

- -

One of the best sources that we recommend is this website: https://www.mediafire.com/file/924323... This website offers a verified and tested crack file that works for Crysis 2 serial key crack free 11. It is also free of viruses and malware. You can download the crack file from this link and follow the instructions in this article to install it on your PC.

- -

However, you also need to download the game itself from another source. You can either buy the game from an official store or download it from a torrent site. If you choose to buy the game, you will get a valid serial key that you can use to activate the game. However, if you choose to download the game from a torrent site, you will need to use the crack file to bypass the serial key requirement.

- -

There are many torrent sites that offer Crysis 2 game files, but some of them might be fake, outdated, or infected with viruses. Therefore, you should be careful and choose a trusted torrent site that provides high-quality and safe files. One of the best torrent sites that we recommend is this one: https://thepiratebay.org/torrent/6261... This torrent site offers a verified and updated game file that works for Crysis 2 serial key crack free 11. It is also free of viruses and malware. You can download the game file from this link and follow the instructions in this article to install it on your PC.

- -

How to Uninstall Crysis 2 Serial Key Crack Free 11

- -

If you want to uninstall Crysis 2 serial key crack free 11 from your PC, you need to follow these steps:

- -
    -
  • Go to your Control Panel and select "Programs and Features". Find "Crysis 2" in the list of programs and click on "Uninstall". Follow the instructions to remove the game from your PC.
  • -
  • Go to your game folder and delete any remaining files or folders related to Crysis 2. The default location of your game folder is "C:\Program Files (x86)\Electronic Arts\Crytek\Crysis 2". If you have installed the game in a different location, find it there.
  • -
  • Go to your "Crysis_2_Crack" folder and delete any remaining files or folders related to the crack file. The default location of your "Crysis_2_Crack" folder is "C:\Users\YourName\Downloads\Crysis_2_Crack". If you have extracted the crack file in a different location, find it there.
  • -
  • Go to your registry editor and delete any remaining entries related to Crysis 2. To do this, press the "Windows + R" keys on your keyboard and type "regedit" in the run box. Click on "OK" to open the registry editor. Navigate to "HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Electronic Arts\Crytek\Crysis 2" and delete this key. Then navigate to "HKEY_CURRENT_USER\Software\Electronic Arts\Crytek\Crysis 2" and delete this key. A scripted version of this step is sketched just after this list.
  • -
- -
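If you would rather script the registry cleanup in the last step, here is a minimal sketch in Python using the standard winreg module. It is an illustration, not an official uninstaller: it assumes the two key paths quoted above and that the script is run with administrator rights on Windows.

```python
import winreg

def delete_key_tree(root, path):
    # winreg.DeleteKey cannot remove a key that still has subkeys,
    # so delete the children recursively first.
    try:
        with winreg.OpenKey(root, path, 0, winreg.KEY_ALL_ACCESS) as key:
            while True:
                try:
                    child = winreg.EnumKey(key, 0)
                except OSError:
                    break  # no more subkeys
                delete_key_tree(root, path + "\\" + child)
        winreg.DeleteKey(root, path)
        print(f"Deleted {path}")
    except FileNotFoundError:
        print(f"Not found (already clean): {path}")

# The two leftover keys named in the registry step above.
delete_key_tree(winreg.HKEY_LOCAL_MACHINE,
                r"SOFTWARE\Wow6432Node\Electronic Arts\Crytek\Crysis 2")
delete_key_tree(winreg.HKEY_CURRENT_USER,
                r"Software\Electronic Arts\Crytek\Crysis 2")
```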

These are the steps that you need to follow to uninstall Crysis 2 serial key crack free 11 from your PC. If you have any problems or questions, you can check out online forums or support pages that offer more solutions and tips.

-

Conclusion

- -

Crysis 2 is a game that offers a lot of fun and excitement for fans of sci-fi shooters. However, if you want to play the game on your PC, you might face some problems with the activation serial. That's why we have shown you how to download and use a crack file that bypasses the serial key requirement and lets you play the game without any hassle.

- -

We hope this article was helpful and easy to follow. If you have any questions or comments, feel free to leave them below. We would love to hear from you.

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/1touchlaserphotocrackdownload ((FULL)).md b/spaces/inplisQlawa/anything-midjourney-v4-1/1touchlaserphotocrackdownload ((FULL)).md deleted file mode 100644 index bb5e7cee2b5536cd2d589809a6c67aa63a994269..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/1touchlaserphotocrackdownload ((FULL)).md +++ /dev/null @@ -1,30 +0,0 @@ - -``` -

How to Turn Your Photos into Laser Engraved Masterpieces with 1-Touch Laser Photo

-

If you have ever wanted to create stunning laser engraved products from your digital photos, you need to check out 1-Touch Laser Photo, a software app that makes the process quick and easy. 1-Touch Laser Photo is designed to work with any laser engraver and any material, and it can transform any photo into a professional quality laser engraving in just a few clicks.

-

1touchlaserphotocrackdownload


Download Zip ❤❤❤ https://urlin.us/2uEw2b



-

Here are some of the benefits of using 1-Touch Laser Photo for your laser engraving projects:

-
    -
  • It saves you time and materials by automatically converting your photos into optimized bitmaps that are ready for laser engraving (a rough sketch of this kind of conversion appears just after this list).
  • -
  • It delivers high quality results by applying the best bitmap screen and image enhancements for each material, based on the expertise of Universal Laser Systems.
  • -
  • It offers a wide range of material compatibility, allowing you to engrave photos onto hundreds of materials, such as wood, stone, metal, glass, acrylic, leather, fabric, and more.
  • -
  • It provides regular software updates with new materials and features added every quarter.
  • -
  • It has an intuitive user interface that lets you crop, resize, rotate, or mirror your image, and select your material from a drop-down menu.
  • -
  • It has a print preview mode that lets you see how your photo will look on different materials before you engrave it.
  • -
-
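To make the "optimized bitmap" idea in the first benefit concrete, here is a minimal sketch of that kind of conversion using Python and the Pillow library. This is not 1-Touch Laser Photo's actual algorithm, only an illustration under stated assumptions: the file names are placeholders, and Pillow's default Floyd-Steinberg dithering stands in for the material-specific bitmap screens the app applies.

```python
from PIL import Image, ImageOps

# Open a photo and drop the color information (placeholder file name).
photo = Image.open("photo.jpg").convert("L")

# Stretch the contrast slightly so the engraving keeps its midtones.
photo = ImageOps.autocontrast(photo)

# Convert to a 1-bit bitmap; Pillow applies Floyd-Steinberg dithering
# by default, turning gray levels into a pattern of on/off laser dots.
photo.convert("1").save("photo_engraving.bmp")
```

A production tool would also rescale the image to the engraver's resolution; that is part of what the app automates for each material.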

With 1-Touch Laser Photo, you can turn your photos into personalized gifts, home decor, art pieces, business signs, and more. You can also use it to create unique products for your customers or clients. Whether you are a hobbyist or a professional, 1-Touch Laser Photo will help you unleash your creativity and make the most of your laser engraver.

-

To learn more about 1-Touch Laser Photo and download a free 30-day trial, visit https://www.ulsinc.com/discover-uls-innovations/1-touch-laser-photo.

-

-``` - -``` -

If you are wondering what kind of photos you can engrave with 1-Touch Laser Photo, the answer is almost any kind. You can engrave portraits, landscapes, animals, flowers, logos, text, and more. You can also choose from different effects, such as grayscale, sepia, or color. The software will automatically adjust the image to suit the chosen effect and material.

-

To give you some inspiration, here are some examples of laser engraved photos on different materials:

-
    -
  • Wood: Wood is one of the most popular materials for laser engraving photos. It gives a warm and natural look to your images, and it can be used for various products, such as plaques, coasters, cutting boards, ornaments, and more. You can engrave photos on different types of wood, such as cherry, maple, walnut, bamboo, or plywood.
  • -
  • Metal: Metal is another versatile material for laser engraving photos. It can create a sleek and modern look for your images, and it can be used for products such as tumblers, flasks, dog tags, jewelry, signs, and more. You can engrave photos on different types of metal, such as stainless steel, aluminum, brass, or titanium.
  • -
  • Glass: Glass is a beautiful material for laser engraving photos. It can create a stunning contrast between the clear and frosted areas of your images, and it can be used for products such as wine glasses, mugs, vases, mirrors, frames, and more. You can engrave photos on different types of glass, such as clear glass, colored glass, or mirrored glass.
  • -
-

As you can see, 1-Touch Laser Photo opens up a world of possibilities for creating amazing laser engraved products from your photos. Whether you want to make something for yourself or for someone else, you will find that 1-Touch Laser Photo is the perfect tool for the job.

d5da3c52bf
-
-
\ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Download Keygen Xforce For PowerMill 2016 !LINK!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Download Keygen Xforce For PowerMill 2016 !LINK!.md deleted file mode 100644 index 13207cb4ef3b9c8fdb5f994facf168ba2409e2e6..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Download Keygen Xforce For PowerMill 2016 !LINK!.md +++ /dev/null @@ -1,28 +0,0 @@ -

Download Keygen Xforce For PowerMill 2016


DOWNLOAD ❤❤❤ https://urlin.us/2uExMt



-
-The software is efficient to provide the function of , wherein a builder of appliances or devices is able to implement functions, and , and . How it works and for what purpose is very important in . The software allows you to provide the support of all the functions at the time of creating the application. It is very useful for all the people. You can also control the . This is the most important point of this software. It allows us to manage all the functions and options. is the best software for managing all the functions of the . - -Customer Support :* - -If you are having any problem, then you need to contact to the customer support service of the software and contact our customer support team by visiting their official website. You have to click here to visit the site and you will get full information about the software. - -How to Install PowerMILL 2016 Crack And Keygen Free Download - -Open the installer with double click. - -It will open the installation process of the software. - -Wait until the setup process completes. - -After the setup process is complete, you have to activate the software using a registration key. - -Now click on the button and enter the activation code to activate the software. - -After the activation of the software, you have to use the software according to the instructions.2.02.2013 - -I was sitting in on my first class yesterday. It was my first college class, too. For those of you who know me, you know that was a BIG deal. I'm proud to say that I conquered the campus! I was pretty happy with my presentation and my confidence in myself was on high. It was pretty darn good. I wasn't nervous at all and I think I did a pretty good job. I still have to go to my advisor to work out what I'll be doing for the rest of the semester, but for now I'm taking a Honors English course and a Math class that's part of the Honors Math sequence. The first class I'm taking that I don't have to turn in anything is for Psychology of the Family. I am very excited for that class! It sounds like I'm going to have to learn a lot about myself and it's a chance to step into a whole new arena. - -I should be taking my last bio class this Friday. It's only 50 hours, which is half the normal credit load. I'll be getting some awesome science and math credits. YAY 4fefd39f24
-
-
-

diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/FontBase Crack VERIFIED.md b/spaces/inplisQlawa/anything-midjourney-v4-1/FontBase Crack VERIFIED.md deleted file mode 100644 index 920ed9761584411720375c9790a7f2c06f7d1caf..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/FontBase Crack VERIFIED.md +++ /dev/null @@ -1,119 +0,0 @@ - -

FontBase Crack: How to Manage Your Fonts for Free

-

Fonts are essential elements of any design project, whether it is a logo, a poster, a website, or a presentation. However, managing fonts can be a challenging task, especially if you have hundreds or thousands of fonts installed on your computer. You may need to search, organize, edit, compare, and activate fonts quickly and easily.

-

FontBase Crack


Download File > https://urlin.us/2uExVX



-

Fortunately, there is a solution that can help you manage your fonts for free: FontBase Crack. FontBase Crack is a file that can activate FontBase, a free, beautiful, and fast font manager for Windows, Mac, and Linux. FontBase is software that offers an entirely new way to manage your fonts, with many professional features and a stunning user interface.

-

In this article, we will show you how to download and use FontBase Crack safely and effectively. We will also explain the benefits and risks of using FontBase Crack, as well as some alternatives that you can consider.

-

Where to Download FontBase Crack

-

There are many websites that offer FontBase Crack for download, but not all of them are reliable or secure. Some of them may contain viruses, malware, or fake files that can harm your computer or steal your personal information. Therefore, you need to be careful when choosing where to download FontBase Crack.

-

One of the best places to download FontBase Crack is Sleepbedocas. This is a website that provides various software cracks and activators for free. You can find FontBase Crack by hosttracsibdurchry, who uploaded it on June 7, 2022. This file has been checked by McAfee and no virus was detected.

-

To download FontBase Crack from Sleepbedocas, you need to create an account or sign in with your social media account. Then, you can click on the download button and save the file to your computer.

-

-

How to Use FontBase Crack

-

After downloading FontBase Crack, you need to extract it using a program like WinRAR or 7-Zip. You will get a file called FontBase 2.16.4 Activator.exe.

-

This file is the activator itself, which can generate a license key for FontBase 2.16.4 and activate it automatically. To use FontBase Crack, you need to follow these steps:

-
    -
  1. Make sure you have installed FontBase 2.16.4 on your computer.
  2. -
  3. Run FontBase 2.16.4 Activator.exe as administrator.
  4. -
  5. Click on the "Activate" button and wait for the process to finish.
  6. -
  7. Restart your computer.
  8. -
  9. Enjoy using FontBase 2.16.4 with full features.
  10. -
-

Note: You may need to disable your antivirus or firewall before running the activator, as they may block or delete it.

-

What are the Benefits of Using FontBase Crack

-

By using FontBase Crack, you can enjoy many benefits that FontBase 2.16.4 has to offer. Some of these benefits are:

-
    -
  • You can quickly change the color, background, size, height, and alignment of all your fonts.
  • -
  • You can quickly search, add, edit, and delete all your fonts.
  • -
  • You can synchronize multiple computers via Dropbox, Google Docs, or OneDrive.
  • -
  • You can change font names, display names, and group names with keyboard and mouse shortcuts.
  • -
  • You can easily add or remove fonts from your favorites and a recently used list.
  • -
  • You can easily compare two fonts side-by-side and quickly look at their differences.
  • -
  • You can edit text live, with your changes reflected in real time.
  • -
  • You can access text properties including type, size, weight, and font name with keyboard shortcuts.
  • -
  • You can view and copy any glyph for any language and any font in your collection (see the sketch just after this list).
  • -
  • You can save money and time by avoiding buying a license key or subscribing to a service plan.
  • -
-
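As a free, scriptable complement to the glyph viewer mentioned in this list, here is a minimal sketch using Python and the fontTools library. It is not part of FontBase, and the font path is a placeholder; it simply lists which characters a font file actually covers, which is the same information a glyph panel displays.

```python
from fontTools.ttLib import TTFont

# Load any TrueType/OpenType font file (placeholder path).
font = TTFont("SomeFont.ttf")

# The "best" cmap maps Unicode code points to glyph names.
cmap = font.getBestCmap()
print(f"{len(cmap)} characters mapped to {len(font.getGlyphOrder())} glyphs")

# Show the first few covered characters and their glyph names.
for codepoint, glyph_name in sorted(cmap.items())[:20]:
    print(f"U+{codepoint:04X} {chr(codepoint)!r} -> {glyph_name}")
```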

With FontBase Crack, you can unleash your creativity and productivity with FontBase 2.16.4.

-

What are the Risks of Using FontBase Crack

-

While using FontBase Crack may seem tempting and convenient, it also comes with some risks that you need to be aware of. Some of these risks are:

-
    -
  • You may violate the intellectual property rights of FontBase developers and face legal consequences or penalties.
  • -
  • You may expose your computer to viruses, malware, or fake files that can damage your system or compromise your security.
  • -
  • You may lose your data or files if the activator corrupts or deletes them by accident or on purpose.
  • -
  • You may miss out on the latest updates, features, or support that FontBase provides to its legitimate users.
  • -
  • You may experience technical issues or errors while using cracked software, which may affect your work quality or efficiency.
  • -
-

Using FontBase Crack is not worth the risk, as you may end up losing more than you gain.

-

What are the Alternatives to FontBase Crack

-

If you want to manage your fonts for free without using FontBase Crack, you have some alternatives that you can consider. Some of these alternatives are:

-
    -
  • NexusFont: A free font manager for Windows that lets you preview, install, uninstall, and organize your fonts.
  • -
  • Font Manager: A free font manager for Linux that lets you browse, categorize, install, and uninstall your fonts.
  • -
  • FontExplorer X: A free font manager for Mac that lets you preview, activate, deactivate, and organize your fonts.
  • -
  • Google Fonts: A free online service that lets you access and use thousands of fonts for your web projects.
  • -
  • Font Squirrel: A free online service that lets you download and use hundreds of fonts for your personal or commercial projects.
  • -
-

These alternatives are legal, safe, and reliable ways to manage your fonts for free.

-

Conclusion

-

FontBase Crack is a file that can activate FontBase 2.16.4, a free, beautiful, and fast font manager for Windows, Mac, and Linux. FontBase Crack can help you manage your fonts with ease and style, but it also has some risks and drawbacks that you need to consider before using it. You may violate the law, harm your computer, lose your data, miss out on updates, or encounter errors.

-

If you want to manage your fonts for free without using FontBase Crack, you have some alternatives that you can consider. You can use other free font managers or online services that are legal, safe, and reliable.

-

FontBase 2.16.4 is a powerful and versatile font manager that can help you preview, organize, edit, compare, and activate your fonts, and more. Whether you use FontBase Crack or buy a license key, you should use FontBase 2.16.4 responsibly and ethically.

-

How to Use FontBase 2.16.4 with Full Features

-

Once you have activated FontBase 2.16.4 with FontBase Crack, you can start using it to manage your fonts with full features. Here are some tips on how to use FontBase 2.16.4 effectively:

-
    -
  • To add fonts to FontBase 2.16.4, you can drag and drop them from your computer or use the "Add Fonts" button in the top left corner.
  • -
  • To organize your fonts into collections, you can right-click on any font and choose "Add to Collection" or use the "Create Collection" button in the bottom left corner.
  • -
  • To edit your fonts, you can double-click on any font and use the tools on the right panel. You can change the color, background, size, height, alignment, and more.
  • -
  • To compare your fonts, you can select two fonts and click on the "Compare" button in the top right corner. You can see the differences in glyphs, metrics, kerning, and more.
  • -
  • To sync your fonts across multiple devices, you can use Dropbox, Google Docs, or OneDrive as your font source. You can set this up in the "Settings" menu in the top right corner.
  • -
-

FontBase 2.16.4 is a user-friendly and powerful font manager that can help you work with fonts more efficiently and creatively.

-

How to Update FontBase 2.16.4

-

One of the drawbacks of using FontBase Crack is that you may not be able to update FontBase 2.16.4 to the latest version. This means that you may miss out on some new features, bug fixes, or improvements that FontBase developers may release in the future.

-

If you want to update FontBase 2.16.4, you have two options:

-
    -
  1. You can uninstall FontBase 2.16.4 and FontBase Crack from your computer and download and install the latest version of FontBase from its official website or an authorized reseller. However, this means that you will lose the full features of FontBase 2.16.4 and have to use it with limited functionality.
  2. -
  3. You can wait for a new version of FontBase Crack that can activate the latest version of FontBase 2.16.4. However, this may take a long time or never happen at all.
  4. -
-

Updating FontBase 2.16.4 is a trade-off between having the latest features and having the full features.

-

How to Uninstall FontBase 2.16.4 and FontBase Crack

-

If you decide to stop using FontBase 2.16.4 and FontBase Crack for any reason, you can uninstall them from your computer easily and safely.

-

To uninstall FontBase 2.16.4 and FontBase Crack from your computer, you need to follow these steps:

-
    -
  1. Close FontBase 2.16.4 if it is running.
  2. -
  3. Delete FontBase 2.16.4 Activator.exe from your computer.
  4. -
  5. Go to Control Panel > Programs > Uninstall a Program and find FontBase 2.16.4 in the list.
  6. -
  7. Click on Uninstall and follow the instructions to remove FontBase 2.16.4 from your computer.
  8. -
  9. Restart your computer.
  10. -
-

Note: You may need to delete any leftover files or folders related to FontBase 2.16.4 or FontBase Crack from your computer manually.

-

How to Buy a License Key for FontBase 2.16.4

-

If you want to support FontBase developers and enjoy the full features of FontBase 2.16.4 without any risks or limitations, you can buy a license key for FontBase 2.16.4 from its official website or an authorized reseller.

-

To buy a license key for FontBase 2.16.4, you need to follow these steps:

-
    -
  1. Go to https://fontba.se/pricing and choose the plan that suits your needs and budget.
  2. -
  3. Click on the "Buy Now" button and enter your email address and payment details.
  4. -
  5. Check your email for the confirmation and the license key.
  6. -
  7. Open FontBase 2.16.4 and go to the "Settings" menu on the top right corner.
  8. -
  9. Click on the "Activate License" button and enter your license key.
  10. -
  11. Enjoy using FontBase 2.16.4 with full features and support.
  12. -
-

Note: You can also get a free trial of FontBase 2.16.4 for 14 days before buying a license key.

-

How to Contact FontBase Support

-

If you have any questions, issues, or feedback about FontBase 2.16.4, you can contact the FontBase support team via email or social media.

-

To contact the FontBase support team via email, you can send your message to hello@fontba.se and expect a reply within 24 hours.

-

To contact the FontBase support team via social media, you can follow them on Twitter (@fontbaseapp), Facebook (FontBase), or Instagram (fontbaseapp) and send them a direct message or comment on their posts.

-

The FontBase support team is friendly, helpful, and responsive to your needs and suggestions.

-

Conclusion

-

FontBase Crack is a file that can activate FontBase 2.16.4, a free, beautiful, and fast font manager for Windows, Mac, and Linux. FontBase Crack can help you manage your fonts with ease and style, but it also has some risks and drawbacks that you need to consider before using it. You may violate the law, harm your computer, lose your data, miss out on updates, or encounter errors.

-

If you want to manage your fonts for free without using FontBase Crack, you have some alternatives that you can consider. You can use other free font managers or online services that are legal, safe, and reliable.

-

If you want to support FontBase developers and enjoy the full features of FontBase 2.16.4 without any risks or limitations, you can buy a license key for FontBase 2.16.4 from its official website or an authorized reseller.

-

FontBase 2.16.4 is a user-friendly and powerful font manager that can help you work with fonts more efficiently and creatively.

-


3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Midiculousserial.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Midiculousserial.md deleted file mode 100644 index c180f0c4847c67de6fd5110fbeea02646a8922c0..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Midiculousserial.md +++ /dev/null @@ -1,6 +0,0 @@ -

midiculousserial


Download Zip >>>>> https://urlin.us/2uEy7K



-
-midiculousserial · DAEMON Tools Lite18.8.6.88 Pre-Activated Crack Serial utorrent · Line Of Sight Vietnam No Cd Crack · easyusetools for ... 4d29de3e1b
-
-
-

diff --git a/spaces/inreVtussa/clothingai/Examples/Alldata V10.40w Import Disc 5 1990-2011 Free UPD Download.md b/spaces/inreVtussa/clothingai/Examples/Alldata V10.40w Import Disc 5 1990-2011 Free UPD Download.md deleted file mode 100644 index edf18ab52e0cc1561b06ec14feadd3b2f9bb36ca..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Alldata V10.40w Import Disc 5 1990-2011 Free UPD Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

Alldata V10.40w Import Disc 5 1990-2011 Free Download


Download: https://tiurll.com/2uCiia



- - 3cee63e6c2
-
-
-

diff --git a/spaces/inreVtussa/clothingai/Examples/Big Jon Pc Games Press Your Luck Download [UPDATED] For Free.md b/spaces/inreVtussa/clothingai/Examples/Big Jon Pc Games Press Your Luck Download [UPDATED] For Free.md deleted file mode 100644 index 85d9d33f023ec3fab158997970c296030133be17..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Big Jon Pc Games Press Your Luck Download [UPDATED] For Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

big jon pc games press your luck download for free


Download File: https://tiurll.com/2uCky6



-
-gaming with bob: how to download BigJon's PC games. All BigJon games: http://www.sendspace.com/file/82hkr3 Whammy spinning sound. The All-New Press Your Luck (January 26, 2020). 1fdad05405
-
-
-

diff --git a/spaces/isan2001/BertApps/app.py b/spaces/isan2001/BertApps/app.py deleted file mode 100644 index a11d010442d44486b0a48c38e92e5eebc15ecce5..0000000000000000000000000000000000000000 --- a/spaces/isan2001/BertApps/app.py +++ /dev/null @@ -1,50 +0,0 @@ -import tensorflow as tf -from transformers import BertTokenizer -from transformers import TFBertForSequenceClassification -from Sastrawi.Stemmer.StemmerFactory import StemmerFactory # Import Sastrawi -import streamlit as st - -# Load the BERT model and tokenizer -PRE_TRAINED_MODEL = 'indobenchmark/indobert-base-p2' -bert_tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL) -bert_model = TFBertForSequenceClassification.from_pretrained(PRE_TRAINED_MODEL, num_labels=2) -bert_model.load_weights('model.h5') - -# Initialize the stemmer from Sastrawi -stemmer = StemmerFactory().create_stemmer() # Create the Sastrawi stemmer - -def preprocess_text(text): - # Use Sastrawi for stemming - stemmed_text = stemmer.stem(text.lower()) - - return stemmed_text - -def predict_sentiment(text): - preprocessed_text = preprocess_text(text) # Preprocess the text with Sastrawi - input_ids = tf.constant(bert_tokenizer.encode(preprocessed_text, add_special_tokens=True))[None, :] - logits = bert_model(input_ids)[0] - probabilities = tf.nn.softmax(logits, axis=1) - sentiment = tf.argmax(probabilities, axis=1) - return sentiment.numpy()[0], probabilities.numpy()[0] - -# Application title -st.title('Prediksi Sentimen menggunakan BERT') - -# Text input -text = st.text_area('Masukkan teks', '') - -# Button that runs the sentiment prediction -if st.button('Prediksi'): - if text.strip() == '': - st.warning('Masukkan teks terlebih dahulu.') - else: - sentiment, probabilities = predict_sentiment(text) - - # Compute the sentiment probabilities as percentages - positive_probability = probabilities[1] * 100 - negative_probability = probabilities[0] * 100 - st.write('HASIL PREDIKSI') - if sentiment == 0: - st.write(f'Negatif ({negative_probability:.2f}%)') - else: - st.write(f'Positif ({positive_probability:.2f}%)') \ No newline at end of file diff --git a/spaces/ismot/1702t1/models/modules/__init__.py b/spaces/ismot/1702t1/models/modules/__init__.py deleted file mode 100644 index 78be267dda39cc4b64b79f79400fac9f638887f0..0000000000000000000000000000000000000000 --- a/spaces/ismot/1702t1/models/modules/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -@Date: 2021/09/01 -@description: -""" - -from models.modules.swin_transformer import Swin_Transformer -from models.modules.swg_transformer import SWG_Transformer -from models.modules.transformer import Transformer diff --git a/spaces/jackli888/stable-diffusion-webui/modules/models/diffusion/ddpm_edit.py b/spaces/jackli888/stable-diffusion-webui/modules/models/diffusion/ddpm_edit.py deleted file mode 100644 index f3d49c44cafcc78e27a1e4f2b522faa21e135f9f..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/modules/models/diffusion/ddpm_edit.py +++ /dev/null @@ -1,1459 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" - -# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion). 
-# See more details in LICENSE. - -import torch -import torch.nn as nn -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat -from contextlib import contextmanager -from functools import partial -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only - -from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config -from ldm.modules.ema import LitEma -from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution -from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL -from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like -from ldm.models.diffusion.ddim import DDIMSampler - - -__conditioning_keys__ = {'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y'} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def uniform_on_device(r1, r2, shape, device): - return (r1 - r2) * torch.rand(*shape, device=device) + r2 - - -class DDPM(pl.LightningModule): - # classic DDPM with Gaussian diffusion, in image space - def __init__(self, - unet_config, - timesteps=1000, - beta_schedule="linear", - loss_type="l2", - ckpt_path=None, - ignore_keys=[], - load_only_unet=False, - monitor="val/loss", - use_ema=True, - first_stage_key="image", - image_size=256, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0., - v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1., - conditioning_key=None, - parameterization="eps", # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0., - load_ema=True, - ): - super().__init__() - assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' - self.parameterization = parameterization - print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - self.image_size = image_size # try conv? - self.channels = channels - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - count_params(self.model, verbose=True) - self.use_ema = use_ema - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - - if monitor is not None: - self.monitor = monitor - - if self.use_ema and load_ema: - self.model_ema = LitEma(self.model) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) - - # If initializing from EMA-only checkpoint, create EMA model after loading. 
- if self.use_ema and not load_ema: - self.model_ema = LitEma(self.model) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, - linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - - - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if exists(given_betas): - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, - cosine_s=cosine_s) - alphas = 1. - betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( - 1. - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - if self.parameterization == "eps": - lvlb_weights = self.betas ** 2 / ( - 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) - elif self.parameterization == "x0": - lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) - else: - raise NotImplementedError("mu not supported") - # TODO how to choose this term - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - - # Our model adds additional channels to the first layer to condition on an input image. - # For the first layer, copy existing channel weights and initialize new channel weights to zero. - input_keys = [ - "model.diffusion_model.input_blocks.0.0.weight", - "model_ema.diffusion_modelinput_blocks00weight", - ] - - self_sd = self.state_dict() - for input_key in input_keys: - if input_key not in sd or input_key not in self_sd: - continue - - input_weight = self_sd[input_key] - - if input_weight.size() != sd[input_key].size(): - print(f"Manual init: {input_key}") - input_weight.zero_() - input_weight[:, :4, :, :].copy_(sd[input_key]) - ignore_keys.append(input_key) - - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
- """ - mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) - variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1., 1.) - - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - image_size = self.image_size - channels = self.channels - return self.p_sample_loop((batch_size, channels, image_size, image_size), - return_intermediates=return_intermediates) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - def get_loss(self, pred, target, mean=True): - if self.loss_type == 'l1': - loss = (target - pred).abs() - if mean: - loss = loss.mean() - elif self.loss_type == 'l2': - if mean: - loss = torch.nn.functional.mse_loss(target, pred) - else: - loss = torch.nn.functional.mse_loss(target, pred, reduction='none') - else: - raise NotImplementedError("unknown loss type '{loss_type}'") - - return loss - - def p_losses(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - 
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_out = self.model(x_noisy, t) - - loss_dict = {} - if self.parameterization == "eps": - target = noise - elif self.parameterization == "x0": - target = x_start - else: - raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") - - loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) - - log_prefix = 'train' if self.training else 'val' - - loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) - loss_simple = loss.mean() * self.l_simple_weight - - loss_vlb = (self.lvlb_weights[t] * loss).mean() - loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) - - loss = loss_simple + self.original_elbo_weight * loss_vlb - - loss_dict.update({f'{log_prefix}/loss': loss}) - - return loss, loss_dict - - def forward(self, x, *args, **kwargs): - # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size - # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - return self.p_losses(x, t, *args, **kwargs) - - def get_input(self, batch, k): - return batch[k] - - def shared_step(self, batch): - x = self.get_input(batch, self.first_stage_key) - loss, loss_dict = self(x) - return loss, loss_dict - - def training_step(self, batch, batch_idx): - loss, loss_dict = self.shared_step(batch) - - self.log_dict(loss_dict, prog_bar=True, - logger=True, on_step=True, on_epoch=True) - - self.log("global_step", self.global_step, - prog_bar=True, logger=True, on_step=True, on_epoch=False) - - if self.use_scheduler: - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) - - return loss - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - _, loss_dict_no_ema = self.shared_step(batch) - with self.ema_scope(): - _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} - self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self.model) - - def _get_rows_from_list(self, samples): - n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() - x = self.get_input(batch, self.first_stage_key) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - x = x.to(self.device)[:N] - log["inputs"] = x - - # get diffusion row - diffusion_row = list() - x_start = x[:n_row] - - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(x_start) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - diffusion_row.append(x_noisy) - - log["diffusion_row"] = self._get_rows_from_list(diffusion_row) - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) - - log["samples"] = samples - 
log["denoise_row"] = self._get_rows_from_list(denoise_row) - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.learn_logvar: - params = params + [self.logvar] - opt = torch.optim.AdamW(params, lr=lr) - return opt - - -class LatentDiffusion(DDPM): - """main class""" - def __init__(self, - first_stage_config, - cond_stage_config, - num_timesteps_cond=None, - cond_stage_key="image", - cond_stage_trainable=False, - concat_mode=True, - cond_stage_forward=None, - conditioning_key=None, - scale_factor=1.0, - scale_by_std=False, - load_ema=True, - *args, **kwargs): - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__': - conditioning_key = None - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, load_ema=load_ema, **kwargs) - self.concat_mode = concat_mode - self.cond_stage_trainable = cond_stage_trainable - self.cond_stage_key = cond_stage_key - try: - self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - self.cond_stage_forward = cond_stage_forward - self.clip_denoised = False - self.bbox_tokenizer = None - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys) - self.restarted_from_ckpt = True - - if self.use_ema and not load_ema: - self.model_ema = LitEma(self.model) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - def make_cond_schedule(self, ): - self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) - ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() - self.cond_ids[:self.num_timesteps_cond] = ids - - @rank_zero_only - @torch.no_grad() - def on_train_batch_start(self, batch, batch_idx, dataloader_idx): - # only for very first batch - if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: - assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' - # set rescale weight to 1./std of encodings - print("### USING STD-RESCALING ###") - x = super().get_input(batch, self.first_stage_key) - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - del self.scale_factor - self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) - print(f"setting self.scale_factor to {self.scale_factor}") - print("### USING STD-RESCALING ###") - - def register_schedule(self, - given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == "__is_first_stage__": - print("Using first stage also as cond stage.") - self.cond_stage_model = self.first_stage_model - elif config == "__is_unconditional__": - print(f"Training {self.__class__.__name__} as an unconditional model.") - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' - model = instantiate_from_config(config) - self.cond_stage_model = model - - def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): - denoise_row = [] - for zd in tqdm(samples, desc=desc): - denoise_row.append(self.decode_first_stage(zd.to(self.device), - force_not_quantize=force_no_decoder_quantization)) - n_imgs_per_row = len(denoise_row) - denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") - return self.scale_factor * z - - def get_learned_conditioning(self, c): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - def meshgrid(self, h, w): - y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) - x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) - - arr = torch.cat([y, x], dim=-1) - return arr - - def delta_border(self, h, w): - """ - :param h: height - :param w: width - :return: normalized distance to image border, - wtith min distance = 0 at border and max dist = 0.5 at image center - """ - lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) - arr = self.meshgrid(h, w) / lower_right_corner - dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] - dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] - edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] - return edge_dist - - def get_weighting(self, h, w, Ly, Lx, device): - weighting = self.delta_border(h, w) - weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], - self.split_input_params["clip_max_weight"], ) - weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - - if self.split_input_params["tie_braker"]: - L_weighting = self.delta_border(Ly, Lx) - L_weighting = torch.clip(L_weighting, - self.split_input_params["clip_min_tie_weight"], - self.split_input_params["clip_max_tie_weight"]) - - L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) - weighting = weighting * L_weighting - return weighting - - def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code - """ - :param x: img of size (bs, c, h, w) - :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) - """ - bs, nc, h, w = x.shape - - # number of crops in image - Ly = (h - kernel_size[0]) // stride[0] + 1 - Lx = (w - kernel_size[1]) // stride[1] + 1 - - if uf == 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) - - weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) - - elif uf > 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), - dilation=1, padding=0, - stride=(stride[0] * uf, stride[1] * uf)) - fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) - - elif df > 1 and uf == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), - dilation=1, padding=0, - stride=(stride[0] // df, stride[1] // df)) - fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) - - else: - raise NotImplementedError - - return fold, unfold, normalization, weighting - - @torch.no_grad() - def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, - cond_key=None, return_original_cond=False, bs=None, uncond=0.05): - x = super().get_input(batch, k) - if bs is not None: - x = x[:bs] - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - cond_key = cond_key or self.cond_stage_key - xc = super().get_input(batch, cond_key) - 
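-        # xc holds the raw conditioning for this batch: a text prompt under
-        # "c_crossattn" and a conditioning image under "c_concat"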
if bs is not None: - xc["c_crossattn"] = xc["c_crossattn"][:bs] - xc["c_concat"] = xc["c_concat"][:bs] - cond = {} - - # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. - random = torch.rand(x.size(0), device=x.device) - prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") - input_mask = 1 - rearrange((random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1") - - null_prompt = self.get_learned_conditioning([""]) - cond["c_crossattn"] = [torch.where(prompt_mask, null_prompt, self.get_learned_conditioning(xc["c_crossattn"]).detach())] - cond["c_concat"] = [input_mask * self.encode_first_stage((xc["c_concat"].to(self.device))).mode().detach()] - - out = [z, cond] - if return_first_stage_outputs: - xrec = self.decode_first_stage(z) - out.extend([x, xrec]) - if return_original_cond: - out.append(xc) - return out - - @torch.no_grad() - def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - # same as above but without decorator - def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. 
/ self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - @torch.no_grad() - def encode_first_stage(self, x): - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - df = self.split_input_params["vqf"] - self.split_input_params['original_image_size'] = x.shape[-2:] - bs, nc, h, w = x.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) - z = unfold(x) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) - o = o * weighting - - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization - return decoded - - else: - return self.first_stage_model.encode(x) - else: - return self.first_stage_model.encode(x) - - def shared_step(self, batch, **kwargs): - x, c = self.get_input(batch, self.first_stage_key) - loss = self(x, c) - return loss - - def forward(self, x, c, *args, **kwargs): - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - if self.model.conditioning_key is not None: - assert c is not None - if self.cond_stage_trainable: - c = self.get_learned_conditioning(c) - if self.shorten_cond_schedule: # TODO: drop this option - tc = self.cond_ids[t].to(self.device) - c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) - return self.p_losses(x, c, t, *args, **kwargs) - - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - - def apply_model(self, x_noisy, t, cond, return_ids=False): - - if isinstance(cond, dict): - # hybrid case, cond is exptected to be a dict - pass - else: - if not isinstance(cond, list): - cond = [cond] - key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' - cond = {key: cond} - - if hasattr(self, "split_input_params"): - assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - - h, w = x_noisy.shape[-2:] - - fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) - - z = unfold(x_noisy) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] - - if self.cond_stage_key in ["image", "LR_image", "segmentation", - 'bbox_img'] and self.model.conditioning_key: # todo check for completeness - c_key = next(iter(cond.keys())) # get key - c = next(iter(cond.values())) # get value - assert (len(c) == 1) # todo extend to list with more than one elem - c = c[0] # get element - - c = unfold(c) - c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] - - elif self.cond_stage_key == 'coordinates_bbox': - assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' - - # assuming padding of unfold is always 0 and its dilation is always 1 - n_patches_per_row = int((w - ks[0]) / stride[0] + 1) - full_img_h, full_img_w = self.split_input_params['original_image_size'] - # as we are operating on latents, we need the factor from the original image size to the - # spatial latent size to properly rescale the crops for regenerating the bbox annotations - num_downs = self.first_stage_model.encoder.num_resolutions - 1 - rescale_latent = 2 ** (num_downs) - - # get top left postions of patches as conforming for the bbbox tokenizer, therefore we - # need to rescale the tl patch coordinates to be in between (0,1) - tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, - rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) - for patch_nr in range(z.shape[-1])] - - # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) - patch_limits = [(x_tl, y_tl, - rescale_latent * ks[0] / full_img_w, - rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] - # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] - - # tokenize crop coordinates for the bounding boxes of the respective patches - patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) - for bbox in patch_limits] # list of length l with tensors of shape (1, 2) - print(patch_limits_tknzd[0].shape) - # cut tknzd crop position from conditioning - assert isinstance(cond, dict), 'cond must be dict to be fed into model' - cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) - print(cut_cond.shape) - - adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) - adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') - print(adapted_cond.shape) - adapted_cond = self.get_learned_conditioning(adapted_cond) - print(adapted_cond.shape) - adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) - print(adapted_cond.shape) - - cond_list = [{'c_crossattn': [e]} for e in adapted_cond] - - else: - cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient - - # apply model by loop over crops - output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] - assert not isinstance(output_list[0], - tuple) # todo cant deal with multiple model outputs check this never happens - - o = torch.stack(output_list, 
axis=-1) - o = o * weighting - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - x_recon = fold(o) / normalization - - else: - x_recon = self.model(x_noisy, t, **cond) - - if isinstance(x_recon, tuple) and not return_ids: - return x_recon[0] - else: - return x_recon - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - This term can't be optimized, as it only depends on the encoder. - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. - """ - batch_size = x_start.shape[0] - t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) - return mean_flat(kl_prior) / np.log(2.0) - - def p_losses(self, x_start, cond, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_output = self.apply_model(x_noisy, t, cond) - - loss_dict = {} - prefix = 'train' if self.training else 'val' - - if self.parameterization == "x0": - target = x_start - elif self.parameterization == "eps": - target = noise - else: - raise NotImplementedError() - - loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) - - logvar_t = self.logvar[t].to(self.device) - loss = loss_simple / torch.exp(logvar_t) + logvar_t - # loss = loss_simple / torch.exp(self.logvar) + self.logvar - if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) - - loss = self.l_simple_weight * loss.mean() - - loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) - loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) - loss += (self.original_elbo_weight * loss_vlb) - loss_dict.update({f'{prefix}/loss': loss}) - - return loss, loss_dict - - def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, - return_x0=False, score_corrector=None, corrector_kwargs=None): - t_in = t - model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) - - if score_corrector is not None: - assert self.parameterization == "eps" - model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) - - if return_codebook_ids: - model_out, logits = model_out - - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - else: - raise NotImplementedError() - - if clip_denoised: - x_recon.clamp_(-1., 1.) 
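-        # optionally snap the denoised estimate onto the first-stage VQ codebook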
- if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - if return_codebook_ids: - return model_mean, posterior_variance, posterior_log_variance, logits - elif return_x0: - return model_mean, posterior_variance, posterior_log_variance, x_recon - else: - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, - return_codebook_ids=False, quantize_denoised=False, return_x0=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if return_codebook_ids: - raise DeprecationWarning("Support dropped.") - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - - if return_codebook_ids: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) - if return_x0: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, - img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., - score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, - log_every_t=None): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', - total=timesteps) if verbose else reversed( - range(0, timesteps)) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img, x0_partial = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, return_x0=True, - temperature=temperature[i], noise_dropout=noise_dropout, - 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) - return img, intermediates - - @torch.no_grad() - def p_sample_loop(self, cond, shape, return_intermediates=False, - x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, start_T=None, - log_every_t=None): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( - range(0, timesteps)) - - if mask is not None: - assert x0 is not None - assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, - verbose=True, timesteps=None, quantize_denoised=False, - mask=None, x0=None, shape=None,**kwargs): - if shape is None: - shape = (batch_size, self.channels, self.image_size, self.image_size) - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - return self.p_sample_loop(cond, - shape, - return_intermediates=return_intermediates, x_T=x_T, - verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, - mask=mask, x0=x0) - - @torch.no_grad() - def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): - - if ddim: - ddim_sampler = DDIMSampler(self) - shape = (self.channels, self.image_size, self.image_size) - samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, - shape,cond,verbose=False,**kwargs) - - else: - samples, intermediates = self.sample(cond=cond, batch_size=batch_size, - return_intermediates=True,**kwargs) - - return samples, intermediates - - - @torch.no_grad() - def log_images(self, batch, N=4, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False, - plot_diffusion_rows=False, **kwargs): - - use_ddim = False - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, - return_first_stage_outputs=True, - 
force_c_encode=True, - return_original_cond=True, - bs=N, uncond=0) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reals"] = xc["c_concat"] - log["reconstruction"] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( - self.first_stage_model, IdentityFirstStage): - # also display when quantizing x0 while sampling - with self.ema_scope("Plotting Quantized Denoised"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta, - quantize_denoised=True) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, - # quantize_denoised=True) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_x0_quantized"] = x_samples - - if inpaint: - # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] - mask = torch.ones(N, h, w).to(self.device) - # zeros will be filled in - mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. - mask = mask[:, None, ...] 
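-            # mask == 1 keeps the original latent, mask == 0 marks the region to
-            # regenerate; p_sample_loop blends img_orig * mask + (1 - mask) * img
-            # at every step, and the same mask is reused for the outpaint pass below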
- with self.ema_scope("Plotting Inpaint"): - - samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_inpainting"] = x_samples - log["mask"] = mask - - # outpaint - with self.ema_scope("Plotting Outpaint"): - samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_outpainting"] = x_samples - - if plot_progressive_rows: - with self.ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.cond_stage_trainable: - print(f"{self.__class__.__name__}: Also optimizing conditioner params!") - params = params + list(self.cond_stage_model.parameters()) - if self.learn_logvar: - print('Diffusion model optimizing logvar') - params.append(self.logvar) - opt = torch.optim.AdamW(params, lr=lr) - if self.use_scheduler: - assert 'target' in self.scheduler_config - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [opt], scheduler - return opt - - @torch.no_grad() - def to_rgb(self, x): - x = x.float() - if not hasattr(self, "colorize"): - self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) - x = nn.functional.conv2d(x, weight=self.colorize) - x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
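-        # (min-max rescaled to [-1, 1] so the colorized map logs like other images)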
- return x - - -class DiffusionWrapper(pl.LightningModule): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm'] - - def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t) - elif self.conditioning_key == 'crossattn': - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'hybrid': - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'adm': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - else: - raise NotImplementedError() - - return out - - -class Layout2ImgDiffusion(LatentDiffusion): - # TODO: move all layout-specific hacks to this class - def __init__(self, cond_stage_key, *args, **kwargs): - assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' - super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) - - def log_images(self, batch, N=8, *args, **kwargs): - logs = super().log_images(batch=batch, N=N, *args, **kwargs) - - key = 'train' if self.training else 'validation' - dset = self.trainer.datamodule.datasets[key] - mapper = dset.conditional_builders[self.cond_stage_key] - - bbox_imgs = [] - map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) - for tknzd_bbox in batch[self.cond_stage_key][:N]: - bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) - bbox_imgs.append(bboximg) - - cond_img = torch.stack(bbox_imgs, dim=0) - logs['bbox_image'] = cond_img - return logs diff --git a/spaces/james-oldfield/PandA/networks/genforce/datasets/transforms.py b/spaces/james-oldfield/PandA/networks/genforce/datasets/transforms.py deleted file mode 100644 index bae766a4634ebb3c6e50f3727f59f55bb350ec5a..0000000000000000000000000000000000000000 --- a/spaces/james-oldfield/PandA/networks/genforce/datasets/transforms.py +++ /dev/null @@ -1,201 +0,0 @@ -"""Contains transform functions.""" - -import cv2 -import numpy as np -import PIL.Image - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -__all__ = [ - 'crop_resize_image', 'progressive_resize_image', 'resize_image', - 'normalize_image', 'normalize_latent_code', 'ImageResizing', - 'ImageNormalization', 'LatentCodeNormalization', -] - - -def crop_resize_image(image, size): - """Crops a square patch and then resizes it to the given size. - - Args: - image: The input image to crop and resize. - size: An integer, indicating the target size. - - Returns: - An image with target size. - - Raises: - TypeError: If the input `image` is not with type `numpy.ndarray`. - ValueError: If the input `image` is not with shape [H, W, C]. 
- """ - if not isinstance(image, np.ndarray): - raise TypeError(f'Input image should be with type `numpy.ndarray`, ' - f'but `{type(image)}` is received!') - if image.ndim != 3: - raise ValueError(f'Input image should be with shape [H, W, C], ' - f'but `{image.shape}` is received!') - - height, width, channel = image.shape - short_side = min(height, width) - image = image[(height - short_side) // 2:(height + short_side) // 2, - (width - short_side) // 2:(width + short_side) // 2] - pil_image = PIL.Image.fromarray(image) - pil_image = pil_image.resize((size, size), PIL.Image.ANTIALIAS) - image = np.asarray(pil_image) - assert image.shape == (size, size, channel) - return image - - -def progressive_resize_image(image, size): - """Resizes image to target size progressively. - - Different from normal resize, this function will reduce the image size - progressively. In each step, the maximum reduce factor is 2. - - NOTE: This function can only handle square images, and can only be used for - downsampling. - - Args: - image: The input (square) image to resize. - size: An integer, indicating the target size. - - Returns: - An image with target size. - - Raises: - TypeError: If the input `image` is not with type `numpy.ndarray`. - ValueError: If the input `image` is not with shape [H, W, C]. - """ - if not isinstance(image, np.ndarray): - raise TypeError(f'Input image should be with type `numpy.ndarray`, ' - f'but `{type(image)}` is received!') - if image.ndim != 3: - raise ValueError(f'Input image should be with shape [H, W, C], ' - f'but `{image.shape}` is received!') - - height, width, channel = image.shape - assert height == width - assert height >= size - num_iters = int(np.log2(height) - np.log2(size)) - for _ in range(num_iters): - height = max(height // 2, size) - image = cv2.resize(image, (height, height), - interpolation=cv2.INTER_LINEAR) - assert image.shape == (size, size, channel) - return image - - -def resize_image(image, size): - """Resizes image to target size. - - NOTE: We use adaptive average pooing for image resizing. Instead of bilinear - interpolation, average pooling is able to acquire information from more - pixels, such that the resized results can be with higher quality. - - Args: - image: The input image tensor, with shape [C, H, W], to resize. - size: An integer or a tuple of integer, indicating the target size. - - Returns: - An image tensor with target size. - - Raises: - TypeError: If the input `image` is not with type `torch.Tensor`. - ValueError: If the input `image` is not with shape [C, H, W]. - """ - if not isinstance(image, torch.Tensor): - raise TypeError(f'Input image should be with type `torch.Tensor`, ' - f'but `{type(image)}` is received!') - if image.ndim != 3: - raise ValueError(f'Input image should be with shape [C, H, W], ' - f'but `{image.shape}` is received!') - - image = F.adaptive_avg_pool2d(image.unsqueeze(0), size).squeeze(0) - return image - - -def normalize_image(image, mean=127.5, std=127.5): - """Normalizes image by subtracting mean and dividing std. - - Args: - image: The input image tensor to normalize. - mean: The mean value to subtract from the input tensor. (default: 127.5) - std: The standard deviation to normalize the input tensor. (default: - 127.5) - - Returns: - A normalized image tensor. - - Raises: - TypeError: If the input `image` is not with type `torch.Tensor`. 
- """ - if not isinstance(image, torch.Tensor): - raise TypeError(f'Input image should be with type `torch.Tensor`, ' - f'but `{type(image)}` is received!') - out = (image - mean) / std - return out - - -def normalize_latent_code(latent_code, adjust_norm=True): - """Normalizes latent code. - - NOTE: The latent code will always be normalized along the last axis. - Meanwhile, if `adjust_norm` is set as `True`, the norm of the result will be - adjusted to `sqrt(latent_code.shape[-1])` in order to avoid too small value. - - Args: - latent_code: The input latent code tensor to normalize. - adjust_norm: Whether to adjust the norm of the output. (default: True) - - Returns: - A normalized latent code tensor. - - Raises: - TypeError: If the input `latent_code` is not with type `torch.Tensor`. - """ - if not isinstance(latent_code, torch.Tensor): - raise TypeError(f'Input latent code should be with type ' - f'`torch.Tensor`, but `{type(latent_code)}` is ' - f'received!') - dim = latent_code.shape[-1] - norm = latent_code.pow(2).sum(-1, keepdim=True).pow(0.5) - out = latent_code / norm - if adjust_norm: - out = out * (dim ** 0.5) - return out - - -class ImageResizing(nn.Module): - """Implements the image resizing layer.""" - - def __init__(self, size): - super().__init__() - self.size = size - - def forward(self, image): - return resize_image(image, self.size) - - -class ImageNormalization(nn.Module): - """Implements the image normalization layer.""" - - def __init__(self, mean=127.5, std=127.5): - super().__init__() - self.mean = mean - self.std = std - - def forward(self, image): - return normalize_image(image, self.mean, self.std) - - -class LatentCodeNormalization(nn.Module): - """Implements the latent code normalization layer.""" - - def __init__(self, adjust_norm=True): - super().__init__() - self.adjust_norm = adjust_norm - - def forward(self, latent_code): - return normalize_latent_code(latent_code, self.adjust_norm) diff --git a/spaces/jbilcke-hf/Panoremix/src/app/firehose/delete.tsx b/spaces/jbilcke-hf/Panoremix/src/app/firehose/delete.tsx deleted file mode 100644 index 267d84e8f6eb723daac48d8bf2b6176ccc0b7e76..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/Panoremix/src/app/firehose/delete.tsx +++ /dev/null @@ -1,60 +0,0 @@ -import { startTransition, useEffect, useState } from "react" - -import { Button } from "@/components/ui/button" -import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog" -import { cn } from "@/lib/utils" -import { Post } from "@/types" - -import { deletePost } from "../engine/community" - -export function Delete({ post, moderationKey = "", onDelete = () => {} }: { post?: Post, moderationKey?: string; onDelete: (post: Post) => void }) { - const [isOpen, setOpen] = useState(false) - - useEffect(() => { - if (post?.postId && !isOpen) { - setOpen(true) - } - }, [post?.postId]) - - const handleDelete = () => { - startTransition(() => { - const fn = async () => { - setOpen(false) - if (!post) { return } - const postId = post.postId - await deletePost({ postId, moderationKey }) - onDelete(post) - } - fn() - }) - } - - return ( - - - - Delete - - {post ?
-          <div className={cn(
-            "py-2 text-sm text-stone-600"
-          )}>
-            <p>
-              {post.prompt}
-            </p>
-          </div>
-        : null}
-        <DialogFooter>
-          <Button variant="outline" onClick={() => setOpen(false)}>
-            Cancel
-          </Button>
-          <Button variant="destructive" onClick={handleDelete}>
-            Delete
-          </Button>
-        </DialogFooter>
-      </DialogContent>
-    </Dialog>
-  )
-}
\ No newline at end of file
diff --git a/spaces/jbilcke-hf/observer/src/components/ui/accordion.tsx b/spaces/jbilcke-hf/observer/src/components/ui/accordion.tsx
deleted file mode 100644
index 937620af27e5d8ef577f0baca229a9b753ebd017..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/observer/src/components/ui/accordion.tsx
+++ /dev/null
@@ -1,60 +0,0 @@
-"use client"
-
-import * as React from "react"
-import * as AccordionPrimitive from "@radix-ui/react-accordion"
-import { ChevronDown } from "lucide-react"
-
-import { cn } from "@/lib/utils"
-
-const Accordion = AccordionPrimitive.Root
-
-const AccordionItem = React.forwardRef<
-  React.ElementRef<typeof AccordionPrimitive.Item>,
-  React.ComponentPropsWithoutRef<typeof AccordionPrimitive.Item>
->(({ className, ...props }, ref) => (
-  <AccordionPrimitive.Item
-    ref={ref}
-    className={cn("border-b", className)}
-    {...props}
-  />
-))
-AccordionItem.displayName = "AccordionItem"
-
-const AccordionTrigger = React.forwardRef<
-  React.ElementRef<typeof AccordionPrimitive.Trigger>,
-  React.ComponentPropsWithoutRef<typeof AccordionPrimitive.Trigger>
->(({ className, children, ...props }, ref) => (
-  <AccordionPrimitive.Header className="flex">
-    <AccordionPrimitive.Trigger
-      ref={ref}
-      className={cn(
-        "flex flex-1 items-center justify-between py-4 font-medium transition-all hover:underline [&[data-state=open]>svg]:rotate-180",
-        className
-      )}
-      {...props}
-    >
-      {children}
-      <ChevronDown className="h-4 w-4 shrink-0 transition-transform duration-200" />
-    </AccordionPrimitive.Trigger>
-  </AccordionPrimitive.Header>
-))
-AccordionTrigger.displayName = AccordionPrimitive.Trigger.displayName
-
-const AccordionContent = React.forwardRef<
-  React.ElementRef<typeof AccordionPrimitive.Content>,
-  React.ComponentPropsWithoutRef<typeof AccordionPrimitive.Content>
->(({ className, children, ...props }, ref) => (
-  <AccordionPrimitive.Content
-    ref={ref}
-    className="overflow-hidden text-sm transition-all data-[state=closed]:animate-accordion-up data-[state=open]:animate-accordion-down"
-    {...props}
-  >
-    <div className={cn("pb-4 pt-0", className)}>{children}</div>
-  </AccordionPrimitive.Content>
-)) -AccordionContent.displayName = AccordionPrimitive.Content.displayName - -export { Accordion, AccordionItem, AccordionTrigger, AccordionContent } diff --git a/spaces/jbraun19/Webcam-Object-Recognition-Yolo-n-Coco/custom_layers.py b/spaces/jbraun19/Webcam-Object-Recognition-Yolo-n-Coco/custom_layers.py deleted file mode 100644 index 0c684d5acbce3fa50107e4e41f9055cacda9f06d..0000000000000000000000000000000000000000 --- a/spaces/jbraun19/Webcam-Object-Recognition-Yolo-n-Coco/custom_layers.py +++ /dev/null @@ -1,298 +0,0 @@ -import tensorflow as tf -from tensorflow.keras import layers, initializers, models - - -def conv(x, filters, kernel_size, downsampling=False, activation='leaky', batch_norm=True): - def mish(x): - return x * tf.math.tanh(tf.math.softplus(x)) - - if downsampling: - x = layers.ZeroPadding2D(padding=((1, 0), (1, 0)))(x) # top & left padding - padding = 'valid' - strides = 2 - else: - padding = 'same' - strides = 1 - x = layers.Conv2D(filters, - kernel_size, - strides=strides, - padding=padding, - use_bias=not batch_norm, - # kernel_regularizer=regularizers.l2(0.0005), - kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.01), - # bias_initializer=initializers.Zeros() - )(x) - if batch_norm: - x = layers.BatchNormalization()(x) - if activation == 'mish': - x = mish(x) - elif activation == 'leaky': - x = layers.LeakyReLU(alpha=0.1)(x) - return x - - -def residual_block(x, filters1, filters2, activation='leaky'): - """ - :param x: input tensor - :param filters1: num of filter for 1x1 conv - :param filters2: num of filter for 3x3 conv - :param activation: default activation function: leaky relu - :return: - """ - y = conv(x, filters1, kernel_size=1, activation=activation) - y = conv(y, filters2, kernel_size=3, activation=activation) - return layers.Add()([x, y]) - - -def csp_block(x, residual_out, repeat, residual_bottleneck=False): - """ - Cross Stage Partial Network (CSPNet) - transition_bottleneck_dims: 1x1 bottleneck - output_dims: 3x3 - :param x: - :param residual_out: - :param repeat: - :param residual_bottleneck: - :return: - """ - route = x - route = conv(route, residual_out, 1, activation="mish") - x = conv(x, residual_out, 1, activation="mish") - for i in range(repeat): - x = residual_block(x, - residual_out // 2 if residual_bottleneck else residual_out, - residual_out, - activation="mish") - x = conv(x, residual_out, 1, activation="mish") - - x = layers.Concatenate()([x, route]) - return x - - -def darknet53(x): - x = conv(x, 32, 3) - x = conv(x, 64, 3, downsampling=True) - - for i in range(1): - x = residual_block(x, 32, 64) - x = conv(x, 128, 3, downsampling=True) - - for i in range(2): - x = residual_block(x, 64, 128) - x = conv(x, 256, 3, downsampling=True) - - for i in range(8): - x = residual_block(x, 128, 256) - route_1 = x - x = conv(x, 512, 3, downsampling=True) - - for i in range(8): - x = residual_block(x, 256, 512) - route_2 = x - x = conv(x, 1024, 3, downsampling=True) - - for i in range(4): - x = residual_block(x, 512, 1024) - - return route_1, route_2, x - - -def cspdarknet53(input): - x = conv(input, 32, 3) - x = conv(x, 64, 3, downsampling=True) - - x = csp_block(x, residual_out=64, repeat=1, residual_bottleneck=True) - x = conv(x, 64, 1, activation='mish') - x = conv(x, 128, 3, activation='mish', downsampling=True) - - x = csp_block(x, residual_out=64, repeat=2) - x = conv(x, 128, 1, activation='mish') - x = conv(x, 256, 3, activation='mish', downsampling=True) - - x = csp_block(x, residual_out=128, repeat=8) - x = conv(x, 256, 1, 
activation='mish') - route0 = x - x = conv(x, 512, 3, activation='mish', downsampling=True) - - x = csp_block(x, residual_out=256, repeat=8) - x = conv(x, 512, 1, activation='mish') - route1 = x - x = conv(x, 1024, 3, activation='mish', downsampling=True) - - x = csp_block(x, residual_out=512, repeat=4) - - x = conv(x, 1024, 1, activation="mish") - - x = conv(x, 512, 1) - x = conv(x, 1024, 3) - x = conv(x, 512, 1) - - x = layers.Concatenate()([layers.MaxPooling2D(pool_size=13, strides=1, padding='same')(x), - layers.MaxPooling2D(pool_size=9, strides=1, padding='same')(x), - layers.MaxPooling2D(pool_size=5, strides=1, padding='same')(x), - x - ]) - x = conv(x, 512, 1) - x = conv(x, 1024, 3) - route2 = conv(x, 512, 1) - return models.Model(input, [route0, route1, route2]) - - -def yolov4_neck(x, num_classes): - backbone_model = cspdarknet53(x) - route0, route1, route2 = backbone_model.output - - route_input = route2 - x = conv(route2, 256, 1) - x = layers.UpSampling2D()(x) - route1 = conv(route1, 256, 1) - x = layers.Concatenate()([route1, x]) - - x = conv(x, 256, 1) - x = conv(x, 512, 3) - x = conv(x, 256, 1) - x = conv(x, 512, 3) - x = conv(x, 256, 1) - - route1 = x - x = conv(x, 128, 1) - x = layers.UpSampling2D()(x) - route0 = conv(route0, 128, 1) - x = layers.Concatenate()([route0, x]) - - x = conv(x, 128, 1) - x = conv(x, 256, 3) - x = conv(x, 128, 1) - x = conv(x, 256, 3) - x = conv(x, 128, 1) - - route0 = x - x = conv(x, 256, 3) - conv_sbbox = conv(x, 3 * (num_classes + 5), 1, activation=None, batch_norm=False) - - x = conv(route0, 256, 3, downsampling=True) - x = layers.Concatenate()([x, route1]) - - x = conv(x, 256, 1) - x = conv(x, 512, 3) - x = conv(x, 256, 1) - x = conv(x, 512, 3) - x = conv(x, 256, 1) - - route1 = x - x = conv(x, 512, 3) - conv_mbbox = conv(x, 3 * (num_classes + 5), 1, activation=None, batch_norm=False) - - x = conv(route1, 512, 3, downsampling=True) - x = layers.Concatenate()([x, route_input]) - - x = conv(x, 512, 1) - x = conv(x, 1024, 3) - x = conv(x, 512, 1) - x = conv(x, 1024, 3) - x = conv(x, 512, 1) - - x = conv(x, 1024, 3) - conv_lbbox = conv(x, 3 * (num_classes + 5), 1, activation=None, batch_norm=False) - - return [conv_sbbox, conv_mbbox, conv_lbbox] - - -def yolov4_head(yolo_neck_outputs, classes, anchors, xyscale): - bbox0, object_probability0, class_probabilities0, pred_box0 = get_boxes(yolo_neck_outputs[0], - anchors=anchors[0, :, :], classes=classes, - grid_size=52, strides=8, - xyscale=xyscale[0]) - bbox1, object_probability1, class_probabilities1, pred_box1 = get_boxes(yolo_neck_outputs[1], - anchors=anchors[1, :, :], classes=classes, - grid_size=26, strides=16, - xyscale=xyscale[1]) - bbox2, object_probability2, class_probabilities2, pred_box2 = get_boxes(yolo_neck_outputs[2], - anchors=anchors[2, :, :], classes=classes, - grid_size=13, strides=32, - xyscale=xyscale[2]) - x = [bbox0, object_probability0, class_probabilities0, pred_box0, - bbox1, object_probability1, class_probabilities1, pred_box1, - bbox2, object_probability2, class_probabilities2, pred_box2] - - return x - - -def get_boxes(pred, anchors, classes, grid_size, strides, xyscale): - """ - - :param pred: - :param anchors: - :param classes: - :param grid_size: - :param strides: - :param xyscale: - :return: - """ - pred = tf.reshape(pred, - (tf.shape(pred)[0], - grid_size, - grid_size, - 3, - 5 + classes)) # (batch_size, grid_size, grid_size, 3, 5+classes) - box_xy, box_wh, obj_prob, class_prob = tf.split( - pred, (2, 2, 1, classes), axis=-1 - ) # (?, 52, 52, 3, 2) (?, 52, 52, 3, 2) 
(?, 52, 52, 3, 1) (?, 52, 52, 3, 80) - - box_xy = tf.sigmoid(box_xy) # (?, 52, 52, 3, 2) - obj_prob = tf.sigmoid(obj_prob) # (?, 52, 52, 3, 1) - class_prob = tf.sigmoid(class_prob) # (?, 52, 52, 3, 80) - pred_box_xywh = tf.concat((box_xy, box_wh), axis=-1) # (?, 52, 52, 3, 4) - - grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size)) # (52, 52) (52, 52) - grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # (52, 52, 1, 2) - grid = tf.cast(grid, dtype=tf.float32) - - box_xy = ((box_xy * xyscale) - 0.5 * (xyscale - 1) + grid) * strides # (?, 52, 52, 1, 4) - - box_wh = tf.exp(box_wh) * anchors # (?, 52, 52, 3, 2) - box_x1y1 = box_xy - box_wh / 2 # (?, 52, 52, 3, 2) - box_x2y2 = box_xy + box_wh / 2 # (?, 52, 52, 3, 2) - pred_box_x1y1x2y2 = tf.concat([box_x1y1, box_x2y2], axis=-1) # (?, 52, 52, 3, 4) - return pred_box_x1y1x2y2, obj_prob, class_prob, pred_box_xywh - # pred_box_x1y1x2y2: absolute xy value - - -def nms(model_ouputs, input_shape, num_class, iou_threshold=0.413, score_threshold=0.3): - """ - Apply Non-Maximum suppression - ref: https://www.tensorflow.org/api_docs/python/tf/image/combined_non_max_suppression - :param model_ouputs: yolo model model_ouputs - :param input_shape: size of input image - :return: nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections - """ - bs = tf.shape(model_ouputs[0])[0] - boxes = tf.zeros((bs, 0, 4)) - confidence = tf.zeros((bs, 0, 1)) - class_probabilities = tf.zeros((bs, 0, num_class)) - - for output_idx in range(0, len(model_ouputs), 4): - output_xy = model_ouputs[output_idx] - output_conf = model_ouputs[output_idx + 1] - output_classes = model_ouputs[output_idx + 2] - boxes = tf.concat([boxes, tf.reshape(output_xy, (bs, -1, 4))], axis=1) - confidence = tf.concat([confidence, tf.reshape(output_conf, (bs, -1, 1))], axis=1) - class_probabilities = tf.concat([class_probabilities, tf.reshape(output_classes, (bs, -1, num_class))], axis=1) - - scores = confidence * class_probabilities - boxes = tf.expand_dims(boxes, axis=-2) - boxes = boxes / input_shape[0] # box normalization: relative img size - print(f'nms iou: {iou_threshold} score: {score_threshold}') - (nmsed_boxes, # [bs, max_detections, 4] - nmsed_scores, # [bs, max_detections] - nmsed_classes, # [bs, max_detections] - valid_detections # [batch_size] - ) = tf.image.combined_non_max_suppression( - boxes=boxes, # y1x1, y2x2 [0~1] - scores=scores, - max_output_size_per_class=100, - max_total_size=100, # max_boxes: Maximum nmsed_boxes in a single img. - iou_threshold=iou_threshold, # iou_threshold: Minimum overlap that counts as a valid detection. - score_threshold=score_threshold, # # Minimum confidence that counts as a valid detection. - ) - return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections \ No newline at end of file diff --git a/spaces/jcenaa/Segment-Any-RGBD/datasets/scannet_preprocess/scannet_pair/reader.py b/spaces/jcenaa/Segment-Any-RGBD/datasets/scannet_preprocess/scannet_pair/reader.py deleted file mode 100644 index 3077ec828988957c95fa85e8399ed382273a0257..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/datasets/scannet_preprocess/scannet_pair/reader.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse -import os, sys - -from SensorData import SensorData - - -def reader(filename, - output_path, - frame_skip, - export_color_images=False, - export_depth_images=False, - export_poses=False, - export_intrinsics=False): - if not os.path.exists(output_path): - os.makedirs(output_path) - - # load the data - print('loading %s...' 
% filename) - sd = SensorData(filename) - if export_depth_images: - sd.export_depth_images(os.path.join(output_path, 'depth'), frame_skip=frame_skip) - if export_color_images: - sd.export_color_images(os.path.join(output_path, 'color'), frame_skip=frame_skip) - if export_poses: - sd.export_poses(os.path.join(output_path, 'pose'), frame_skip=frame_skip) - if export_intrinsics: - sd.export_intrinsics(os.path.join(output_path, 'intrinsic')) diff --git a/spaces/jiawei011/dreamgaussian/scripts/run.sh b/spaces/jiawei011/dreamgaussian/scripts/run.sh deleted file mode 100644 index 09bceb7674dfa8b365c0a0e6e6b59fb13b484cd4..0000000000000000000000000000000000000000 --- a/spaces/jiawei011/dreamgaussian/scripts/run.sh +++ /dev/null @@ -1,5 +0,0 @@ -export CUDA_VISIBLE_DEVICES=5 - -python main.py --config configs/image.yaml input=data/anya_rgba.png save_path=anya -python main2.py --config configs/image.yaml input=data/anya_rgba.png save_path=anya -python -m kiui.render logs/anya.obj --save_video videos/anya.mp4 --wogui diff --git a/spaces/jmyungjoon/cartoon/README.md b/spaces/jmyungjoon/cartoon/README.md deleted file mode 100644 index 18be91e4aa39a5efe16124ebd447869b73632bde..0000000000000000000000000000000000000000 --- a/spaces/jmyungjoon/cartoon/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Cartoon -emoji: 🌖 -colorFrom: yellow -colorTo: blue -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssecalgs/cryptography.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssecalgs/cryptography.py deleted file mode 100644 index 5a31a8123db92080e9976795b2350dacbdc65abb..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssecalgs/cryptography.py +++ /dev/null @@ -1,68 +0,0 @@ -from typing import Any, Optional, Type - -from cryptography.hazmat.primitives import serialization - -from dns.dnssecalgs.base import GenericPrivateKey, GenericPublicKey -from dns.exception import AlgorithmKeyMismatch - - -class CryptographyPublicKey(GenericPublicKey): - key: Any = None - key_cls: Any = None - - def __init__(self, key: Any) -> None: # pylint: disable=super-init-not-called - if self.key_cls is None: - raise TypeError("Undefined private key class") - if not isinstance( # pylint: disable=isinstance-second-argument-not-valid-type - key, self.key_cls - ): - raise AlgorithmKeyMismatch - self.key = key - - @classmethod - def from_pem(cls, public_pem: bytes) -> "GenericPublicKey": - key = serialization.load_pem_public_key(public_pem) - return cls(key=key) - - def to_pem(self) -> bytes: - return self.key.public_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PublicFormat.SubjectPublicKeyInfo, - ) - - -class CryptographyPrivateKey(GenericPrivateKey): - key: Any = None - key_cls: Any = None - public_cls: Type[CryptographyPublicKey] - - def __init__(self, key: Any) -> None: # pylint: disable=super-init-not-called - if self.key_cls is None: - raise TypeError("Undefined private key class") - if not isinstance( # pylint: disable=isinstance-second-argument-not-valid-type - key, self.key_cls - ): - raise AlgorithmKeyMismatch - self.key = key - - def public_key(self) -> "CryptographyPublicKey": - return self.public_cls(key=self.key.public_key()) - - @classmethod - def from_pem( - cls, 
private_pem: bytes, password: Optional[bytes] = None - ) -> "GenericPrivateKey": - key = serialization.load_pem_private_key(private_pem, password=password) - return cls(key=key) - - def to_pem(self, password: Optional[bytes] = None) -> bytes: - encryption_algorithm: serialization.KeySerializationEncryption - if password: - encryption_algorithm = serialization.BestAvailableEncryption(password) - else: - encryption_algorithm = serialization.NoEncryption() - return self.key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.PKCS8, - encryption_algorithm=encryption_algorithm, - ) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/ANY/ISDN.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/ANY/ISDN.py deleted file mode 100644 index 536a35d61e841352d33431818364a8913ae5f5d6..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/ANY/ISDN.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct - -import dns.exception -import dns.immutable -import dns.rdata -import dns.tokenizer - - -@dns.immutable.immutable -class ISDN(dns.rdata.Rdata): - - """ISDN record""" - - # see: RFC 1183 - - __slots__ = ["address", "subaddress"] - - def __init__(self, rdclass, rdtype, address, subaddress): - super().__init__(rdclass, rdtype) - self.address = self._as_bytes(address, True, 255) - self.subaddress = self._as_bytes(subaddress, True, 255) - - def to_text(self, origin=None, relativize=True, **kw): - if self.subaddress: - return '"{}" "{}"'.format( - dns.rdata._escapify(self.address), dns.rdata._escapify(self.subaddress) - ) - else: - return '"%s"' % dns.rdata._escapify(self.address) - - @classmethod - def from_text( - cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None - ): - address = tok.get_string() - tokens = tok.get_remaining(max_tokens=1) - if len(tokens) >= 1: - subaddress = tokens[0].unescape().value - else: - subaddress = "" - return cls(rdclass, rdtype, address, subaddress) - - def _to_wire(self, file, compress=None, origin=None, canonicalize=False): - l = len(self.address) - assert l < 256 - file.write(struct.pack("!B", l)) - file.write(self.address) - l = len(self.subaddress) - if l > 0: - assert l < 256 - file.write(struct.pack("!B", l)) - file.write(self.subaddress) - - @classmethod - def from_wire_parser(cls, rdclass, rdtype, parser, origin=None): - address = parser.get_counted_bytes() - if parser.remaining() > 0: - subaddress = parser.get_counted_bytes() - else: - subaddress = b"" - return cls(rdclass, rdtype, address, subaddress) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/D_S_I_G_.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/D_S_I_G_.py deleted file mode 100644 index d902a29080aff5a275f530c7658d3c9eb4498034..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/D_S_I_G_.py +++ /dev/null @@ -1,151 +0,0 @@ -from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval -from fontTools.misc import sstruct -from . import DefaultTable -import base64 - -DSIG_HeaderFormat = """ - > # big endian - ulVersion: L - usNumSigs: H - usFlag: H -""" -# followed by an array of usNumSigs DSIG_Signature records -DSIG_SignatureFormat = """ - > # big endian - ulFormat: L - ulLength: L # length includes DSIG_SignatureBlock header - ulOffset: L -""" -# followed by an array of usNumSigs DSIG_SignatureBlock records, -# each followed immediately by the pkcs7 bytes -DSIG_SignatureBlockFormat = """ - > # big endian - usReserved1: H - usReserved2: H - cbSignature: l # length of following raw pkcs7 data -""" - -# -# NOTE -# the DSIG table format allows for SignatureBlocks residing -# anywhere in the table and possibly in a different order as -# listed in the array after the first table header -# -# this implementation does not keep track of any gaps and/or data -# before or after the actual signature blocks while decompiling, -# and puts them in the same physical order as listed in the header -# on compilation with no padding whatsoever. 
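-#
-# as a rough sketch (hypothetical values, plain struct instead of sstruct),
-# the three format strings above describe big-endian records:
-#
-#   import struct
-#   header = struct.pack(">LHH", 1, 1, 1)    # ulVersion, usNumSigs, usFlag
-#   sigrec = struct.pack(">LLL", 1, 32, 20)  # ulFormat, ulLength, ulOffset
-#   block  = struct.pack(">HHl", 0, 0, 12)   # usReserved1, usReserved2, cbSignature
-#   assert struct.unpack(">LHH", header) == (1, 1, 1)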
-# - - -class table_D_S_I_G_(DefaultTable.DefaultTable): - def decompile(self, data, ttFont): - dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self) - assert self.ulVersion == 1, "DSIG ulVersion must be 1" - assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0" - self.signatureRecords = sigrecs = [] - for n in range(self.usNumSigs): - sigrec, newData = sstruct.unpack2( - DSIG_SignatureFormat, newData, SignatureRecord() - ) - assert sigrec.ulFormat == 1, ( - "DSIG signature record #%d ulFormat must be 1" % n - ) - sigrecs.append(sigrec) - for sigrec in sigrecs: - dummy, newData = sstruct.unpack2( - DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec - ) - assert sigrec.usReserved1 == 0, ( - "DSIG signature record #%d usReserverd1 must be 0" % n - ) - assert sigrec.usReserved2 == 0, ( - "DSIG signature record #%d usReserverd2 must be 0" % n - ) - sigrec.pkcs7 = newData[: sigrec.cbSignature] - - def compile(self, ttFont): - packed = sstruct.pack(DSIG_HeaderFormat, self) - headers = [packed] - offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat) - data = [] - for sigrec in self.signatureRecords: - # first pack signature block - sigrec.cbSignature = len(sigrec.pkcs7) - packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7 - data.append(packed) - # update redundant length field - sigrec.ulLength = len(packed) - # update running table offset - sigrec.ulOffset = offset - headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec)) - offset += sigrec.ulLength - if offset % 2: - # Pad to even bytes - data.append(b"\0") - return bytesjoin(headers + data) - - def toXML(self, xmlWriter, ttFont): - xmlWriter.comment( - "note that the Digital Signature will be invalid after recompilation!" - ) - xmlWriter.newline() - xmlWriter.simpletag( - "tableHeader", - version=self.ulVersion, - numSigs=self.usNumSigs, - flag="0x%X" % self.usFlag, - ) - for sigrec in self.signatureRecords: - xmlWriter.newline() - sigrec.toXML(xmlWriter, ttFont) - xmlWriter.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "tableHeader": - self.signatureRecords = [] - self.ulVersion = safeEval(attrs["version"]) - self.usNumSigs = safeEval(attrs["numSigs"]) - self.usFlag = safeEval(attrs["flag"]) - return - if name == "SignatureRecord": - sigrec = SignatureRecord() - sigrec.fromXML(name, attrs, content, ttFont) - self.signatureRecords.append(sigrec) - - -pem_spam = lambda l, spam={ - "-----BEGIN PKCS7-----": True, - "-----END PKCS7-----": True, - "": True, -}: not spam.get(l.strip()) - - -def b64encode(b): - s = base64.b64encode(b) - # Line-break at 76 chars. 
- items = [] - while s: - items.append(tostr(s[:76])) - items.append("\n") - s = s[76:] - return strjoin(items) - - -class SignatureRecord(object): - def __repr__(self): - return "<%s: %s>" % (self.__class__.__name__, self.__dict__) - - def toXML(self, writer, ttFont): - writer.begintag(self.__class__.__name__, format=self.ulFormat) - writer.newline() - writer.write_noindent("-----BEGIN PKCS7-----\n") - writer.write_noindent(b64encode(self.pkcs7)) - writer.write_noindent("-----END PKCS7-----\n") - writer.endtag(self.__class__.__name__) - - def fromXML(self, name, attrs, content, ttFont): - self.ulFormat = safeEval(attrs["format"]) - self.usReserved1 = safeEval(attrs.get("reserved1", "0")) - self.usReserved2 = safeEval(attrs.get("reserved2", "0")) - self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content)))) diff --git a/spaces/jskalbg/ChatDev01/camel/prompts/translation.py b/spaces/jskalbg/ChatDev01/camel/prompts/translation.py deleted file mode 100644 index 40993a1dd2d1d83cfe68ed2368de107d5d570fd9..0000000000000000000000000000000000000000 --- a/spaces/jskalbg/ChatDev01/camel/prompts/translation.py +++ /dev/null @@ -1,42 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -from typing import Any - -from camel.prompts import TextPrompt, TextPromptDict -from camel.typing import RoleType - - -# flake8: noqa :E501 -class TranslationPromptTemplateDict(TextPromptDict): - r"""A dictionary containing :obj:`TextPrompt` used in the `Translation` - task. - - Attributes: - ASSISTANT_PROMPT (TextPrompt): A system prompt for the AI assistant - that outlines the rules of the conversation and provides - instructions for completing tasks. - """ - ASSISTANT_PROMPT = TextPrompt( - """You are an expert English to {language} translator. -Your sole purpose is to accurately translate any text presented to you from English to {language}. -Please provide the {language} translation for the given text. -If you are presented with an empty string, simply return an empty string as the translation. -Only text in between ```TEXT``` should not be translated. -Do not provide any explanation. 
Just provide a translation.""") - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.update({ - RoleType.ASSISTANT: self.ASSISTANT_PROMPT, - }) diff --git a/spaces/kevinwang676/Bark-New-Version/README.md b/spaces/kevinwang676/Bark-New-Version/README.md deleted file mode 100644 index 7571b0387dcfbbd198f3cb110f30baf3fd423c8d..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Bark-New-Version/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Bark New Version -emoji: 👁 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kevinwang676/Bert-VITS2/text/japanese.py b/spaces/kevinwang676/Bert-VITS2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Bert-VITS2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += [marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in 
open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/kevinwang676/Bert-VITS2/text/symbols.py b/spaces/kevinwang676/Bert-VITS2/text/symbols.py deleted file mode 100644 index 9dfae4e633829f20c4fd767b1c7a9198911ed801..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Bert-VITS2/text/symbols.py +++ /dev/null @@ -1,51 +0,0 @@ -punctuation = ['!', '?', '…', ",", ".", "'", '-'] -pu_symbols = punctuation + ["SP", "UNK"] -pad = '_' - -# chinese -zh_symbols = ['E', 'En', 'a', 'ai', 'an', 'ang', 'ao', 'b', 'c', 'ch', 'd', 'e', 'ei', 'en', 'eng', 'er', 'f', 'g', 'h', - 'i', 'i0', 'ia', 'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'ir', 'iu', 'j', 'k', 'l', 'm', 'n', 'o', - 'ong', - 'ou', 'p', 'q', 'r', 's', 'sh', 't', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn', - 'w', 'x', 'y', 'z', 'zh', - "AA", "EE", "OO"] -num_zh_tones = 6 - -# japanese -ja_symbols = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j', 'k', 'ky', - 'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z'] -num_ja_tones = 1 - -# English -en_symbols = ['aa', 'ae', 'ah', 'ao', 'aw', 'ay', 'b', 'ch', 'd', 'dh', 'eh', 'er', 'ey', 'f', 'g', 'hh', 'ih', 'iy', - 'jh', 'k', 'l', 'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's', - 'sh', 't', 'th', 'uh', 'uw', 'V', 'w', 'y', 'z', 'zh'] -num_en_tones = 4 - -# combine all symbols -normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols)) -symbols = [pad] + normal_symbols + pu_symbols -sil_phonemes_ids = [symbols.index(i) for i in pu_symbols] - -# combine all tones -num_tones = num_zh_tones + num_ja_tones + num_en_tones - -# language maps -language_id_map = { - 'ZH': 0, - "JA": 1, - "EN": 2 -} -num_languages = len(language_id_map.keys()) - -language_tone_start_map = { - 'ZH': 0, - "JA": num_zh_tones, - "EN": num_zh_tones + num_ja_tones -} - -if __name__ == '__main__': - a = set(zh_symbols) - b = set(en_symbols) - print(sorted(a&b)) - diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py deleted file mode 100644 index 687cdc58c0336c92b1e4f9a410ba67ebaab2bc7a..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ...dist_utils import master_only -from ..hook import HOOKS -from .base import LoggerHook - - -@HOOKS.register_module() -class DvcliveLoggerHook(LoggerHook): - """Class to log metrics with dvclive. - - It requires `dvclive`_ to be installed. - - Args: - path (str): Directory where dvclive will write TSV log files. - interval (int): Logging interval (every k iterations). - Default 10. - ignore_last (bool): Ignore the log of last iterations in each epoch - if less than `interval`. - Default: True. - reset_flag (bool): Whether to clear the output buffer after logging. - Default: True. - by_epoch (bool): Whether EpochBasedRunner is used. - Default: True. - - .. 
_dvclive: - https://dvc.org/doc/dvclive - """ - - def __init__(self, - path, - interval=10, - ignore_last=True, - reset_flag=True, - by_epoch=True): - - super(DvcliveLoggerHook, self).__init__(interval, ignore_last, - reset_flag, by_epoch) - self.path = path - self.import_dvclive() - - def import_dvclive(self): - try: - import dvclive - except ImportError: - raise ImportError( - 'Please run "pip install dvclive" to install dvclive') - self.dvclive = dvclive - - @master_only - def before_run(self, runner): - self.dvclive.init(self.path) - - @master_only - def log(self, runner): - tags = self.get_loggable_tags(runner) - if tags: - for k, v in tags.items(): - self.dvclive.log(k, v, step=self.get_iter(runner)) diff --git a/spaces/kohrisatou-infinity/KIP_01_beta/hubert/hubert_model.py b/spaces/kohrisatou-infinity/KIP_01_beta/hubert/hubert_model.py deleted file mode 100644 index 7fb642d89b07ca60792debab18e3454f52d8f357..0000000000000000000000000000000000000000 --- a/spaces/kohrisatou-infinity/KIP_01_beta/hubert/hubert_model.py +++ /dev/null @@ -1,222 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - x, mask = self.encode(x) - x = self.proj(x) - logits = self.logits(x) - return logits, mask - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - @torch.inference_mode() - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = 
nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = 
mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. - Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/konghl/gpt/README.md b/spaces/konghl/gpt/README.md deleted file mode 100644 index cb89d2018a9dc554a2e0d949f79ff953d5004232..0000000000000000000000000000000000000000 --- a/spaces/konghl/gpt/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Gpt -emoji: 👀 -colorFrom: red -colorTo: blue -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiofiles/threadpool/binary.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiofiles/threadpool/binary.py deleted file mode 100644 index 52d0cb30a3db51d1b8686001882136d69a1dc0fa..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiofiles/threadpool/binary.py +++ /dev/null @@ -1,108 +0,0 @@ -from ..base import AsyncBase, AsyncIndirectBase -from .utils import ( - delegate_to_executor, - proxy_method_directly, - proxy_property_directly, -) - - -@delegate_to_executor( - "close", - "flush", - "isatty", - "read", - "read1", - "readinto", - "readline", - "readlines", - "seek", - "seekable", - "tell", - "truncate", - "writable", - "write", - "writelines", -) -@proxy_method_directly("detach", "fileno", "readable") -@proxy_property_directly("closed", "raw", "name", "mode") -class AsyncBufferedIOBase(AsyncBase): - """The asyncio executor version of io.BufferedWriter and BufferedIOBase.""" - - -@delegate_to_executor("peek") -class AsyncBufferedReader(AsyncBufferedIOBase): - """The asyncio executor version of io.BufferedReader and Random.""" - - -@delegate_to_executor( - "close", - "flush", - "isatty", - "read", - "readall", - "readinto", - "readline", - "readlines", - "seek", - "seekable", - "tell", - "truncate", - "writable", - "write", - "writelines", -) -@proxy_method_directly("fileno", "readable") -@proxy_property_directly("closed", "name", "mode") -class AsyncFileIO(AsyncBase): - """The asyncio executor version of io.FileIO.""" - - -@delegate_to_executor( - "close", - "flush", - "isatty", - "read", - "read1", - "readinto", - "readline", - "readlines", - "seek", - "seekable", - "tell", - "truncate", - "writable", - "write", - "writelines", -) -@proxy_method_directly("detach", "fileno", "readable") -@proxy_property_directly("closed", "raw", "name", "mode") -class AsyncIndirectBufferedIOBase(AsyncIndirectBase): - """The indirect asyncio executor version of io.BufferedWriter and BufferedIOBase.""" - - -@delegate_to_executor("peek") -class AsyncIndirectBufferedReader(AsyncIndirectBufferedIOBase): - """The indirect asyncio executor version of io.BufferedReader and Random.""" - - -@delegate_to_executor( - "close", - "flush", - "isatty", - "read", - "readall", - "readinto", - "readline", - "readlines", - "seek", - "seekable", - "tell", - "truncate", - "writable", - "write", - "writelines", -) -@proxy_method_directly("fileno", "readable") -@proxy_property_directly("closed", "name", "mode") -class 
AsyncIndirectFileIO(AsyncIndirectBase): - """The indirect asyncio executor version of io.FileIO.""" diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/datastructures.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/datastructures.py deleted file mode 100644 index b20a25ab6ed090cdee112830b5510659425d9a2f..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/datastructures.py +++ /dev/null @@ -1,56 +0,0 @@ -from typing import Any, Callable, Dict, Iterable, Type, TypeVar - -from starlette.datastructures import URL as URL # noqa: F401 -from starlette.datastructures import Address as Address # noqa: F401 -from starlette.datastructures import FormData as FormData # noqa: F401 -from starlette.datastructures import Headers as Headers # noqa: F401 -from starlette.datastructures import QueryParams as QueryParams # noqa: F401 -from starlette.datastructures import State as State # noqa: F401 -from starlette.datastructures import UploadFile as StarletteUploadFile - - -class UploadFile(StarletteUploadFile): - @classmethod - def __get_validators__(cls: Type["UploadFile"]) -> Iterable[Callable[..., Any]]: - yield cls.validate - - @classmethod - def validate(cls: Type["UploadFile"], v: Any) -> Any: - if not isinstance(v, StarletteUploadFile): - raise ValueError(f"Expected UploadFile, received: {type(v)}") - return v - - @classmethod - def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: - field_schema.update({"type": "string", "format": "binary"}) - - -class DefaultPlaceholder: - """ - You shouldn't use this class directly. - - It's used internally to recognize when a default value has been overwritten, even - if the overridden default value was truthy. - """ - - def __init__(self, value: Any): - self.value = value - - def __bool__(self) -> bool: - return bool(self.value) - - def __eq__(self, o: object) -> bool: - return isinstance(o, DefaultPlaceholder) and o.value == self.value - - -DefaultType = TypeVar("DefaultType") - - -def Default(value: DefaultType) -> DefaultType: - """ - You shouldn't use this function directly. - - It's used internally to recognize when a default value has been overwritten, even - if the overridden default value was truthy. - """ - return DefaultPlaceholder(value) # type: ignore diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/httpcore-0.17.2.dist-info/LICENSE.md b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/httpcore-0.17.2.dist-info/LICENSE.md deleted file mode 100644 index 311b2b56c53f678ab95fc0def708c675d521a807..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/httpcore-0.17.2.dist-info/LICENSE.md +++ /dev/null @@ -1,27 +0,0 @@ -Copyright © 2020, [Encode OSS Ltd](https://www.encode.io/). -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_backend_qt.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_backend_qt.py deleted file mode 100644 index f79546323c4763f2ce7a58e5e681b0930aac3569..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_backend_qt.py +++ /dev/null @@ -1,644 +0,0 @@ -import copy -import importlib -import inspect -import os -import signal -import subprocess -import sys - -from datetime import date, datetime -from unittest import mock - -import pytest - -import matplotlib -from matplotlib import pyplot as plt -from matplotlib._pylab_helpers import Gcf -from matplotlib import _c_internal_utils - - -try: - from matplotlib.backends.qt_compat import QtGui, QtWidgets # noqa - from matplotlib.backends.qt_editor import _formlayout -except ImportError: - pytestmark = pytest.mark.skip('No usable Qt bindings') - - -_test_timeout = 60 # A reasonably safe value for slower architectures. - - -@pytest.fixture -def qt_core(request): - qt_compat = pytest.importorskip('matplotlib.backends.qt_compat') - QtCore = qt_compat.QtCore - - return QtCore - - -@pytest.mark.backend('QtAgg', skip_on_importerror=True) -def test_fig_close(): - - # save the state of Gcf.figs - init_figs = copy.copy(Gcf.figs) - - # make a figure using pyplot interface - fig = plt.figure() - - # simulate user clicking the close button by reaching in - # and calling close on the underlying Qt object - fig.canvas.manager.window.close() - - # assert that we have removed the reference to the FigureManager - # that got added by plt.figure() - assert init_figs == Gcf.figs - - -class WaitForStringPopen(subprocess.Popen): - """ - A Popen that passes flags that allow triggering KeyboardInterrupt. - """ - - def __init__(self, *args, **kwargs): - if sys.platform == 'win32': - kwargs['creationflags'] = subprocess.CREATE_NEW_CONSOLE - super().__init__( - *args, **kwargs, - # Force Agg so that each test can switch to its desired Qt backend. 
- env={**os.environ, "MPLBACKEND": "Agg", "SOURCE_DATE_EPOCH": "0"}, - stdout=subprocess.PIPE, universal_newlines=True) - - def wait_for(self, terminator): - """Read until the terminator is reached.""" - buf = '' - while True: - c = self.stdout.read(1) - if not c: - raise RuntimeError( - f'Subprocess died before emitting expected {terminator!r}') - buf += c - if buf.endswith(terminator): - return - - -def _test_sigint_impl(backend, target_name, kwargs): - import sys - import matplotlib.pyplot as plt - import os - import threading - - plt.switch_backend(backend) - from matplotlib.backends.qt_compat import QtCore # noqa - - def interrupter(): - if sys.platform == 'win32': - import win32api - win32api.GenerateConsoleCtrlEvent(0, 0) - else: - import signal - os.kill(os.getpid(), signal.SIGINT) - - target = getattr(plt, target_name) - timer = threading.Timer(1, interrupter) - fig = plt.figure() - fig.canvas.mpl_connect( - 'draw_event', - lambda *args: print('DRAW', flush=True) - ) - fig.canvas.mpl_connect( - 'draw_event', - lambda *args: timer.start() - ) - try: - target(**kwargs) - except KeyboardInterrupt: - print('SUCCESS', flush=True) - - -@pytest.mark.backend('QtAgg', skip_on_importerror=True) -@pytest.mark.parametrize("target, kwargs", [ - ('show', {'block': True}), - ('pause', {'interval': 10}) -]) -def test_sigint(target, kwargs): - backend = plt.get_backend() - proc = WaitForStringPopen( - [sys.executable, "-c", - inspect.getsource(_test_sigint_impl) + - f"\n_test_sigint_impl({backend!r}, {target!r}, {kwargs!r})"]) - try: - proc.wait_for('DRAW') - stdout, _ = proc.communicate(timeout=_test_timeout) - except: - proc.kill() - stdout, _ = proc.communicate() - raise - print(stdout) - assert 'SUCCESS' in stdout - - -def _test_other_signal_before_sigint_impl(backend, target_name, kwargs): - import signal - import matplotlib.pyplot as plt - plt.switch_backend(backend) - from matplotlib.backends.qt_compat import QtCore # noqa - - target = getattr(plt, target_name) - - fig = plt.figure() - fig.canvas.mpl_connect('draw_event', - lambda *args: print('DRAW', flush=True)) - - timer = fig.canvas.new_timer(interval=1) - timer.single_shot = True - timer.add_callback(print, 'SIGUSR1', flush=True) - - def custom_signal_handler(signum, frame): - timer.start() - signal.signal(signal.SIGUSR1, custom_signal_handler) - - try: - target(**kwargs) - except KeyboardInterrupt: - print('SUCCESS', flush=True) - - -@pytest.mark.skipif(sys.platform == 'win32', - reason='No other signal available to send on Windows') -@pytest.mark.backend('QtAgg', skip_on_importerror=True) -@pytest.mark.parametrize("target, kwargs", [ - ('show', {'block': True}), - ('pause', {'interval': 10}) -]) -def test_other_signal_before_sigint(target, kwargs): - backend = plt.get_backend() - proc = WaitForStringPopen( - [sys.executable, "-c", - inspect.getsource(_test_other_signal_before_sigint_impl) + - "\n_test_other_signal_before_sigint_impl(" - f"{backend!r}, {target!r}, {kwargs!r})"]) - try: - proc.wait_for('DRAW') - os.kill(proc.pid, signal.SIGUSR1) - proc.wait_for('SIGUSR1') - os.kill(proc.pid, signal.SIGINT) - stdout, _ = proc.communicate(timeout=_test_timeout) - except: - proc.kill() - stdout, _ = proc.communicate() - raise - print(stdout) - assert 'SUCCESS' in stdout - plt.figure() - - -@pytest.mark.backend('Qt5Agg', skip_on_importerror=True) -def test_fig_sigint_override(qt_core): - from matplotlib.backends.backend_qt5 import _BackendQT5 - # Create a figure - plt.figure() - - # Variable to access the handler from the inside of the 
event loop - event_loop_handler = None - - # Callback to fire during event loop: save SIGINT handler, then exit - def fire_signal_and_quit(): - # Save event loop signal - nonlocal event_loop_handler - event_loop_handler = signal.getsignal(signal.SIGINT) - - # Request event loop exit - qt_core.QCoreApplication.exit() - - # Timer to exit event loop - qt_core.QTimer.singleShot(0, fire_signal_and_quit) - - # Save original SIGINT handler - original_handler = signal.getsignal(signal.SIGINT) - - # Use our own SIGINT handler to be 100% sure this is working - def custom_handler(signum, frame): - pass - - signal.signal(signal.SIGINT, custom_handler) - - try: - # mainloop() sets SIGINT, starts Qt event loop (which triggers timer - # and exits) and then mainloop() resets SIGINT - matplotlib.backends.backend_qt._BackendQT.mainloop() - - # Assert: signal handler during loop execution is changed - # (can't test equality with func) - assert event_loop_handler != custom_handler - - # Assert: current signal handler is the same as the one we set before - assert signal.getsignal(signal.SIGINT) == custom_handler - - # Repeat again to test that SIG_DFL and SIG_IGN will not be overridden - for custom_handler in (signal.SIG_DFL, signal.SIG_IGN): - qt_core.QTimer.singleShot(0, fire_signal_and_quit) - signal.signal(signal.SIGINT, custom_handler) - - _BackendQT5.mainloop() - - assert event_loop_handler == custom_handler - assert signal.getsignal(signal.SIGINT) == custom_handler - - finally: - # Reset SIGINT handler to what it was before the test - signal.signal(signal.SIGINT, original_handler) - - -@pytest.mark.parametrize( - "qt_key, qt_mods, answer", - [ - ("Key_A", ["ShiftModifier"], "A"), - ("Key_A", [], "a"), - ("Key_A", ["ControlModifier"], ("ctrl+a")), - ( - "Key_Aacute", - ["ShiftModifier"], - "\N{LATIN CAPITAL LETTER A WITH ACUTE}", - ), - ("Key_Aacute", [], "\N{LATIN SMALL LETTER A WITH ACUTE}"), - ("Key_Control", ["AltModifier"], ("alt+control")), - ("Key_Alt", ["ControlModifier"], "ctrl+alt"), - ( - "Key_Aacute", - ["ControlModifier", "AltModifier", "MetaModifier"], - ("ctrl+alt+meta+\N{LATIN SMALL LETTER A WITH ACUTE}"), - ), - # We do not currently map the media keys, this may change in the - # future. This means the callback will never fire - ("Key_Play", [], None), - ("Key_Backspace", [], "backspace"), - ( - "Key_Backspace", - ["ControlModifier"], - "ctrl+backspace", - ), - ], - ids=[ - 'shift', - 'lower', - 'control', - 'unicode_upper', - 'unicode_lower', - 'alt_control', - 'control_alt', - 'modifier_order', - 'non_unicode_key', - 'backspace', - 'backspace_mod', - ] -) -@pytest.mark.parametrize('backend', [ - # Note: the value is irrelevant; the important part is the marker. - pytest.param( - 'Qt5Agg', - marks=pytest.mark.backend('Qt5Agg', skip_on_importerror=True)), - pytest.param( - 'QtAgg', - marks=pytest.mark.backend('QtAgg', skip_on_importerror=True)), -]) -def test_correct_key(backend, qt_core, qt_key, qt_mods, answer, monkeypatch): - """ - Make a figure. - Send a key_press_event event (using non-public, qtX backend specific api). - Catch the event. - Assert sent and caught keys are the same. 
- """ - from matplotlib.backends.qt_compat import _enum, _to_int - - if sys.platform == "darwin" and answer is not None: - answer = answer.replace("ctrl", "cmd") - answer = answer.replace("control", "cmd") - answer = answer.replace("meta", "ctrl") - result = None - qt_mod = _enum("QtCore.Qt.KeyboardModifier").NoModifier - for mod in qt_mods: - qt_mod |= getattr(_enum("QtCore.Qt.KeyboardModifier"), mod) - - class _Event: - def isAutoRepeat(self): return False - def key(self): return _to_int(getattr(_enum("QtCore.Qt.Key"), qt_key)) - - monkeypatch.setattr(QtWidgets.QApplication, "keyboardModifiers", - lambda self: qt_mod) - - def on_key_press(event): - nonlocal result - result = event.key - - qt_canvas = plt.figure().canvas - qt_canvas.mpl_connect('key_press_event', on_key_press) - qt_canvas.keyPressEvent(_Event()) - assert result == answer - - -@pytest.mark.backend('QtAgg', skip_on_importerror=True) -def test_device_pixel_ratio_change(): - """ - Make sure that if the pixel ratio changes, the figure dpi changes but the - widget remains the same logical size. - """ - - prop = 'matplotlib.backends.backend_qt.FigureCanvasQT.devicePixelRatioF' - with mock.patch(prop) as p: - p.return_value = 3 - - fig = plt.figure(figsize=(5, 2), dpi=120) - qt_canvas = fig.canvas - qt_canvas.show() - - def set_device_pixel_ratio(ratio): - p.return_value = ratio - - # The value here doesn't matter, as we can't mock the C++ QScreen - # object, but can override the functional wrapper around it. - # Emitting this event is simply to trigger the DPI change handler - # in Matplotlib in the same manner that it would occur normally. - screen.logicalDotsPerInchChanged.emit(96) - - qt_canvas.draw() - qt_canvas.flush_events() - - # Make sure the mocking worked - assert qt_canvas.device_pixel_ratio == ratio - - qt_canvas.manager.show() - size = qt_canvas.size() - screen = qt_canvas.window().windowHandle().screen() - set_device_pixel_ratio(3) - - # The DPI and the renderer width/height change - assert fig.dpi == 360 - assert qt_canvas.renderer.width == 1800 - assert qt_canvas.renderer.height == 720 - - # The actual widget size and figure logical size don't change. - assert size.width() == 600 - assert size.height() == 240 - assert qt_canvas.get_width_height() == (600, 240) - assert (fig.get_size_inches() == (5, 2)).all() - - set_device_pixel_ratio(2) - - # The DPI and the renderer width/height change - assert fig.dpi == 240 - assert qt_canvas.renderer.width == 1200 - assert qt_canvas.renderer.height == 480 - - # The actual widget size and figure logical size don't change. - assert size.width() == 600 - assert size.height() == 240 - assert qt_canvas.get_width_height() == (600, 240) - assert (fig.get_size_inches() == (5, 2)).all() - - set_device_pixel_ratio(1.5) - - # The DPI and the renderer width/height change - assert fig.dpi == 180 - assert qt_canvas.renderer.width == 900 - assert qt_canvas.renderer.height == 360 - - # The actual widget size and figure logical size don't change. 
- assert size.width() == 600 - assert size.height() == 240 - assert qt_canvas.get_width_height() == (600, 240) - assert (fig.get_size_inches() == (5, 2)).all() - - -@pytest.mark.backend('QtAgg', skip_on_importerror=True) -def test_subplottool(): - fig, ax = plt.subplots() - with mock.patch("matplotlib.backends.qt_compat._exec", lambda obj: None): - fig.canvas.manager.toolbar.configure_subplots() - - -@pytest.mark.backend('QtAgg', skip_on_importerror=True) -def test_figureoptions(): - fig, ax = plt.subplots() - ax.plot([1, 2]) - ax.imshow([[1]]) - ax.scatter(range(3), range(3), c=range(3)) - with mock.patch("matplotlib.backends.qt_compat._exec", lambda obj: None): - fig.canvas.manager.toolbar.edit_parameters() - - -@pytest.mark.backend('QtAgg', skip_on_importerror=True) -def test_figureoptions_with_datetime_axes(): - fig, ax = plt.subplots() - xydata = [ - datetime(year=2021, month=1, day=1), - datetime(year=2021, month=2, day=1) - ] - ax.plot(xydata, xydata) - with mock.patch("matplotlib.backends.qt_compat._exec", lambda obj: None): - fig.canvas.manager.toolbar.edit_parameters() - - -@pytest.mark.backend('QtAgg', skip_on_importerror=True) -def test_double_resize(): - # Check that resizing a figure twice keeps the same window size - fig, ax = plt.subplots() - fig.canvas.draw() - window = fig.canvas.manager.window - - w, h = 3, 2 - fig.set_size_inches(w, h) - assert fig.canvas.width() == w * matplotlib.rcParams['figure.dpi'] - assert fig.canvas.height() == h * matplotlib.rcParams['figure.dpi'] - - old_width = window.width() - old_height = window.height() - - fig.set_size_inches(w, h) - assert window.width() == old_width - assert window.height() == old_height - - -@pytest.mark.backend('QtAgg', skip_on_importerror=True) -def test_canvas_reinit(): - from matplotlib.backends.backend_qtagg import FigureCanvasQTAgg - - called = False - - def crashing_callback(fig, stale): - nonlocal called - fig.canvas.draw_idle() - called = True - - fig, ax = plt.subplots() - fig.stale_callback = crashing_callback - # this should not raise - canvas = FigureCanvasQTAgg(fig) - fig.stale = True - assert called - - -@pytest.mark.backend('Qt5Agg', skip_on_importerror=True) -def test_form_widget_get_with_datetime_and_date_fields(): - from matplotlib.backends.backend_qt import _create_qApp - _create_qApp() - - form = [ - ("Datetime field", datetime(year=2021, month=3, day=11)), - ("Date field", date(year=2021, month=3, day=11)) - ] - widget = _formlayout.FormWidget(form) - widget.setup() - values = widget.get() - assert values == [ - datetime(year=2021, month=3, day=11), - date(year=2021, month=3, day=11) - ] - - -# The source of this function gets extracted and run in another process, so it -# must be fully self-contained. -def _test_enums_impl(): - import sys - - from matplotlib.backends.qt_compat import _enum, _to_int - from matplotlib.backend_bases import cursors, MouseButton - - _enum("QtGui.QDoubleValidator.State").Acceptable - - _enum("QtWidgets.QDialogButtonBox.StandardButton").Ok - _enum("QtWidgets.QDialogButtonBox.StandardButton").Cancel - _enum("QtWidgets.QDialogButtonBox.StandardButton").Apply - for btn_type in ["Ok", "Cancel"]: - getattr(_enum("QtWidgets.QDialogButtonBox.StandardButton"), btn_type) - - _enum("QtGui.QImage.Format").Format_ARGB32_Premultiplied - _enum("QtGui.QImage.Format").Format_ARGB32_Premultiplied - # SPECIAL_KEYS are Qt::Key that do *not* return their Unicode name instead - # they have manually specified names. 
- SPECIAL_KEYS = { - _to_int(getattr(_enum("QtCore.Qt.Key"), k)): v - for k, v in [ - ("Key_Escape", "escape"), - ("Key_Tab", "tab"), - ("Key_Backspace", "backspace"), - ("Key_Return", "enter"), - ("Key_Enter", "enter"), - ("Key_Insert", "insert"), - ("Key_Delete", "delete"), - ("Key_Pause", "pause"), - ("Key_SysReq", "sysreq"), - ("Key_Clear", "clear"), - ("Key_Home", "home"), - ("Key_End", "end"), - ("Key_Left", "left"), - ("Key_Up", "up"), - ("Key_Right", "right"), - ("Key_Down", "down"), - ("Key_PageUp", "pageup"), - ("Key_PageDown", "pagedown"), - ("Key_Shift", "shift"), - # In OSX, the control and super (aka cmd/apple) keys are switched. - ("Key_Control", "control" if sys.platform != "darwin" else "cmd"), - ("Key_Meta", "meta" if sys.platform != "darwin" else "control"), - ("Key_Alt", "alt"), - ("Key_CapsLock", "caps_lock"), - ("Key_F1", "f1"), - ("Key_F2", "f2"), - ("Key_F3", "f3"), - ("Key_F4", "f4"), - ("Key_F5", "f5"), - ("Key_F6", "f6"), - ("Key_F7", "f7"), - ("Key_F8", "f8"), - ("Key_F9", "f9"), - ("Key_F10", "f10"), - ("Key_F10", "f11"), - ("Key_F12", "f12"), - ("Key_Super_L", "super"), - ("Key_Super_R", "super"), - ] - } - # Define which modifier keys are collected on keyboard events. Elements - # are (Qt::KeyboardModifiers, Qt::Key) tuples. Order determines the - # modifier order (ctrl+alt+...) reported by Matplotlib. - _MODIFIER_KEYS = [ - ( - _to_int(getattr(_enum("QtCore.Qt.KeyboardModifier"), mod)), - _to_int(getattr(_enum("QtCore.Qt.Key"), key)), - ) - for mod, key in [ - ("ControlModifier", "Key_Control"), - ("AltModifier", "Key_Alt"), - ("ShiftModifier", "Key_Shift"), - ("MetaModifier", "Key_Meta"), - ] - ] - cursord = { - k: getattr(_enum("QtCore.Qt.CursorShape"), v) - for k, v in [ - (cursors.MOVE, "SizeAllCursor"), - (cursors.HAND, "PointingHandCursor"), - (cursors.POINTER, "ArrowCursor"), - (cursors.SELECT_REGION, "CrossCursor"), - (cursors.WAIT, "WaitCursor"), - ] - } - - buttond = { - getattr(_enum("QtCore.Qt.MouseButton"), k): v - for k, v in [ - ("LeftButton", MouseButton.LEFT), - ("RightButton", MouseButton.RIGHT), - ("MiddleButton", MouseButton.MIDDLE), - ("XButton1", MouseButton.BACK), - ("XButton2", MouseButton.FORWARD), - ] - } - - _enum("QtCore.Qt.WidgetAttribute").WA_OpaquePaintEvent - _enum("QtCore.Qt.FocusPolicy").StrongFocus - _enum("QtCore.Qt.ToolBarArea").TopToolBarArea - _enum("QtCore.Qt.ToolBarArea").TopToolBarArea - _enum("QtCore.Qt.AlignmentFlag").AlignRight - _enum("QtCore.Qt.AlignmentFlag").AlignVCenter - _enum("QtWidgets.QSizePolicy.Policy").Expanding - _enum("QtWidgets.QSizePolicy.Policy").Ignored - _enum("QtCore.Qt.MaskMode").MaskOutColor - _enum("QtCore.Qt.ToolBarArea").TopToolBarArea - _enum("QtCore.Qt.ToolBarArea").TopToolBarArea - _enum("QtCore.Qt.AlignmentFlag").AlignRight - _enum("QtCore.Qt.AlignmentFlag").AlignVCenter - _enum("QtWidgets.QSizePolicy.Policy").Expanding - _enum("QtWidgets.QSizePolicy.Policy").Ignored - - -def _get_testable_qt_backends(): - envs = [] - for deps, env in [ - ([qt_api], {"MPLBACKEND": "qtagg", "QT_API": qt_api}) - for qt_api in ["PyQt6", "PySide6", "PyQt5", "PySide2"] - ]: - reason = None - missing = [dep for dep in deps if not importlib.util.find_spec(dep)] - if (sys.platform == "linux" and - not _c_internal_utils.display_is_valid()): - reason = "$DISPLAY and $WAYLAND_DISPLAY are unset" - elif missing: - reason = "{} cannot be imported".format(", ".join(missing)) - elif env["MPLBACKEND"] == 'macosx' and os.environ.get('TF_BUILD'): - reason = "macosx backend fails on Azure" - marks = [] - if reason: - 
marks.append(pytest.mark.skip( - reason=f"Skipping {env} because {reason}")) - envs.append(pytest.param(env, marks=marks, id=str(env))) - return envs - - -@pytest.mark.parametrize("env", _get_testable_qt_backends()) -def test_enums_available(env): - proc = subprocess.run( - [sys.executable, "-c", - inspect.getsource(_test_enums_impl) + "\n_test_enums_impl()"], - env={**os.environ, "SOURCE_DATE_EPOCH": "0", **env}, - timeout=_test_timeout, check=True, - stdout=subprocess.PIPE, universal_newlines=True) diff --git a/spaces/leopoldmaillard/ImageRetrieval/app.py b/spaces/leopoldmaillard/ImageRetrieval/app.py deleted file mode 100644 index ffc076cfe3342f0a674e1c62202c4df15cde4336..0000000000000000000000000000000000000000 --- a/spaces/leopoldmaillard/ImageRetrieval/app.py +++ /dev/null @@ -1,73 +0,0 @@ -from cProfile import label -from turtle import title -import numpy as np -import gradio as gr -import pickle -from skimage import io -from scipy.spatial import distance - -# all the images name in a list -images = [line.strip() for line in open("holidays_images.dat","r")] - -# all the query image names in a list -query_images = [] -for line in open("holidays_images.dat","r"): - imname=line.strip() - imno=int(imname[:-len(".jpg")]) - if imno%100==0: - query_images.append(imname) - -with open('saved_cnn.pkl', 'rb') as f: - cnn_embeddings = pickle.load(f) - -with open('saved_bovw.pkl', 'rb') as f: - bovw_embeddings = pickle.load(f) - -with open('saved_naive.pkl', 'rb') as f: - naive_embeddings = pickle.load(f) - - -def similarity_all(query_image_name, embeddings, metric): - querry_embedding = embeddings[query_image_name] - scores = {image_name : metric(querry_embedding, embeddings[image_name]) for image_name in images} - return scores - -def euclidean_similarity_score(query_embedding, target_embedding): - return np.linalg.norm(query_embedding-target_embedding) - -def cosine_similarity_score(query_embedding, target_embedding): - return distance.cosine(np.reshape(query_embedding, -1), np.reshape(target_embedding, -1)) - -def retrieve(query_image_name, embeddings_type, metric_type): - - if embeddings_type == 'MobileNetV2' : - embeddings = cnn_embeddings - elif embeddings_type == 'BoVW' : - embeddings = bovw_embeddings - else : - embeddings = naive_embeddings - - if metric_type == 'Euclidean' : - metric = euclidean_similarity_score - else : - metric = cosine_similarity_score - - scores = similarity_all(query_image_name, embeddings, metric) - top = sorted(scores, key=scores.get)[:11] - - return io.imread('smallholidays/'+top[0]), [io.imread('smallholidays/'+img) for img in top[1:]] - -input_button = gr.inputs.Dropdown(query_images, label='Choice of the query image') -embeddings_selection = gr.inputs.Radio(['MobileNetV2', 'BoVW', 'Baseline'], label='Embeddings') -metric_selection = gr.inputs.Radio(['Euclidean', 'Cosine'], label='Similarity Metric') -retrieved_images = gr.outputs.Carousel(["image"], label='Retrieved images') - -description = "This is a demo of the content-based image retrieval system developed as part of the IR course project, 2022. The indexed dataset is [INRIA Holidays](https://lear.inrialpes.fr/~jegou/data.php). \n\nSeveral image embeddings can be used :\n \n-**MobileNetV2** : feature extraction is performed using a MobileNet architecture trained on ImageNet.\n\n-**BoVW (Bag of Visual Words)** : embedding is the BoVW histogram using color histogram as a descriptor.\n\n-**Baseline** : basic descriptor that uses pixel values of the downsized images." 
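-
-# A minimal usage sketch (the query name is hypothetical; any entry of
-# `query_images`, i.e. a Holidays image numbered like 100000.jpg, would do):
-#
-#   query_img, retrieved = retrieve('100000.jpg', 'MobileNetV2', 'Cosine')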
-
-iface = gr.Interface(fn=retrieve,
-                     inputs=[input_button, embeddings_selection, metric_selection],
-                     outputs=[gr.outputs.Image(label='Query image'), retrieved_images],
-                     title='Image Retrieval on INRIA Holidays',
-                     description=description)
-
-iface.launch()
\ No newline at end of file
diff --git a/spaces/librarian-bots/notebooks-on-the-hub/index.html b/spaces/librarian-bots/notebooks-on-the-hub/index.html
deleted file mode 100644
index 58275de3b1c343a98420342baa076b9baaafa157..0000000000000000000000000000000000000000
--- a/spaces/librarian-bots/notebooks-on-the-hub/index.html
+++ /dev/null
@@ -1,19 +0,0 @@
-<!doctype html>
-<html>
-  <head>
-    <meta charset="utf-8" />
-    <meta name="viewport" content="width=device-width" />
-    <title>My static Space</title>
-    <link rel="stylesheet" href="style.css" />
-  </head>
-  <body>
-    <div class="card">
-      <h1>Welcome to your static Space!</h1>
-      <p>You can modify this app directly by editing index.html in the Files and versions tab.</p>
-      <p>
-        Also don't forget to check the
-        <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
-      </p>
-    </div>
- - diff --git a/spaces/library-samples/image-captioning-with-blip/README.md b/spaces/library-samples/image-captioning-with-blip/README.md deleted file mode 100644 index 7f374daf962b4098f9e0ecdcf30d65c4eb5c40ab..0000000000000000000000000000000000000000 --- a/spaces/library-samples/image-captioning-with-blip/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Image Captioning with BLIP -emoji: ⚡ -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 4.1.1 -python_version: 3.10.13 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Age Of Mythology Titans 103 Crack No Cd [PORTABLE].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Age Of Mythology Titans 103 Crack No Cd [PORTABLE].md deleted file mode 100644 index bc931dc2775133dd312187ceb4faf3173a07d817..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Age Of Mythology Titans 103 Crack No Cd [PORTABLE].md +++ /dev/null @@ -1,6 +0,0 @@ -

Age Of Mythology Titans 103 Crack No Cd


Download Zip: https://bytlly.com/2uGwPZ



-
- 3cee63e6c2
-
-
-

diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Ecm Titanium 1.61 !!INSTALL!! Crack 107.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Ecm Titanium 1.61 !!INSTALL!! Crack 107.md deleted file mode 100644 index 7a0944cbabc11df13545ef44db318eba5d4357da..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Ecm Titanium 1.61 !!INSTALL!! Crack 107.md +++ /dev/null @@ -1,128 +0,0 @@ - -

ECM Titanium 1.61 Crack 107: A Powerful Tool for ECU Tuning and Diagnostics

- -

If you are looking for software that can help you modify the files stored in your car's engine control unit (ECU), you may have heard of ECM Titanium 1.61 Crack 107. This software allows you to accurately and easily interpret and edit the data in the ECU, such as the air-fuel ratio, engine torque, injection system, rail pressure, turbo boost and much more. In this article, we will show you what ECM Titanium 1.61 Crack 107 can do, how to use it and where to download it for free.

-

ecm titanium 1.61 crack 107


Download Zip ☆☆☆ https://bytlly.com/2uGwy8



- -

What is ECM Titanium 1.61 Crack 107?

- -

ECM Titanium 1.61 Crack 107 is a cracked version of the original ECM Titanium software, which is developed by Alientech. The original software requires activation and a USB dongle to work, but the cracked version does not need any of that. You can simply download it from the internet and install it on your Windows XP or Windows 7 computer.

- -

ECM Titanium 1.61 Crack 107 comes with more than 18000 drivers, which are files that contain the information of the ECU maps. These drivers allow you to access and modify the data in the ECU without any risk of damaging it. You can also use the online database to find more compatible drivers for your ECU model.

- -

How to use ECM Titanium 1.61 Crack 107?

- -

To use ECM Titanium 1.61 Crack 107, you need to have a device that can read and write the ECU files, such as Kess V2 or KTAG. You also need to have a cable that can connect your device to your car's OBD port or directly to the ECU.

- -

Once you have everything ready, you can follow these steps:

-

- -
    -
1. Connect your device to your computer and launch ECM Titanium 1.61 Crack 107.
2. Select your device from the menu and click on "Read" to read the ECU file.
3. Save the ECU file on your computer and load it on ECM Titanium 1.61 Crack 107.
4. The software will start a search for compatible drivers in your personal and online database.
5. Select the driver that matches your ECU and click on "Open".
6. You will see a list of maps that you can modify, such as fuel injection, ignition advance, boost pressure, etc.
7. Select the map that you want to edit and click on "Show". You will see different views of the map data, such as table view, 2D view, 3D view and hexadecimal view.
8. You can use the tools on the toolbar to change the values of the map data, such as increase, decrease, interpolate, smooth, etc.
9. You can also use the checksum function to verify and correct the integrity of the data.
10. When you are done with editing, click on "Save" to save the modified ECU file.
11. Connect your device to your car's OBD port or directly to the ECU and click on "Write" to write the modified ECU file.
12. Turn off your device and disconnect it from your car.
13. Start your car and enjoy the improved performance.
- -

Where to download ECM Titanium 1.61 Crack 107 for free?

- -

If you want to try ECM Titanium 1.61 Crack 107, you can download it for free from various websites on the internet. However, you should be careful about the source of the download, as some files may contain viruses or malware that can harm your computer or your car.

- -

One of the websites that offer free download of ECM Titanium 1.61 Crack 107 is Blog.Teknisi.com. This website provides a link to download a rar file that contains the software and the drivers. You can use this link to download it: https://www.blog-teknisi.com/2020/08/free-download-ecm-titanium-161-18475.html

- -

You will need a password to extract the rar file, which is "garage". After extracting it, you can install it on your computer and start using it.

- -


-

What are the advantages of ECM Titanium 1.61 Crack 107?

- -

Using ECM Titanium 1.61 Crack 107 has many benefits for your car and your wallet. Here are some of them:

- -
    -
• You can improve your car's performance by increasing the power, torque, acceleration and top speed.
• You can reduce your fuel consumption and emissions by optimizing the air-fuel ratio, injection timing and pressure, and other parameters.
• You can customize your car's behavior according to your driving style and preferences.
• You can diagnose and fix any errors or faults in your ECU by reading and clearing the trouble codes.
• You can save money by avoiding expensive visits to the mechanic or buying a new ECU.
- -

What are the risks of ECM Titanium 1.61 Crack 107?

- -

Although ECM Titanium 1.61 Crack 107 is generally safe and reliable software, there are some risks involved in using it. Here are some of them:

- -
    -
• You may void your car's warranty by modifying the ECU files.
• You may damage your ECU or your car's engine by making wrong or excessive changes to the ECU files.
• You may face legal issues or fines by violating the emission standards or regulations in your country.
• You may download a corrupted or infected file from an unreliable source that can harm your computer or your car.
- -

Therefore, you should always be careful and responsible when using ECM Titanium 1.61 Crack 107. You should always backup your original ECU file before making any changes, and restore it if anything goes wrong. You should also check the compatibility of your ECU model and your device before reading or writing the ECU file. You should also follow the instructions and guidelines provided by the software and the device manufacturers.
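That backup advice can be made concrete with a few lines of ordinary Python. The following is a generic sketch (the file name and helper function are made up for illustration; nothing here is part of ECM Titanium itself): it copies an ECU dump aside and records a SHA-256 digest, so you can later verify that the untouched original is still intact before restoring it.

import hashlib
import shutil

def backup_ecu_dump(dump_path):
    """Copy dump_path to dump_path + '.bak' and record the SHA-256 of the original."""
    shutil.copy2(dump_path, dump_path + ".bak")  # keep timestamps with the copy
    with open(dump_path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    with open(dump_path + ".sha256", "w") as f:
        f.write(digest + "\n")
    return digest

# Example (hypothetical file name): digest = backup_ecu_dump("stock_read.bin")

Before writing a modified file back, recompute the hash of the .bak copy and compare it against the saved digest to confirm the backup has not been corrupted.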

- -


-

How to download ECM Titanium 1.61 Crack 107 for free?

- -

As mentioned above, ECM Titanium 1.61 Crack 107 can be downloaded for free from various websites on the internet. As always, be careful about the source of the download, as some files may contain viruses or malware that can harm your computer or your car.

- -

One of the websites that offer free download of ECM Titanium 1.61 Crack 107 is ECU-Programmer.com. This website provides a link to download a rar file that contains the software and the drivers. You can use this link to download it: http://www.ecu-programmer.com/ecm-titanium-1-61-crack-software-and-18475-driver/

- -

You will need a password to extract the rar file, which is "ecu-programmer". After extracting it, you can install it on your computer and start using it.

- -

How to install ECM Titanium 1.61 Crack 107 on your computer?

- -

To install ECM Titanium 1.61 Crack 107 on your computer, you need to follow these steps:

- -
    -
1. Extract the rar file that you downloaded from the link above.
2. Open the folder "ECM FULL" and run the file "ECM Titanium setup.exe" as administrator.
3. Follow the instructions on the screen to complete the installation.
4. Copy the file "titanium.exe" from the folder "Crack" and paste it into the installation folder, usually "C:\Program Files\Alientech\ECM Titanium". Replace the original file when prompted.
5. Run the file "titanium.exe" as administrator from the installation folder.
6. You will see a message saying "Hardware key not found". Click on "OK".
7. You will see another message saying "The hardware key is not plugged". Click on "OK".
8. You will see a window asking you to select a language. Choose your preferred language and click on "OK".
9. You have successfully installed ECM Titanium 1.61 Crack 107 on your computer.
- -

How to update ECM Titanium 1.61 Crack 107?

- -

ECM Titanium 1.61 Crack 107 is a cracked version of the original software, so it cannot be updated online or offline. If you want to use the latest version of ECM Titanium, you need to buy the original software from Alientech or its authorized dealers.

- -

However, you can still update the drivers for ECM Titanium 1.61 Crack 107. The drivers are files that contain the information of the ECU maps for different car models and engines. You can download new drivers from various sources on the internet, such as forums, blogs or websites.

- -

To update the drivers for ECM Titanium 1.61 Crack 107, you need to follow these steps:

- -
    -
1. Download the new drivers that you want to use from the internet.
2. Extract the files from the zip or rar archive that you downloaded.
3. Copy the files and paste them into the folder "C:\Users\Public\Documents\Alientech\Driver". Replace or merge the existing files when prompted.
4. Run ECM Titanium 1.61 Crack 107 as administrator from the installation folder.
5. You have successfully updated the drivers for ECM Titanium 1.61 Crack 107.
- -

Conclusion

- -

ECM Titanium 1.61 Crack 107 is a powerful tool for ECU tuning and diagnostics that can help you to optimize your car's performance and fuel efficiency. It allows you to edit the data in the ECU files with ease and safety. You can download it for free from various websites on the internet, but you should be careful about the source of the download.

- -

If you have any questions or comments about ECM Titanium 1.61 Crack 107, feel free to leave them below.

-


3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/FULL Neuro-Programmer 2 [UPDATED].md b/spaces/lincquiQcaudo/Top-20-Diffusion/FULL Neuro-Programmer 2 [UPDATED].md deleted file mode 100644 index 29b3335072f35c7145c4e78597bc56c7c78451c2..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/FULL Neuro-Programmer 2 [UPDATED].md +++ /dev/null @@ -1,8 +0,0 @@ - -

Click on the download button on the right of the page. There you can also sign up for an email newsletter which will provide full demonstrations of the software and other goodie-bag items. You'll also be automatically subscribed to their newsletter, which is in addition to the one you're already subscribed to. While you're waiting for your download and first run of the software, there is plenty of free stuff to consider.

-

There are free trials of the software. I've reviewed the FREE DOWNLOAD version, which is available to download and evaluate for 30 days. Another option is to click on the FULL Neuro-Programmer 2 Download button to access the full version. You must register to obtain this version. Registration is free. You can also sign up for an email subscription via BRAINWAVE ENTRAINMENT for notifications of new articles and software reviews.

-

FULL Neuro-Programmer 2


DOWNLOAD ✑ ✑ ✑ https://bytlly.com/2uGvKC



-

You can try out one of the demos to see if you like it. This is without the support community. Consider registering for the free user forum, where you have access to their knowledge pool as well as other users. Registration is free and is one of the best ways to get to know people. After that, it doesn't hurt to take full advantage of their knowledge pool to discover more about brainwave entrainment. There is almost never a charge for postings.

-

A world-class Neuro-Programmer application engineered to quickly and easily help you change your brainwave patterns and navigate new realities based on your optimal state of consciousness. It comes with a full manual in which you will learn in depth how to set up your sessions, so you can enjoy making massive brainwave changes quickly.

899543212b
-
-
\ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Las Culturas Precolombinas Henri Lehmann.pdf.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Las Culturas Precolombinas Henri Lehmann.pdf.md deleted file mode 100644 index d149f4e8e72e580332d95e49933f2cf738ea30e7..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Las Culturas Precolombinas Henri Lehmann.pdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

Las Culturas Precolombinas Henri Lehmann.pdf


Download ……… https://bytlly.com/2uGyl3



-
- 4d29de3e1b
-
-
-

diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Livrosruthrochapdfdownload [BETTER].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Livrosruthrochapdfdownload [BETTER].md deleted file mode 100644 index d209054146f6a589debe20fe5e17c8ea1ced514d..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Livrosruthrochapdfdownload [BETTER].md +++ /dev/null @@ -1,75 +0,0 @@ -
-

Ruth Rocha Books in PDF: How to Download and Enjoy the Works of the Renowned Children's Author

Are you a fan of children's literature? Then you need to know the books of Ruth Rocha, one of Brazil's most famous and award-winning writers. Ruth Rocha is the author of more than 200 books for children and young people, covering themes such as friendship, family, citizenship, culture, folklore, history and much more. Her books are fun, educational and full of imagination.

Who is Ruth Rocha?

Ruth Rocha was born in São Paulo in 1931. From an early age she loved to read and write. She graduated in pedagogy and sociology and worked as a teacher, journalist and editor. She began writing children's books in 1967, inspired by her son and by her experience as an educator. Her first book was "Palavras Muitas Palavras", a collection of poems.

livrosruthrochapdfdownload

DOWNLOAD: https://bytlly.com/2uGwWk

Since then, she has published many books that have become classics of Brazilian children's literature, such as "Marcelo, Marmelo, Martelo", "O Menino que Aprendeu a Ver", "A Arca de Noé", "O Reizinho Mandão", "O Amigo do Rei", "O Mistério do Coelho Pensante" and many others. She has also adapted works of world literature, such as "A Odisseia" (the Odyssey), "As Mil e Uma Noites" (One Thousand and One Nights) and "Dom Quixote" (Don Quixote).

Why read Ruth Rocha's books?

Reading Ruth Rocha's books is a great way to encourage children to read and learn. Her books are written in simple, clear and creative language that awakens readers' interest and curiosity. They also feature colorful, expressive illustrations that complement and enrich the stories.

In addition, her books address themes that are important for children's development, such as moral, ethical and social values. They also celebrate Brazilian culture, reviving the legends, myths, folk tales and historical figures of the country, and they stimulate readers' imagination, fantasy, humor and emotion.

How to download and read Ruth Rocha's books in PDF?

If you want to download and read Ruth Rocha's books in PDF, you can follow these steps:

1. Go to Ruth Rocha's official website: https://www.ruthrocha.com.br/livros
2. Choose the book you want to read and click on it.
3. You will be redirected to a page with information about the book, such as the synopsis, technical details and illustrations.
4. Click the "Comprar" (Buy) button to purchase the book in digital format.
5. You will be redirected to a payment page, where you can choose from several payment methods.
6. After confirming the payment, you will receive a link by e-mail to download the book in PDF.
7. Enjoy the book!

You can also download some of Ruth Rocha's books in PDF for free from sites such as the Internet Archive or Google Drive. However, it is important to check whether the books are legally available on those sites, as some may infringe the author's copyright.

What are Ruth Rocha's most famous books?

Ruth Rocha has a vast and varied body of work that appeals to readers of all ages and tastes. Among her most famous books are the following:

• "Marcelo, Marmelo, Martelo": one of her first and most popular books, which tells of the adventures and discoveries of Marcelo and his friends in the neighborhood where they live.
• "O Menino que Aprendeu a Ver": a moving and inspiring book about Paulo, a blind boy who learns to see the world in a different way with the help of his friend Chico.
• "A Arca de Noé": a verse adaptation of the biblical story of the flood, with fun and creative poems about the animals that boarded the ark.
• "O Reizinho Mandão": a good-humored critique of tyranny and injustice, told through the story of a little king who wanted to boss everyone and everything around.
• "O Amigo do Rei": a book that portrays the friendship between an enslaved boy and a noble boy in colonial Brazil who would become king Dom Pedro I.

How to choose the best Ruth Rocha book for each child?

When choosing the best Ruth Rocha book for a child, it is important to consider a few factors, such as:

• The child's age and reading level: some books are better suited to younger or older children, or to children who are just starting to read or are already more advanced.
• The child's interests and taste: some books may hold the child's attention and curiosity better, depending on the theme, genre, style or characters.
• The purpose of the reading: some books are more educational, others more recreational, and some can be used to work on a specific subject or value with the child.

A good tip is to look up Ruth Rocha's books on her official website, where you can find detailed information about each work, such as the synopsis, technical details, illustrations and excerpts. You can also read other readers' opinions and recommendations, or consult teachers, librarians or booksellers who specialize in children's literature.

What are the benefits of downloading and reading Ruth Rocha's books in PDF?

Downloading and reading Ruth Rocha's books in PDF has many advantages that will improve your reading experience. Some of them are:

• You can save money and time by not having to buy or wait for a physical copy of the books.
• You can access the books faster and more easily, without having to deal with discs, codes or installation problems.
• You can read the books anytime and anywhere, as long as you have a computer or phone and an internet connection.
• You can adjust the book's settings to suit your preferences and needs, such as the font size, screen brightness, reading mode, etc.
• You can bookmark pages, take notes, highlight passages and look up words in the book.

How to choose the best PDF format for downloading and reading Ruth Rocha's books?

There are different PDF formats in which you can download and read Ruth Rocha's books. Each format has its own characteristics and advantages, depending on your device and your reading goals. Some of the most common formats are:

• Standard PDF: the simplest and most universal format, readable on any device and program that supports PDF. It keeps the book's original formatting, but does not allow you to change the font size or page layout.
• Optimized PDF: a format that reduces the file size and improves the quality of the images and text. It is ideal for devices with little storage space or slow connections, and it also lets you change the font size and page layout.
• Interactive PDF: a format that adds interactive features to the book, such as links, videos, audio, animations, forms, etc. It is ideal for touchscreen devices and devices that support multimedia, and it also lets you change the font size and page layout.

To choose the best PDF format, consider your device, your connection, your storage space and your reading goals. Also check whether the download site offers these formats and whether they are compatible with your device.

Conclusion: Why should you download and read Ruth Rocha's books in PDF?

Downloading and reading Ruth Rocha's books in PDF is a great way to get to know and appreciate the works of a writer who is a reference in Brazilian children's literature. Her books are fun, educational and full of imagination. They address themes that are important for children's development, such as moral, ethical and social values, and they celebrate Brazilian culture, reviving the legends, myths, folk tales and historical figures of the country.

Downloading and reading the books in PDF is also a practical and economical way to access them. You can download the books to your computer or phone and read them whenever and wherever you want, adjust the book's settings to suit your preferences and needs, and choose the PDF format that works best for you.

Don't miss this opportunity to download and read Ruth Rocha's books in PDF and be enchanted by the stories of one of the most award-winning and admired writers in the country. Download and read Ruth Rocha's books in PDF today!

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/liuyuan-pal/SyncDreamer/generate.py b/spaces/liuyuan-pal/SyncDreamer/generate.py deleted file mode 100644 index 4a1a30c9909e4f3f977a6cb020239ca143ee599d..0000000000000000000000000000000000000000 --- a/spaces/liuyuan-pal/SyncDreamer/generate.py +++ /dev/null @@ -1,62 +0,0 @@ -import argparse -from pathlib import Path - -import numpy as np -import torch -from omegaconf import OmegaConf -from skimage.io import imsave - -from ldm.models.diffusion.sync_dreamer import SyncMultiviewDiffusion -from ldm.util import instantiate_from_config, prepare_inputs - - -def load_model(cfg,ckpt,strict=True): - config = OmegaConf.load(cfg) - model = instantiate_from_config(config.model) - print(f'loading model from {ckpt} ...') - ckpt = torch.load(ckpt,map_location='cpu') - model.load_state_dict(ckpt['state_dict'],strict=strict) - model = model.cuda().eval() - return model - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--cfg',type=str, default='configs/syncdreamer.yaml') - parser.add_argument('--ckpt',type=str, default='ckpt/syncdreamer-step80k.ckpt') - parser.add_argument('--output', type=str, required=True) - parser.add_argument('--input', type=str, required=True) - parser.add_argument('--elevation', type=float, required=True) - - parser.add_argument('--sample_num', type=int, default=4) - parser.add_argument('--crop_size', type=int, default=-1) - parser.add_argument('--cfg_scale', type=float, default=2.0) - parser.add_argument('--batch_view_num', type=int, default=8) - parser.add_argument('--seed', type=int, default=6033) - flags = parser.parse_args() - - torch.random.manual_seed(flags.seed) - np.random.seed(flags.seed) - - model = load_model(flags.cfg, flags.ckpt, strict=True) - assert isinstance(model, SyncMultiviewDiffusion) - Path(f'{flags.output}').mkdir(exist_ok=True, parents=True) - - # prepare data - data = prepare_inputs(flags.input, flags.elevation, flags.crop_size) - for k, v in data.items(): - data[k] = v.unsqueeze(0).cuda() - data[k] = torch.repeat_interleave(data[k], flags.sample_num, dim=0) - x_sample = model.sample(data, flags.cfg_scale, flags.batch_view_num) - - B, N, _, H, W = x_sample.shape - x_sample = (torch.clamp(x_sample,max=1.0,min=-1.0) + 1) * 0.5 - x_sample = x_sample.permute(0,1,3,4,2).cpu().numpy() * 255 - x_sample = x_sample.astype(np.uint8) - - for bi in range(B): - output_fn = Path(flags.output)/ f'{bi}.png' - imsave(output_fn, np.concatenate([x_sample[bi,ni] for ni in range(N)], 1)) - -if __name__=="__main__": - main() - diff --git a/spaces/lllqqq/so-vits-svc-models-pcr/vencoder/ContentVec768L9_Onnx.py b/spaces/lllqqq/so-vits-svc-models-pcr/vencoder/ContentVec768L9_Onnx.py deleted file mode 100644 index 7cdac4cd93478d3ddddb4b76dd9d9ccc5d1af2d4..0000000000000000000000000000000000000000 --- a/spaces/lllqqq/so-vits-svc-models-pcr/vencoder/ContentVec768L9_Onnx.py +++ /dev/null @@ -1,28 +0,0 @@ -from vencoder.encoder import SpeechEncoder -import onnxruntime -import torch - -class ContentVec768L9_Onnx(SpeechEncoder): - def __init__(self,vec_path = "pretrain/vec-768-layer-9.onnx",device=None): - print("load model(s) from {}".format(vec_path)) - self.hidden_dim = 768 - if device is None: - self.dev = torch.device("cpu") - else: - self.dev = torch.device(device) - if device == 'cpu' or device == torch.device("cpu") or device is None: - providers = ['CPUExecutionProvider'] - elif device == 'cuda' or device == torch.device("cuda"): - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - self.model = 
onnxruntime.InferenceSession(vec_path, providers=providers) - - def encoder(self, wav): - feats = wav - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - feats = feats.unsqueeze(0).cpu().detach().numpy() - onnx_input = {self.model.get_inputs()[0].name: feats} - logits = self.model.run(None, onnx_input) - return torch.tensor(logits[0]).transpose(1, 2).to(self.dev) \ No newline at end of file diff --git a/spaces/luost26/DiffAb/abnumber/chain.py b/spaces/luost26/DiffAb/abnumber/chain.py deleted file mode 100644 index 6bdc76b9761d2ac4841849435d3e65dc56b041f7..0000000000000000000000000000000000000000 --- a/spaces/luost26/DiffAb/abnumber/chain.py +++ /dev/null @@ -1,781 +0,0 @@ -from collections import OrderedDict -from typing import Union, List, Generator, Tuple -from Bio import SeqIO -from Bio.SeqRecord import SeqRecord -import pandas as pd - -from abnumber.alignment import Alignment -from abnumber.common import _anarci_align, _validate_chain_type, SUPPORTED_SCHEMES, SUPPORTED_CDR_DEFINITIONS, \ - is_integer, SCHEME_BORDERS, _get_unique_chains -from abnumber.exceptions import ChainParseError -import numpy as np -from Bio.Seq import Seq - -from abnumber.position import Position - - -class Chain: - """ - Antibody chain aligned to a chosen antibody numbering scheme - - :example: - - >>> from abnumber import Chain - >>> - >>> seq = 'QVQLQQSGAELARPGASVKMSCKASGYTFTRYTMHWVKQRPGQGLEWIGYINPSRGYTNYNQKFKDKATLTTDKSSSTAYMQLSSLTSEDSAVYYCARYYDDHYCLDYWGQGTTLTVSSAKTTAPSVYPLA' - >>> chain = Chain(seq, scheme='imgt') - >>> chain - QVQLQQSGAELARPGASVKMSCKASGYTFTRYTMHWVKQRPGQGLEWIGYINPSRGYTNYNQKFKDKATLTTDKSSSTAYMQLSSLTSEDSAVYYCARYYDDHYCLDYWGQGTTLTVSS - ^^^^^^^^ ^^^^^^^^ ^^^^^^^^^^^^ - - Chain can be iterated: - - >>> for pos, aa in chain: - >>> print(pos, aa) - H1 Q - H2 V - H3 Q - H4 L - H5 Q - ... - - Chain can also be indexed and sliced using scheme numbering: - - >>> chain['5'] - 'Q' - >>> for pos, aa in chain['H2':'H5']: - >>> print(pos, aa) - H2 V - H3 Q - H4 L - H5 Q - - :param sequence: Unaligned string sequence - :param name: Optional sequence identifier - :param scheme: Numbering scheme: One of ``imgt``, ``chothia``, ``kabat``, ``aho`` - :param cdr_definition: Numbering scheme to be used for definition of CDR regions. Same as ``scheme`` by default. - One of ``imgt``, ``chothia``, ``kabat``, ``north``. Required for ``aho``. - :param assign_germline: Assign germline name using ANARCI based on best sequence identity - :param allowed_species: Allowed species for germline assignment. 
Use ``None`` to allow all species, or one or more of: ``'human', 'mouse','rat','rabbit','rhesus','pig','alpaca'`` - :param aa_dict: (Internal use only) Create Chain object directly from dictionary of region objects (internal use) - :param tail: (Internal use only) Constant region sequence - :param species: (Internal use only) Species as identified by ANARCI - :param germline: (Internal use only) Germline as identified by ANARCI - """ - - def __init__(self, sequence, scheme, cdr_definition=None, name=None, assign_germline=False, allowed_species=None, **kwargs): - aa_dict = kwargs.pop('aa_dict', None) - chain_type = kwargs.pop('chain_type', None) - tail = kwargs.pop('tail', None) - species = kwargs.pop('species', None) - v_gene = kwargs.pop('v_gene', None) - j_gene = kwargs.pop('j_gene', None) - if isinstance(allowed_species, str): - allowed_species = [allowed_species] - if len(kwargs): - raise TypeError(f'Argument not recognized: {", ".join(kwargs)}') - if aa_dict is not None: - if sequence is not None: - raise ChainParseError('Only one of aa_dict= and sequence= can be provided') - assert isinstance(aa_dict, dict), f'Expected dict, got: {type(aa_dict)}' - assert tail is not None - assert chain_type is not None - else: - if sequence is None: - raise ChainParseError('Expected sequence, got None') - if not isinstance(sequence, str) and not isinstance(sequence, Seq): - raise ChainParseError(f'Expected string or Seq, got {type(sequence)}: {sequence}') - if '-' in sequence: - raise ChainParseError(f'Please provide an unaligned sequence, got: {sequence}') - if chain_type is not None: - raise ChainParseError('Do not use chain_type= when providing sequence=, it will be inferred automatically') - if tail is not None: - raise ChainParseError('Do not use tail= when providing sequence=, it will be inferred automatically') - if isinstance(sequence, Seq): - sequence = str(sequence) - results = _anarci_align(sequence, scheme=scheme, allowed_species=allowed_species, assign_germline=assign_germline) - if len(results) > 1: - raise ChainParseError(f'Found {len(results)} antibody domains in sequence: "{sequence}"') - aa_dict, chain_type, tail, species, v_gene, j_gene = results[0] - - _validate_chain_type(chain_type) - - self.name: str = name - """User-provided sequence identifier""" - self.chain_type: str = chain_type - """Chain type as identified by ANARCI: ``H`` (heavy), ``K`` (kappa light) or ``L`` (lambda light) - - See also :meth:`Chain.is_heavy_chain` and :meth:`Chain.is_light_chain`. 
- """ - self.scheme: str = scheme - """Numbering scheme used to align the sequence""" - self.cdr_definition: str = cdr_definition or scheme - """Numbering scheme to be used for definition of CDR regions (same as ``scheme`` by default)""" - self.tail: str = tail - """Constant region sequence""" - self.species: str = species - """Species as identified by ANARCI""" - self.v_gene: str = v_gene - """V gene germline as identified by ANARCI (if assign_germline is True)""" - self.j_gene: str = j_gene - """J gene germline as identified by ANARCI (if assign_germline is True)""" - - self.fr1_dict = OrderedDict() - self.cdr1_dict = OrderedDict() - self.fr2_dict = OrderedDict() - self.cdr2_dict = OrderedDict() - self.fr3_dict = OrderedDict() - self.cdr3_dict = OrderedDict() - self.fr4_dict = OrderedDict() - - self._init_from_dict(aa_dict, allowed_species=allowed_species) - - def _init_from_dict(self, aa_dict, allowed_species): - if self.scheme not in SUPPORTED_SCHEMES: - raise NotImplementedError(f'Scheme "{self.scheme}" is not supported. Available schemes: {", ".join(SUPPORTED_SCHEMES)}') - if self.cdr_definition in ['aho']: - raise ValueError('CDR regions are not defined for AHo, ' - 'you need to specify cdr_definition="chothia" or another scheme for CDR extraction.') - if self.cdr_definition not in SUPPORTED_CDR_DEFINITIONS: - raise NotImplementedError(f'CDR definition "{self.scheme}" is not supported. Available definitions: {", ".join(SUPPORTED_SCHEMES)}') - # list of region start positions - borders = SCHEME_BORDERS[self.cdr_definition] if self.cdr_definition in SCHEME_BORDERS else SCHEME_BORDERS[f'{self.cdr_definition}_{self.chain_type}'] - - regions_list = [self.fr1_dict, self.cdr1_dict, self.fr2_dict, self.cdr2_dict, self.fr3_dict, self.cdr3_dict, self.fr4_dict] - region_idx = 0 - - sorted_positions = sorted(aa_dict.keys()) - - cdr_definition_ready = True - for pos in sorted_positions: - assert pos.scheme == self.scheme, f'Schemes of provided position ({pos.scheme}) does not match Chain scheme ({self.scheme})' - if pos.cdr_definition != self.cdr_definition: - cdr_definition_ready = False - - if cdr_definition_ready: - combined_aa_dict = aa_dict - else: - seq = ''.join(aa_dict[pos] for pos in sorted_positions) - renumbered_aa_dict = _anarci_align( - seq, - scheme=self.cdr_definition if self.cdr_definition != 'north' else 'chothia', - allowed_species=allowed_species - )[0][0] - cdr_definition_positions = [pos.number for pos in sorted(renumbered_aa_dict.keys())] - combined_aa_dict = {} - for orig_pos, cdr_definition_position in zip(sorted_positions, cdr_definition_positions): - aa = aa_dict[orig_pos] - pos = orig_pos.copy() - pos.set_cdr_definition(self.cdr_definition, cdr_definition_position) - combined_aa_dict[pos] = aa - - for pos in sorted(combined_aa_dict.keys()): - assert isinstance(pos, Position), f'Expected Position object, got {type(pos)}: {pos}' - aa = combined_aa_dict[pos].upper().strip() - if aa in [None, '*', '-', '', '.']: - continue - while pos.cdr_definition_position >= borders[region_idx]: - region_idx += 1 - regions_list[region_idx][pos] = aa - - def __repr__(self): - return self.format() - - def __str__(self): - return self.seq - - def __iter__(self): - yield from self.positions.items().__iter__() - - def __getitem__(self, item): - if isinstance(item, slice): - if item.step is not None and item.step != 1: - raise IndexError(f'Slicing with step != 1 is not implemented, got: {item}') - return self.slice(start=item.start, stop=item.stop) - pos = self._parse_position(item) - return 
self.positions[pos] - - def __len__(self): - return len(self.positions) - - def __hash__(self): - return hash(self.positions) - - def __eq__(self, other): - """Check chain equality. Only checks scheme, aligned sequence and tail sequence, ignores name, metadata and CDR definitions.""" - assert isinstance(other, Chain), f'Can only compare Chain to another Chain, got {type(other)}: {other}' - return self.positions == other.positions and self.tail == other.tail - - @classmethod - def to_fasta(cls, chains, path_or_fd, keep_tail=False, description=''): - """Save multiple chains to FASTA""" - if isinstance(chains, Chain): - records = chains.to_seq_record(keep_tail=keep_tail, description=description) - else: - records = (chain.to_seq_record(keep_tail=keep_tail, description=description) for chain in chains) - return SeqIO.write(records, path_or_fd, 'fasta-2line') - - @classmethod - def from_fasta(cls, path_or_handle, scheme, cdr_definition=None, as_series=False, as_generator=False, **kwargs) -> Union[List['Chain'], pd.Series, Generator['Chain', None, None]]: - """Read multiple chains from FASTA""" - generator = (cls(record.seq, name=record.name, scheme=scheme, cdr_definition=cdr_definition, **kwargs) - for record in SeqIO.parse(path_or_handle, 'fasta')) - if as_generator: - return generator - chains = list(generator) - if as_series: - return pd.Series(chains, index=[c.name for c in chains]) - return chains - - def to_seq_record(self, keep_tail=False, description=''): - """Create BioPython SeqRecord object from this Chain""" - if not self.name: - raise ValueError('Name needs to be present to convert to a SeqRecord') - seq = Seq(self.seq + self.tail if keep_tail else self.seq) - return SeqRecord(seq, id=self.name, description=description) - - @classmethod - def to_anarci_csv(cls, chains: List['Chain'], path): - """Save multiple chains to ANARCI-like CSV""" - df = cls.to_dataframe(chains) - df.to_csv(path) - - @classmethod - def to_dataframe(cls, chains: List['Chain']): - """Produce a Pandas dataframe with aligned chain sequences in the columns - - Note: Contains only positions (columns) that are present in the provided chains, - so number of columns can differ based on the input. 
- """ - series_list = [chain.to_series() for chain in chains] - - # Each chain can have a different set of positions - # so we need to sort the columns to make sure they are in the right order - # this is using the correct Position sorting - columns = set(c for series in series_list for c in series.index) - prop_columns = [c for c in columns if not isinstance(c, Position)] - position_columns = sorted([c for c in columns if isinstance(c, Position)]) - # Columns can come from K and L chain, so we need to convert them to string and remove duplicates here - position_columns_str = pd.Series( - [pos.format(chain_type=False) for pos in position_columns] - ).drop_duplicates().to_list() - - # Get full list of string columns - columns_str = prop_columns + position_columns_str - - # Reindex each series using ordered list of string columns - series_list_ordered = [] - for series in series_list: - series.index = series.index.map(lambda pos: pos.format(chain_type=False)) - series_list_ordered.append(series.reindex(columns_str)) - - df = pd.DataFrame(series_list_ordered)[columns_str].fillna('-') - df.index.name = 'Id' - - return df - - def to_series(self): - props = { - 'chain_type': self.chain_type, - 'species': self.species - } - return pd.Series({**props, **self.positions}, name=self.name) - - @classmethod - def from_series(cls, series, scheme, cdr_definition=None) -> 'Chain': - chain_type = series['chain_type'] - species = series.get('species') - position_index = [c for c in series.index if c[:1].isnumeric()] - aa_dict = {Position.from_string(pos, chain_type=chain_type, scheme=scheme): aa - for pos, aa in series[position_index].items() if aa != '-' and not pd.isna(aa)} - return cls(sequence=None, aa_dict=aa_dict, name=series.name, scheme=scheme, cdr_definition=cdr_definition, - chain_type=chain_type, species=species, tail='') - - @classmethod - def from_anarci_csv(cls, path, scheme, cdr_definition=None, as_series=False) -> Union[List['Chain'], pd.Series]: - df = pd.read_csv(path, index_col=0) - return cls.from_dataframe(df, scheme=scheme, cdr_definition=cdr_definition, as_series=as_series) - - @classmethod - def from_dataframe(cls, df, scheme, cdr_definition=None, as_series=False) -> Union[List['Chain'], pd.Series]: - chains = [cls.from_series(series, scheme=scheme, cdr_definition=cdr_definition) for i, series in df.iterrows()] - if as_series: - return pd.Series(chains, index=[c.name for c in chains]) - return chains - - def format(self, method='wide', **kwargs): - """Format sequence to string - - :param method: use ``"wide"`` for :meth:`Chain.format_wide` or ``"tall"`` for :meth:`Chain.format_tall()` - :return: formatted string - """ - if method == 'wide': - return self.format_wide(**kwargs) - elif method == 'tall': - return self.format_tall(**kwargs) - raise ValueError(f'Use method="wide" or method="tall", unknown method: "{method}"') - - def print(self, method='wide', **kwargs): - """Print string representation using :meth:`Chain.format` - - By default, produces "wide" format with sequence on first line and CDR regions higlighted with ``^`` on second line: - - >>> chain.print() - QVQLQQSGAELARPGASVKMSCKASGYTFTRYTMHWVKQRPGQGLEWIGYINPSRGYTNYNQKFKDKATLTTDKSSSTAYMQLSSLTSEDSAVYYCARYYDDHYCLDYWGQGTTLTVSS - ^^^^^^^^ ^^^^^^^^ ^^^^^^^^^^^^ - - :param method: use ``"wide"`` for :meth:`Chain.format_wide` or ``"tall"`` for :meth:`Chain.format_tall()` - """ - print(self.format(method=method, **kwargs)) - - def format_tall(self, columns=5): - """Create string with one position per line, showing position numbers 
and amino acids - - :return: formatted string - """ - height = int(np.ceil(len(self) / columns)) - rows = [''] * height - for column, start in enumerate(range(0, len(self), height)): - chain_slice = self.raw[start:start+height] - for row, (pos, aa) in enumerate(chain_slice): - rows[row] = rows[row].ljust(column * 15) - pos_format = (pos.get_region() + ' ' if pos.is_in_cdr() else '') + pos.format() - rows[row] += f'{pos_format.rjust(9)} {aa}' - - return '\n'.join(rows) - - def print_tall(self, columns=5): - """Print string representation using :meth:`Chain.format_tall` - - >>> chain.print_tall() - FR1 H1 Q - FR1 H2 V - FR1 H3 Q - FR1 H4 L - FR1 H5 Q - FR1 H6 Q - FR1 H7 S - ... - """ - print(self.format_tall(columns=columns)) - - def format_wide(self, numbering=False): - """Create string with sequence on first line and CDR regions higlighted with `^` on second line - - :param numbering: Add position numbers on top - :return: formatted string - """ - lines = [] - if numbering: - - first_order = '' - prev_number = None - after_double_digit = False - for pos in self.positions: - number = str(pos.number // 10) - if number != prev_number: - if after_double_digit: - # Special case: when double digits follow another double digits, do not print the first digit - number = number[1:] - first_order += number - if len(number) > 1: - after_double_digit = True - else: - if after_double_digit: - # Special case: After 10, 11, etc, skip adding the space - after_double_digit = False - else: - first_order += ' ' - prev_number = number - - lines.append(first_order) - lines.append(''.join(str(pos.number % 10) for pos in self.positions)) - letters = ''.join(pos.letter or ' ' for pos in self.positions) - if letters.strip(): - lines.append(letters) - lines.append(self.seq) - if self.cdr_definition == 'kabat': - lines.append(''.join('^' if pos.is_in_cdr() else ("°" if pos.is_in_vernier() else ' ') for pos in self.positions)) - else: - lines.append(''.join('^' if pos.is_in_cdr() else ' ' for pos in self.positions)) - return '\n'.join(lines) - - def print_wide(self, numbering=False): - """Print string representation using :meth:`Chain.format_wide` - - >>> chain.print_wide() - QVQLQQSGAELARPGASVKMSCKASGYTFTRYTMHWVKQRPGQGLEWIGYINPSRGYTNYNQKFKDKATLTTDKSSSTAYMQLSSLTSEDSAVYYCARYYDDHYCLDYWGQGTTLTVSS - ^^^^^^^^ ^^^^^^^^ ^^^^^^^^^^^^ - """ - print(self.format_wide(numbering=numbering)) - - def is_heavy_chain(self): - """Check if this chain is heavy chain (``chain_type=="H"``)""" - return self.chain_type == 'H' - - def is_light_chain(self): - """Check if this chain is light chain (``chain_type=="K" or chain_type=="L"``)""" - return self.is_lambda_light_chain() or self.is_kappa_light_chain() - - def is_lambda_light_chain(self): - """Check if this chain is lambda light chain (``chain_type=="L"``)""" - return self.chain_type == 'L' - - def is_kappa_light_chain(self): - """Check if this chain is kappa light chain (``chain_type=="K"``)""" - return self.chain_type == 'K' - - def align(self, *other) -> 'Alignment': - """Align this chain to other chains by using their existing numbering - - >>> from abnumber import Chain - >>> - >>> seq1 = 'QVQLQQSGAELARPGASVKMSCKASGYTFTRYTMHWVKQRPGQGLEWIGYINPSRGYTNYNQKFKDKATLTTDKSSSTAYMQLSSLTSEDSAVYYCARYYDDHYCLDYWGQGTTLTVSSAKTTAP' - >>> chain1 = Chain(seq1, scheme='imgt') - >>> - >>> seq2 = 'QVQLVQSGAELDRPGATVKMSCKASGYTTTRYTMHWVKQRPGQGLDWIGYINPSDRSYTNYNQKFKDKATLTTDKSSSTAYMQKTSLTSEDSAVYYCARYYDDYLDRWGQGTTLTVSSAKTTAP' - >>> chain2 = Chain(seq2, scheme='imgt') - >>> - >>> alignment = chain1.align(chain2) 
- >>> print(alignment.format()) - QVQLQQSGAELARPGASVKMSCKASGYTFTRYTMHWVKQRPGQGLEWIGYINPS-RGYTNYNQKFKDKATLTTDKSSSTAYMQLSSLTSEDSAVYYCARYYDDHYCLDYWGQGTTLTVSS - ||||.||||||.||||+|||||||||||.||||||||||||||||+||||||||.|.||||||||||||||||||||||||||.+|||||||||||||||||....||.||||||||||| - QVQLVQSGAELDRPGATVKMSCKASGYTTTRYTMHWVKQRPGQGLDWIGYINPSDRSYTNYNQKFKDKATLTTDKSSSTAYMQKTSLTSEDSAVYYCARYYD--DYLDRWGQGTTLTVSS - ^^^^^^^^ ^^^^^^^^^ ^^^^^^^^^^^^ - - :param other: The :class:`Chain` object to align, can be repeated to create a multiple sequence alignment - :return: :class:`Alignment` object - """ - pos_dicts = [self.positions] - for chain in other: - assert isinstance(chain, Chain), f'Expected Chain object, got {type(chain)}: {chain}' - pos_dicts.append(chain.positions) - - unique_cdr_definitions = set(pos.cdr_definition for pos_dict in pos_dicts for pos in pos_dict.keys()) - assert len(unique_cdr_definitions) <= 1, f'Aligned chains should use the same CDR definitions, got: {unique_cdr_definitions}' - - shared_pos = sorted(set(pos for pos_dict in pos_dicts for pos in pos_dict.keys())) - residues = [tuple(pos_dict.get(pos, '-') for pos_dict in pos_dicts) for pos in shared_pos] - return Alignment(shared_pos, residues, chain_type=self.chain_type, scheme=self.scheme) - - def clone(self, replace_seq: str = None): - """Create a copy of this chain, optionally with a replacement sequence - - :param replace_seq: Optional replacement sequence, needs to be the same length - :return: new Chain object - """ - return self.slice(replace_seq=replace_seq) - - def slice(self, replace_seq: str = None, start: Union[str, int, 'Position'] = None, - stop: Union[str, int, 'Position'] = None, stop_inclusive: bool = True, allow_raw: bool = False): - """Create a slice of this chain, optionally with a replacement sequence that is placed into the same numbering - - You can also slice directly using ``chain['111':'112A']`` or ``chain.raw[10:20]``. - - :param replace_seq: Optional replacement sequence, needs to be the same length - :param start: Optional slice start position (inclusive), :class:`Position` or string (e.g. '111A') - :param stop: Optional slice stop position (inclusive), :class:`Position` or string (e.g. '112A') - :param stop_inclusive: Include stop position in slice - :param allow_raw: Allow unaligned numeric indexing from 0 to length of sequence - 1 - :return: new Chain object - """ - aa_dict = {} - positions = self.positions - if replace_seq is not None: - assert len(replace_seq) == len(positions), 'Sequence needs to be the same length' - - start = self._parse_position(start, allow_raw=allow_raw) if start is not None else None - stop = self._parse_position(stop, allow_raw=allow_raw) if stop is not None else None - - for i, (pos, aa) in enumerate(positions.items()): - if start is not None and pos < start: - continue - if stop is not None and (pos > stop or (not stop_inclusive and pos >= stop)): - break - aa_dict[pos] = replace_seq[i] if replace_seq is not None else aa - - return Chain( - sequence=None, - aa_dict=aa_dict, - name=self.name, - scheme=self.scheme, - chain_type=self.chain_type, - cdr_definition=self.cdr_definition, - tail=self.tail, - species=self.species, - v_gene=self.v_gene, - j_gene=self.j_gene - ) - - def renumber(self, scheme=None, cdr_definition=None, allowed_species=None): - """Return copy of this chain aligned using a different numbering scheme or CDR definition - - :param scheme: Change numbering scheme: One of ``imgt``, ``chothia``, ``kabat``, ``aho``. 
- :param cdr_definition: Change CDR definition scheme: One of ``imgt``, ``chothia``, ``kabat``, ``north``. - :param allowed_species: ``None`` to allow all species, or one or more of: ``'human', 'mouse','rat','rabbit','rhesus','pig','alpaca'`` - """ - - return Chain( - self.seq + self.tail, - name=self.name, - allowed_species=allowed_species, - scheme=scheme or self.scheme, - cdr_definition=cdr_definition or scheme or self.cdr_definition, - assign_germline=self.v_gene is not None - ) - - def graft_cdrs_onto(self, other: 'Chain', backmutate_vernier=False, backmutations: List[Union['Position',str]] = [], name: str = None) -> 'Chain': - """Graft CDRs from this Chain onto another chain - - :param other: Chain to graft CDRs into (source of frameworks and tail sequence) - :param backmutate_vernier: Also graft all Kabat Vernier positions from this chain (perform backmutations) - :param backmutations: List of positions that should additionally be grafted from this chain (str or or :class:`Position`) - :param name: Name of new Chain. If not provided, use name of this chain. - :return: Chain with CDRs grafted from this chain and frameworks from the given chain - """ - assert self.scheme == other.scheme, \ - f'Sequences need to have the same numbering scheme, got {self.scheme} and {other.scheme}' - assert self.cdr_definition == other.cdr_definition, \ - f'Sequences need to have the same CDR definition, got {self.cdr_definition} and {other.cdr_definition}' - assert self.chain_type == other.chain_type, \ - f'Sequences need to have the same chain type, got {self.chain_type} and {other.chain_type}' - - backmutations = [self._parse_position(pos) for pos in backmutations] - - grafted_dict = {pos: aa for pos, aa in other if not pos.is_in_cdr()} - for pos, aa in self: - if pos.is_in_cdr() or (backmutate_vernier and pos.is_in_vernier()) or pos in backmutations: - grafted_dict[pos] = aa - - return Chain(sequence=None, aa_dict=grafted_dict, name=name or self.name, chain_type=self.chain_type, - scheme=self.scheme, cdr_definition=self.cdr_definition, tail=other.tail, - v_gene=other.v_gene, j_gene=other.j_gene) - - def graft_cdrs_onto_human_germline(self, v_gene=None, j_gene=None, - backmutate_vernier=False, backmutations: List[Union['Position',str]] = []): - """Graft CDRs from this Chain onto the nearest human germline sequence - - :param v_gene: Use defined V germline allele (e.g. IGHV1-18*01), gene (e.g. IGHV1-18) or family (e.g. IGHV1) - :param j_gene: Use defined J germline allele (e.g. IGHJ1*01) or gene (e.g. IGHJ1) - :param backmutate_vernier: Also graft all Kabat Vernier positions from this chain (perform backmutations) - :param backmutations: List of positions that should additionally be grafted from this chain (str or or :class:`Position`) - :return: Chain with CDRs grafted from this chain and frameworks from TODO - """ - germline_chain = self.find_merged_human_germline(v_gene=v_gene, j_gene=j_gene) - - if self.scheme != 'imgt' or self.cdr_definition != 'imgt': - germline_chain = germline_chain.renumber(self.scheme, self.cdr_definition) - - return self.graft_cdrs_onto(germline_chain, backmutate_vernier=backmutate_vernier, backmutations=backmutations) - - def _parse_position(self, position: Union[int, str, 'Position'], allow_raw=False): - """Create :class:`Position` key object from string or int. - - Note: The position should only be used for indexing, CDR definition is not preserved! 
- - :param position: Numeric or string position representation - :param allow_raw: Also allow unaligned numeric (int) indexing from 0 to length of sequence - 1 - :return: new Position object, should only be used for indexing, CDR definition is not preserved! - """ - if isinstance(position, str): - return Position.from_string(position, chain_type=self.chain_type, scheme=self.scheme) - if isinstance(position, Position): - return position - try: - position = int(position) - except TypeError: - raise IndexError(f'Invalid position key, expected Position, string or integer, got {type(position)}: "{position}"') - if not allow_raw: - raise IndexError("Use chain.raw[i] for raw numeric indexing or pass allow_raw=True. " - "For named position indexing, use string (e.g. chain['111A'] or chain['H111A'])") - if position >= len(self.positions): - return None - return self.get_position_by_raw_index(position) - - def get_position_by_raw_index(self, index): - """Get Position object at corresponding raw numeric position""" - return list(self.positions.keys())[index] - - def find_human_germlines(self, limit=10, v_gene=None, j_gene=None, unique=True) -> Tuple[List['Chain'], List['Chain']]: - """Find most identical V and J germline sequences based on IMGT alignment - - :param limit: Number of best matching germlines to return - :param v_gene: Filter germlines to specific V gene name - :param j_gene: Filter germlines to specific J gene name - :param unique: Skip germlines with duplicate amino acid sequence - :return: list of top V chains, list of top J chains - """ - from abnumber.germlines import get_imgt_v_chains, get_imgt_j_chains - - chain = self if self.scheme == 'imgt' and self.cdr_definition == 'imgt' else self.renumber('imgt') - v_chains = list(get_imgt_v_chains(chain.chain_type).values()) - j_chains = list(get_imgt_j_chains(chain.chain_type).values()) - - if v_gene: - if v_gene.startswith('IGKV') and self.chain_type == 'L': - raise NotImplementedError('Cannot graft lambda chain into kappa chain') - if v_gene.startswith('IGLV') and self.chain_type == 'K': - raise NotImplementedError('Cannot graft kappa chain into lambda chain') - v_chains = [chain for chain in v_chains if chain.name.startswith(v_gene)] - if not v_chains: - print('Available V genes:', get_imgt_v_chains(chain.chain_type).keys()) - raise ValueError(f'No V genes found for "{chain.chain_type}" chain gene name "{v_gene}"') - - if j_gene: - j_chains = [chain for chain in j_chains if chain.name.startswith(j_gene)] - if not j_chains: - print('Available J genes:', get_imgt_j_chains(chain.chain_type).keys()) - raise ValueError(f'No J genes found for "{chain.chain_type}" chain gene name "{j_gene}"') - - if unique: - v_chains = _get_unique_chains(v_chains) - j_chains = _get_unique_chains(j_chains) - - v_alignments = [chain.align(germline) for germline in v_chains] - v_ranks = np.array([alignment.num_mutations() for alignment in v_alignments]).argsort(kind='stable')[:limit] - top_v_chains = [v_chains[r] for r in v_ranks] - - j_alignments = [chain.align(germline) for germline in j_chains] - j_ranks = np.array([alignment.num_mutations() for alignment in j_alignments]).argsort(kind='stable')[:limit] - top_j_chains = [j_chains[r] for r in j_ranks] - - return top_v_chains, top_j_chains - - def find_merged_human_germline(self, top=0, v_gene=None, j_gene=None) -> 'Chain': - """Find n-th most identical V and J germline sequence based on IMGT alignment and merge them into one Chain - - :param top: Return top N most identical germline (0-indexed) - :param 
v_gene: Filter germlines to specific V gene name - :param j_gene: Filter germlines to specific J gene name - :return: merged germline sequence Chain object - """ - v_chains, j_chains = self.find_human_germlines(limit=top+1, v_gene=v_gene, j_gene=j_gene) - v_chain = v_chains[top] - j_chain = j_chains[top] - - merged_dict = { - **{pos: aa for pos, aa in j_chain}, - **{pos: aa for pos, aa in v_chain} - } - - return Chain( - sequence=None, - aa_dict=merged_dict, - chain_type=self.chain_type, - scheme='imgt', - tail='' - ) - - @property - def raw(self): - """Access raw representation of this chain to allow unaligned numeric indexing and slicing - - >>> # String numbering is based on scheme numbering - >>> chain['1'] - 'Q' - >>> # Numbering of ``chain.raw`` starts at 0 - >>> chain.raw[0] - 'Q' - >>> # Slicing with string is based on scheme numbering, the end is inclusive - >>> chain['1':'10'] - 'QVQLQQSGAE' - >>> # Slicing with ``chain.raw`` starts at 0, the end is exclusive (Python style) - >>> chain.raw[0:10] - 'QVQLQQSGAE' - - :return: Raw chain accessor that can be sliced or indexed to produce a new :class:`Chain` object - """ - return RawChainAccessor(self) - - @property - def regions(self): - """Dictionary of region dictionaries - - Region is an uppercase string, one of: ``"FR1", "CDR1", "FR2", "CDR2", "FR3", "CDR3", "FR4"`` - - :return: Dictionary of Region name -> Dictionary of (:class:`Position` -> Amino acid) - """ - return OrderedDict( - FR1=self.fr1_dict, - CDR1=self.cdr1_dict, - FR2=self.fr2_dict, - CDR2=self.cdr2_dict, - FR3=self.fr3_dict, - CDR3=self.cdr3_dict, - FR4=self.fr4_dict - ) - - @property - def positions(self): - """Dictionary of :class:`Position` -> Amino acid""" - positions = OrderedDict() - for region, aa_dict in self.regions.items(): - for pos, aa in aa_dict.items(): - positions[pos] = aa - return positions - - @property - def seq(self): - """Unaligned string representation of the variable chain sequence - - :return: Unaligned string representation of the variable chain sequence - """ - return ''.join(self.positions.values()) - - @property - def fr1_seq(self): - """Unaligned string representation of the Framework 1 region sequence""" - return ''.join(self.fr1_dict.values()) - - @property - def cdr1_seq(self): - """Unaligned string representation of the CDR 1 region sequence""" - return ''.join(self.cdr1_dict.values()) - - @property - def fr2_seq(self): - """Unaligned string representation of the Framework 2 region sequence""" - return ''.join(self.fr2_dict.values()) - - @property - def cdr2_seq(self): - """Unaligned string representation of the CDR 2 region sequence""" - return ''.join(self.cdr2_dict.values()) - - @property - def fr3_seq(self): - """Unaligned string representation of the Framework 3 region sequence""" - return ''.join(self.fr3_dict.values()) - - @property - def cdr3_seq(self): - """Unaligned string representation of the CDR 3 region sequence""" - return ''.join(self.cdr3_dict.values()) - - @property - def fr4_seq(self): - """Unaligned string representation of the Framework 4 region sequence""" - return ''.join(self.fr4_dict.values()) - - -class RawChainAccessor: - def __init__(self, chain: Chain): - self.chain = chain - - def __getitem__(self, item): - if isinstance(item, slice): - if item.step is not None and item.step != 1: - raise IndexError(f'Slicing with step != 1 is not implemented, got: {item}') - if item.start is not None and not is_integer(item.start): - raise IndexError(f'Expected int start index for chain.raw, got 
{type(item.start)}: {item.start}') - if item.stop is not None and not is_integer(item.stop): - raise IndexError(f'Expected int end index for chain.raw, got {type(item.stop)}: {item.stop}') - return self.chain.slice(start=item.start, stop=item.stop, stop_inclusive=False, allow_raw=True) - if not is_integer(item): - raise IndexError(f'Expected int indexing for chain.raw, got {type(item)}: {item}') - pos = self.chain.get_position_by_raw_index(item) - return self.chain[pos] - - - - diff --git a/spaces/luost26/DiffAb/anarci/schemes.py b/spaces/luost26/DiffAb/anarci/schemes.py deleted file mode 100644 index 61f812aeae74b3d0409361a44b3768b70887afd2..0000000000000000000000000000000000000000 --- a/spaces/luost26/DiffAb/anarci/schemes.py +++ /dev/null @@ -1,1691 +0,0 @@ -# ANARCI - Antibody Numbering and Antigen Receptor ClassIfication -# Copyright (C) 2016 Oxford Protein Informatics Group (OPIG) -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details.# -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -''' -Module containing functions to convert hmm alignment to a numbering scheme. - -Currently implemented - -For IG's -IMGT -Chothia -Kabat -Martin (Extended Chothia) -Aho -Wolfguy - -For TR's -IMGT -(Aho) - ---------------------------------------------------------------------------------------------------------------------- -Functions are written to a template: - -There are 128 match states in the HMMs (these are the IMGT states). The alignment to these states must be converted to -correspond to the scheme of choice. - -We define: - - a state string consisting of 'X' and 'I' where: - X means that for the state there is an equivalent position in the numbering scheme. - I means that for the state there is not an equivalent position in the numbering scheme. It should therefore be - considered as an insertion in the scheme. - - - a region string consisting of characters (integers in the currently implemented schemes). Each character -corresponds to a contiguous region. Therefore each state can be assigned a region according to the scheme. - - - a mapping between region characters and region indices as a dictionary. e.g. the first region character maps -to 0, second to 1 ... - - - a dictionary containing the difference between state number (imgt) and scheme number at the *beginning* of -each region using the region indices as keys and the difference as values. - - - the number of regions defined - - - a list for which delete states should not be included in the numbering (typically those for the cdrs). This -will allow the length of the region to be the number of residues found instead of the number of possible states plus -insertions. - - -This all goes into the _number_regions function along with the sequence and the state_vector (the alignment from the -HMM). - -_number regions will then divide the aligned part of the sequence into as many regions as defined above. Within each -region it will give a numbering according to the input parameters. 
A list of lists will be returned containing the -numbered sequence for each region. - -Some of the regions will not be numbered correctly according to the scheme. For example the insertions for the CDRs -will not necessarily be on the correct residue. For each different scheme these regions are then modified (see code -for implementation) - -Finally the full numbered sequence is compiled and returned to the calling function. ---------------------------------------------------------------------------------------------------------------------- - -Other schemes can be implemented following the template above. - - -''' - -# Alphabet used for insertion (last (-1th) is a blank space for no insertion) -alphabet = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "AA", "BB", "CC", "DD", "EE", "FF", "GG", "HH", "II", "JJ", "KK", "LL", "MM", "NN", "OO", "PP", "QQ", "RR", "SS", "TT", "UU", "VV", "WW", "XX", "YY", "ZZ", " "] - -# Blosum62 matrix. Used in some annotation methods to recognise pre-defined motifs -blosum62 = {('B', 'N'): 3, ('W', 'L'): -2, ('G', 'G'): 6, ('X', 'S'): 0, ('X', 'D'): -1, ('K', 'G'): -2, ('S', 'E'): 0, ('X', 'M'): -1, ('Y', 'E'): -2, ('W', 'R'): -3, ('I', 'R'): -3, ('X', 'Z'): -1, ('H', 'E'): 0, ('V', 'M'): 1, ('N', 'R'): 0, ('I', 'D'): -3, ('F', 'D'): -3, ('W', 'C'): -2, ('N', 'A'): -2, ('W', 'Q'): -2, ('L', 'Q'): -2, ('S', 'N'): 1, ('Z', 'K'): 1, ('V', 'N'): -3, ('Q', 'N'): 0, ('M', 'K'): -1, ('V', 'H'): -3, ('G', 'E'): -2, ('S', 'L'): -2, ('P', 'R'): -2, ('D', 'A'): -2, ('S', 'C'): -1, ('E', 'D'): 2, ('Y', 'G'): -3, ('W', 'P'): -4, ('X', 'X'): -1, ('Z', 'L'): -3, ('Q', 'A'): -1, ('V', 'Y'): -1, ('W', 'A'): -3, ('G', 'D'): -1, ('X', 'P'): -2, ('K', 'D'): -1, ('T', 'N'): 0, ('Y', 'F'): 3, ('W', 'W'): 11, ('Z', 'M'): -1, ('L', 'D'): -4, ('M', 'R'): -1, ('Y', 'K'): -2, ('F', 'E'): -3, ('M', 'E'): -2, ('S', 'S'): 4, ('X', 'C'): -2, ('Y', 'L'): -1, ('H', 'R'): 0, ('P', 'P'): 7, ('K', 'C'): -3, ('S', 'A'): 1, ('P', 'I'): -3, ('Q', 'Q'): 5, ('L', 'I'): 2, ('P', 'F'): -4, ('B', 'A'): -2, ('Z', 'N'): 0, ('M', 'Q'): 0, ('V', 'I'): 3, ('Q', 'C'): -3, ('I', 'H'): -3, ('Z', 'D'): 1, ('Z', 'P'): -1, ('Y', 'W'): 2, ('T', 'G'): -2, ('B', 'P'): -2, ('P', 'A'): -1, ('C', 'D'): -3, ('Y', 'H'): 2, ('X', 'V'): -1, ('B', 'B'): 4, ('Z', 'F'): -3, ('M', 'L'): 2, ('F', 'G'): -3, ('S', 'M'): -1, ('M', 'G'): -3, ('Z', 'Q'): 3, ('S', 'Q'): 0, ('X', 'A'): 0, ('V', 'T'): 0, ('W', 'F'): 1, ('S', 'H'): -1, ('X', 'N'): -1, ('B', 'Q'): 0, ('K', 'A'): -1, ('I', 'Q'): -3, ('X', 'W'): -2, ('N', 'N'): 6, ('W', 'T'): -2, ('P', 'D'): -1, ('B', 'C'): -3, ('I', 'C'): -1, ('V', 'K'): -2, ('X', 'Y'): -1, ('K', 'R'): 2, ('Z', 'R'): 0, ('W', 'E'): -3, ('T', 'E'): -1, ('B', 'R'): -1, ('L', 'R'): -2, ('Q', 'R'): 1, ('X', 'F'): -1, ('T', 'S'): 1, ('B', 'D'): 4, ('Z', 'A'): -1, ('M', 'N'): -2, ('V', 'D'): -3, ('F', 'A'): -2, ('X', 'E'): -1, ('F', 'H'): -1, ('M', 'A'): -1, ('K', 'Q'): 1, ('Z', 'S'): 0, ('X', 'G'): -1, ('V', 'V'): 4, ('W', 'D'): -4, ('X', 'H'): -1, ('S', 'F'): -2, ('X', 'L'): -1, ('B', 'S'): 0, ('S', 'G'): 0, ('P', 'M'): -2, ('Y', 'M'): -1, ('H', 'D'): -1, ('B', 'E'): 1, ('Z', 'B'): 1, ('I', 'E'): -3, ('V', 'E'): -2, ('X', 'T'): 0, ('X', 'R'): -1, ('R', 'R'): 5, ('Z', 'T'): -1, ('Y', 'D'): -3, ('V', 'W'): -3, ('F', 'L'): 0, ('T', 'C'): -1, ('X', 'Q'): -1, ('B', 'T'): -1, ('K', 'N'): 0, ('T', 'H'): -2, ('Y', 'I'): -1, ('F', 'Q'): -3, ('T', 'I'): -1, ('T', 'Q'): -1, ('P', 'L'): -3, ('R', 'A'): -1, ('B', 'F'): -3, ('Z', 'C'): -3, 
('M', 'H'): -2, ('V', 'F'): -1, ('F', 'C'): -2, ('L', 'L'): 4, ('M', 'C'): -1, ('C', 'R'): -3, ('D', 'D'): 6, ('E', 'R'): 0, ('V', 'P'): -2, ('S', 'D'): 0, ('E', 'E'): 5, ('W', 'G'): -2, ('P', 'C'): -3, ('F', 'R'): -3, ('B', 'G'): -1, ('C', 'C'): 9, ('I', 'G'): -4, ('V', 'G'): -3, ('W', 'K'): -3, ('G', 'N'): 0, ('I', 'N'): -3, ('Z', 'V'): -2, ('A', 'A'): 4, ('V', 'Q'): -2, ('F', 'K'): -3, ('T', 'A'): 0, ('B', 'V'): -3, ('K', 'L'): -2, ('L', 'N'): -3, ('Y', 'N'): -2, ('F', 'F'): 6, ('L', 'G'): -4, ('B', 'H'): 0, ('Z', 'E'): 4, ('Q', 'D'): 0, ('X', 'B'): -1, ('Z', 'W'): -3, ('S', 'K'): 0, ('X', 'K'): -1, ('V', 'R'): -3, ('K', 'E'): 1, ('I', 'A'): -1, ('P', 'H'): -2, ('B', 'W'): -4, ('K', 'K'): 5, ('H', 'C'): -3, ('E', 'N'): 0, ('Y', 'Q'): -1, ('H', 'H'): 8, ('B', 'I'): -3, ('C', 'A'): 0, ('I', 'I'): 4, ('V', 'A'): 0, ('W', 'I'): -3, ('T', 'F'): -2, ('V', 'S'): -2, ('T', 'T'): 5, ('F', 'M'): 0, ('L', 'E'): -3, ('M', 'M'): 5, ('Z', 'G'): -2, ('D', 'R'): -2, ('M', 'D'): -3, ('W', 'H'): -2, ('G', 'C'): -3, ('S', 'R'): -1, ('S', 'I'): -2, ('P', 'Q'): -1, ('Y', 'A'): -2, ('X', 'I'): -1, ('E', 'A'): -1, ('B', 'Y'): -3, ('K', 'I'): -3, ('H', 'A'): -2, ('P', 'G'): -2, ('F', 'N'): -3, ('H', 'N'): 1, ('B', 'K'): 0, ('V', 'C'): -1, ('T', 'L'): -1, ('P', 'K'): -1, ('W', 'S'): -3, ('T', 'D'): -1, ('T', 'M'): -1, ('P', 'N'): -2, ('K', 'H'): -1, ('T', 'R'): -1, ('Y', 'R'): -2, ('L', 'C'): -1, ('B', 'L'): -4, ('Z', 'Y'): -2, ('W', 'N'): -4, ('G', 'A'): 0, ('S', 'P'): -1, ('E', 'Q'): 2, ('C', 'N'): -3, ('H', 'Q'): 0, ('D', 'N'): 1, ('Y', 'C'): -2, ('L', 'H'): -3, ('E', 'C'): -4, ('Z', 'H'): 0, ('H', 'G'): -2, ('P', 'E'): -1, ('Y', 'S'): -2, ('G', 'R'): -2, ('B', 'M'): -3, ('Z', 'Z'): 4, ('W', 'M'): -1, ('Y', 'T'): -2, ('Y', 'P'): -3, ('Y', 'Y'): 7, ('T', 'K'): -1, ('Z', 'I'): -3, ('T', 'P'): -1, ('V', 'L'): 1, ('F', 'I'): 0, ('G', 'Q'): -2, ('L', 'A'): -1, ('M', 'I'): 1} - - -def smooth_insertions(state_vector): - ''' - The function aims to correct to the expected imgt alignment. Renumbering functions then translate from the imgt scheme to the - appropriate scheme. - - Handle insertions made by HMMER that we suspect may be in the wrong position. - Edge cases include: - - Insertions at the C terminal of fw1, fw3 and fw3 regions. Can occur when 'conserved' residues have been mutated and the - same amino acid appears in the the following CDR (e.g. mutate cysteine at 104 but the CDR3 has one or more cysteines) - - Same as above possible (but not observed in structure seqs) for N terminal of fw2, fw3 and fw4... TODO - - Heavily mutated N terminal regions that are partially recognised (e.g. 3gk8 chain H). Insertions should not be allowed - before N terminal deletions have been used. Preserve deletion locations that are not N terminal (e.g. 10 in IMGT H) if - the gap has been placed by the alignment. - - ''' - # Small overhead doing these corrections but worth it for reducing edge cases. - - # Enforce insertion patterns as below. The CDRs are renumbered in each case so that insertions are placed accoring to the scheme -# '11111111111111111111111111222222222222333333333333333334444444444555555555555555555555555555555555555555666666666666677777777777' -# ' mmmi mmmi mmmi ' -# ' mmmi immm mmmi immm mmmi immm ' - - # Enforce any insertions at the end and beginning of framework regions to be moved into the CDR region for renumbering. 
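- # (Illustrative sketch, not produced by this function:) a state_vector is a list of - # ((state_id, state_type), sequence_index) tuples, e.g. - #     [((26, 'm'), 0), ((27, 'm'), 1), ((27, 'i'), 2), ((28, 'd'), None)] - # where 'm' states consume a residue at their IMGT state, 'i' states consume a residue - # as an insertion on the previous state, and 'd' states consume no residue (sequence index None).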
- enforced_patterns = [ [(25,'m'),(26,'m'),( 27,'m'),( 28,'i')], - [(38,'i'),(38,'m'),(39,'m'),(40,'m')], - [(54,'m'),(55,'m'),(56,'m'),(57,'i')], - [(65,'i'),(65,'m'),(66,'m'),(67,'m')], - [(103,'m'),(104,'m'),(105,'m'),(106,'i')], - [(117,'i'),(117,'m'),(118,'m'),(119,'m')] ] - - # Insertions in FW1 are only allowed if there are a fewer number of n-terminal deletions made. - - state_buffer = [] - sv = [] - for (state_id, state_type ), si in state_vector: - if state_id < 23: # Everything before the cysteine at 23. - state_buffer.append( ((state_id, state_type ), si) ) - reg = -1 - elif 25 <= state_id < 28: # Add to the buffer - state_buffer.append( ((state_id, state_type ), si) ) - reg = 0 - elif 37 < state_id <= 40: # Add to the buffer - state_buffer.append( ((state_id, state_type ), si) ) - reg = 1 - elif 54 <= state_id < 57: # Add to the buffer - state_buffer.append( ((state_id, state_type ), si) ) - reg = 2 - elif 64 < state_id <= 67: # Add to the buffer - state_buffer.append( ((state_id, state_type ), si) ) - reg = 3 - elif 103 <= state_id < 106: # Add to the buffer - state_buffer.append( ((state_id, state_type ), si) ) - reg = 4 - elif 116 < state_id <= 119: # Add to the buffer - state_buffer.append( ((state_id, state_type ), si) ) - reg = 5 - elif len(state_buffer) != 0: # Add the buffer and reset - - # Find the number of insertions in the buffer - nins = sum( 1 for s in state_buffer if s[0][1] == 'i' ) - - # If there are insertions, adjust the alignment - if nins > 0: # We have insertions - - if reg == -1: # FW1, only adjust if there are the same or more N terminal deletions than insertions - nt_dels = state_buffer[0][0][0] - 1 # Missing states - for (_id, _type ), _si in state_buffer: # Explicit deletion states. - if _type == 'd' or _si == None: - nt_dels +=1 - else: # First residue found - break - if nt_dels >= nins: # More n terminal deletions than insertions found. Likely misalignment. - - # Preserve the deleted states structure by using the same match annotations - new_states = [ s for s, _ in state_buffer if s[1] == 'm'] - _first = new_states[0][0] - - # Remove the deletions so that only residue positions are included - state_buffer = [ s for s in state_buffer if s[0][1] != 'd' ] - - # Extend N terminal states backwards from the first match states - _add = len( state_buffer ) - len( new_states ) - assert _add >= 0, 'Implementation logic error' # Should be adding a positive number of positions - new_states = [ (_,'m') for _ in range( _first - _add, _first ) ] + new_states - assert len(new_states)==len(state_buffer), 'Implementation logic error' # Should have the same length - - # Assign them preserving the order of the sequence. - for i in range( len(state_buffer ) ): - sv.append( ( new_states[i], state_buffer[i][1]) ) - else: - sv += state_buffer # The insertions may be incorrect but unknown what to do. Let the alignment place. - else: - # Remove any deletions in the buffer. Unlikely to happen but do anyway - state_buffer = [ s for s in state_buffer if s[0][1] != 'd' ] - - # Define the new states defined by the enforced pattern and the length of the buffer - if reg % 2: # nterm fw - new_states = [enforced_patterns[reg][0]]*max( 0, len(state_buffer)-3) + enforced_patterns[reg][ max( 4-len(state_buffer), 1):] - else: # cterm fw - new_states = enforced_patterns[reg][:3] + [enforced_patterns[reg][2]]*max( 0, len(state_buffer)-3) - # Assign them preserving the order of the sequence. 
- for i in range( len(state_buffer ) ): - sv.append( ( new_states[i], state_buffer[i][1]) ) - - else: # Nothing to do - either all match or deletion states. - sv += state_buffer - - # Add the current state - sv.append( ((state_id, state_type ), si) ) - - # Reset state buffer - state_buffer = [] - - else: # Simply append - sv.append( ((state_id, state_type ), si) ) - - - return sv - - -# General function to give annotations for regions that have direct mappings onto the hmm alignment (imgt states) -def _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions): - """ - General function to number a sequence and divide it into different regions - - @param sequence: The sequence string - @param state_vector: The list of states from the aligned hmm - @param state_string: A string of states for the scheme relative to IMGT (this is X for a direct equivalence, I if needs to be treated as insertion) - @param region_string: A string of characters that indicate which hmm states are in each regions for this scheme (i.e. how should the sequence be divided up) - @param region_index_dict: A dictionary converting the characters in region string to an index of the regions. - @param rels: The difference of the numbering integer at the *start* of each region - @param n_regions: The number of regions - @param exclude_deletions: A list of region indices for which deletion states should not be included. Typically the CDRs. - These will be reannotated in the scheme function. Also allows the reset of insertions. - - @return: A list of lists where each region has been numbered according to the scheme. Some regions will need renumbering. This should be taken care of after the function called. - - """ - - state_vector = smooth_insertions( state_vector ) - - _regions = [ [] for _ in range(n_regions) ] - - # Initialise the insertion index (-1 is a blank space) and the previous state. - insertion = -1 - previous_state_id = 1 - previous_state_type = 'd' - start_index, end_index = None, None - - region = None - - # Iterate over the aligned state vector - for (state_id, state_type ), si in state_vector: - - # Retrieve the region index - if state_type != "i" or region is None: # BUG_FIX - JD 9/4/15 - do not allow a new region to start as an insertion. - region = region_index_dict[region_string[state_id-1]] - - - # Check the state_types - if state_type == "m": # It is a match - - # Check whether this position is in the scheme as an independent state - if state_string[state_id-1]=="I": # No, it should be treated as an insertion - if previous_state_type != 'd': # Unless there was a deletion beforehand in which case this should be a real pos. 
- insertion +=1 # Increment the insertion annotation index - rels[region] -= 1 # Update the relative numbering from the imgt states - else: # Yes - insertion = -1 # Reset the insertions - - # Add the numbering annotation to the appropriate region list - _regions[region].append( ( (state_id + rels[region], alphabet[insertion] ), sequence[si] ) ) - previous_state_id = state_id # Record the previous state ID - if start_index is None: - start_index = si - end_index = si - - previous_state_type = state_type - - elif state_type == "i": # It is an insertion - insertion +=1 # Increment the insertion annotation index - - # Add the numbering annotation to the appropriate region list - _regions[region].append( ( (previous_state_id + rels[region], alphabet[insertion]), sequence[si] ) ) - if start_index is None: - start_index = si - end_index = si - - previous_state_type = state_type - - else: # It is a deletion - previous_state_type = state_type - - # Check whether this position is in the scheme as an independent state - if state_string[state_id-1]=="I": # No, therefore irrelevant to the scheme. - rels[region] -= 1 # Update the relative numbering from the imgt states - continue - - insertion = -1 # Reset the insertions - previous_state_id = state_id # Record the previous state ID, should not be needed (no delete to insert state transition) - - - # Reset the inssertion index if necessary and allowed. (Means the insertion code is meaningless and will be reannotated) - if insertion >= 25 and region in exclude_deletions: - insertion = 0 - - assert insertion < 25, "Too many insertions for numbering scheme to handle" # We ran out of letters. - - return _regions, start_index, end_index - - -# Functions to perform the numbering and the corrections for each of the implemented schemes. -# These have been written fairly verbosely so that the template of how to generate a function for a new scheme is more clear. -# They have two stages: Perform the mapping between imgt and the scheme; Renumber those regions that do not map nicely onto imgt (e.g. CDR insertions) - - - -######## -# IMGT # -######## -# - Renumbering of the CDR 1 and 2 regions in IMGT has now been implemented to ensure consistency with the gapping rules of the -# scheme. Previously gaps were defined using the HMM alignment as the underlying model was already based on the IMGT scheme. This -# worked well in original test cases but appears to give inaccurate annotations in a significant number of cases in NGS size -# sequence sets. We therefore now explicitly renumber the CDR 1 and 2 as with all the other schemes. - -def number_imgt(state_vector, sequence): - """ - Apply the IMGT numbering scheme for heavy or light chains - - Rules should be implemented using two strings - the state string and the region string. - - There are 128 states in the HMMs. Treat X as a direct match in IMGT scheme, I is an insertion. (All X's for IMGT) - XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXX - 11111111111111111111111111 222222222222 33333333333333333 4444444444 555555555555555555555555555555555555555 6666666666666 77777777777 - - Regions - (N.B These do not match up with any particular definition of CDR) - 1. All positions before CDR1 - 2. CDR1 positions - 3. Positions between CDR1/2 - 4. CDR2 positions - 5. Positions between CDR2/3 - 6. CDR positions 105 (inc) to 118 (exc) - 7. 
Positions after CDR3 - - """ - - # Set up the numbering - - # State string - 'X' means the imgt position exists in the scheme. 'I' means that it should be treated as an insertion of the previous number - state_string = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' - - # Region string - regions that should be treated separately in putting the numbering together - region_string = '11111111111111111111111111222222222222333333333333333334444444444555555555555555555555555555555555555555666666666666677777777777' - - region_index_dict = { - "1":0, - "2":1, - "3":2, - "4":3, - "5":4, - "6":5, - "7":6 - } - - # Define how the scheme's numbering differs from IMGT at the start of each region. - # This is updated in the loop below - rels = {0:0, - 1:0, - 2:0, - 3:0, - 4:0, - 5:0, - 6:0, - 7:0 - } - - n_regions = 7 - - exclude_deletions = [1,3,5] - - _regions, startindex, endindex = _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions) - - ############### - # Renumbering # - ############### - - _numbering = [ _regions[0], # Fw1 - [], # CDR1 - _regions[2], # Fw2 - [], # CDR2 - _regions[4], # Fw3 - [], # CDR3 - _regions[6], # Fw4 - - ] - - # The alignment from HMMER should be correct for CDRs 1 and 2. Testing has shown not always the case and 'manual' renumbering - # is required as with the other schemes. - - # CDR1 - # CDR1 has a range from 27 (inc.) to 39 (exc.) and has a theoretical maximum length of 12. - cdr1seq = "".join([ x[1] for x in _regions[1] if x[1] != "-" ]) - cdr1length = len(cdr1seq) - si = 0 - prev_state = 26 - for ann in get_imgt_cdr(cdr1length, 12, 27, 39): - if not ann: - _numbering[1].append( ((prev_state+1, ' '), '-') ) - prev_state += 1 - else: - _numbering[1].append( (ann, cdr1seq[si]) ) - prev_state = ann[0] - si += 1 - - # CDR2 - # CDR2 has a range from 56 (inc.) to 66 (exc.) and has a theoretical length of 10. - cdr2seq = "".join([ x[1] for x in _regions[3] if x[1] != "-" ]) - cdr2length = len(cdr2seq) - si = 0 - prev_state = 55 - for ann in get_imgt_cdr(cdr2length, 10, 56, 66): - if not ann: - _numbering[3].append( ((prev_state+1, ' '), '-') ) - prev_state += 1 - else: - _numbering[3].append( (ann, cdr2seq[si]) ) - prev_state = ann[0] - si += 1 - - # FW3. We allow the HMM to place insertions. Technically all insertion points are taken care of but in reality insertions can - # and do occur. No specification of where the insertions should be placed. - - - # CDR3 - # CDR3 has a range from 105 (inc.) to 118 (exc.). Insertions are placed on 112 and 111 symetrically. IMGT has a technical - # maximum length of 65 (13 positions, 26*2 insertions) . In practice ANARCI will not recognise CDR3s of this length. - cdr3seq = "".join([ x[1] for x in _regions[5] if x[1] != "-" ]) - cdr3length = len(cdr3seq) - if cdr3length > 117: return [], startindex, endindex # Too many insertions. Do not apply numbering. - si = 0 - previous_state_id = 104 - for ann in get_imgt_cdr(cdr3length, 13, 105, 118): - if ann is None: - _numbering[5].append( ((previous_state_id+1, " "), "-" ) ) - previous_state_id+=1 - else: - _numbering[5].append( (ann, cdr3seq[si] ) ) - previous_state_id = ann[0] - si+=1 - - # Return the full vector and the start and end indices of the numbered region of the sequence - return gap_missing( _numbering ), startindex, endindex - -def get_imgt_cdr(length, maxlength, start, end): - """ - Symmetrically number a CDR loop (e.g. 
CDRL1/CDRH2 for IMGT) - @param length: Define the length of target CDR - @param maxlength: Define the theoretical limit (e.g. L1 = 12 for the IMGT scheme) - @param start, end: Start and end position numbers - """ - annotations = [ None for _ in range(max(length, maxlength)) ] - if length == 0: - return annotations - elif length == 1: - annotations[0] = (start, ' ') - return annotations - - front, back = 0, -1 - #az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - #za = "ZYXWVUTSRQPONMLKJIHGFEDCBA" - - az = alphabet[:-1] - za = az[::-1] - - for i in range(min(length, maxlength)): - if i % 2: - annotations[back] = (end + back, " ") - back -= 1 - else: - annotations[front] = (start + front, " ") - front += 1 - - # Add insertions around the centre point - centrepoint = [ i for i,v in enumerate(annotations) if v == None ] - if not centrepoint: - return annotations - - centre_left = annotations[min(centrepoint)-1][0] # Get the index right before the first None - centre_right = annotations[max(centrepoint)+1][0] # Get the index right after the first None - - # For cases with an even max length - if not maxlength % 2: - frontfactor, backfactor = maxlength//2, maxlength//2 - # For cases with an odd max length - else: - frontfactor, backfactor = (maxlength//2)+1, maxlength//2 - - for i in range(max(0, length-maxlength)): - if not i % 2: - annotations[back] = (centre_right, za[back + backfactor]) - back -= 1 - else: - annotations[front] = (centre_left, az[front - frontfactor]) - front += 1 - - return annotations - - -####### -# Aho # -####### -# Heuristic regapping based on the AHo specification as detailed on AAAAA website. Gap order depends on the chain type -def number_aho(state_vector, sequence, chain_type): - """ - Apply the Aho numbering scheme - - Rules should be implemented using two strings - the state string and the region string. - - There are 128 states in the HMMs. Treat X as a direct match in IMGT scheme, I is an insertion. (All X's for IMGT) - - XXXXXXX XXX XXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXX - AAAAAAA BBB CCCCCCCCCCCCCC DDDDDDDDDDDDDDDD EEEEEEEEEEEEEEE FFFFFFFFFFFFFFFFFFFF HHHHHHHHHHHHHHHH IIIIIIIIIIIII JJJJJJJJJJJJJ KKKKKKKKKKK - - - Regions - (N.B These do not match up with any particular definition of CDR) - A. EMPTY (now included in B) - B. 1-10 inclusive. Indel occurs at 8 - C. 11-24 inclusive. - D. 25-42 inclusive (deletion surround 28) 32-42 inclusive (deletions surround 36) - E. 43-57 inclusive - F. 58-77 inclusive (deletions surround 63). Alpha chains have deletions at 74,75 - G. EMPTY (now included in H) - H. 78-93 inclusive gaps on 86 then 85, insertions on 85 linearly - I. 94-106 inclusive - J. 107-138 inclusive gaps on 123 symetrically. - K. 139-149 inclusive. - - """ - - # Set up the numbering - - # State string - 'X' means the imgt position exists in the scheme. 
'I' means that it should be treated as an insertion of the previous number - state_string = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' - - # Region string - regions that should be treated separately in putting the numbering together - region_string = 'BBBBBBBBBBCCCCCCCCCCCCCCDDDDDDDDDDDDDDDDEEEEEEEEEEEEEEEFFFFFFFFFFFFFFFFFFFFHHHHHHHHHHHHHHHHIIIIIIIIIIIIIJJJJJJJJJJJJJKKKKKKKKKKK' -# 1 2 3 4 5 7 8 9 10 - - - region_index_dict = dict( list(zip( "ABCDEFGHIJK", list(range(11)) )) ) - - # Define how the scheme's numbering differs from IMGT at the start of each region. - # This is updated in the loop below - rels = {0:0, - 1:0, - 2:0, - 3:0, - 4:2, - 5:2, - 6:2, - 7:2, - 8:2, - 9:2, - 10:21} - - n_regions = 11 - - exclude_deletions = [1,3,4,5,7,9] - - _regions, startindex, endindex = _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions) - - ############### - # Renumbering # - ############### - - _numbering = [ _regions[0], _regions[1], _regions[2],[], _regions[4], [], _regions[6], [], _regions[8],_regions[9],_regions[10] ] - - ################################## - # Move the indel in fw 1 onto 8 # - ################################## - - # Place indels on 8 - # Find the first recognised residue and change the expected length of the stretch given the starting point. - # This prevents n terminal deletions being placed at 8 incorrectly. - length = len( _regions[1] ) - if length > 0: - start = _regions[1][0][0][0] - stretch_len = 10 - (start -1) - if length > stretch_len: # Insertions are present. Place on 8 - annotations = [ (_," ") for _ in range(start,9) ] + [ (8,alphabet[_]) for _ in range( length - stretch_len ) ] + [(9," "),(10," ")] - else: - ordered_deletions = [(8," ")] + [(_," ") for _ in range(start, 11) if _ != 8] - annotations = sorted( ordered_deletions[max(stretch_len-length, 0):] ) - _numbering[1] = [ (annotations[i], _regions[1][i][1]) for i in range(length) ] - - ######### - # CDR 1 # - divided in two parts in the Aho scheme. - ######### - gaps at 28 depending on the chain type. - - # "VH domains, as well as the majority of the VA domains, have a one-residue gap in position 28, VK and VB domains a two-residue - # gap in position 27 and 28." - - # We use the link below as the reference for the scheme. - # https://www.bioc.uzh.ch/plueckthun/antibody/Numbering/Alignment.html - - # Some of the header lines in these images are offset by one (VH)! The gaps really are centered at 28 and 36 - # https://www.bioc.uzh.ch/plueckthun/antibody/Sequences/Rearranged/PDB_VK.html - # https://www.bioc.uzh.ch/plueckthun/antibody/Sequences/Rearranged/PDB_VL.html - # https://www.bioc.uzh.ch/plueckthun/antibody/Sequences/Rearranged/PDB_VH.html - # https://www.bioc.uzh.ch/plueckthun/antibody/Sequences/Rearranged/PDB_VA.html - # https://www.bioc.uzh.ch/plueckthun/antibody/Sequences/Rearranged/PDB_VB.html - # https://www.bioc.uzh.ch/plueckthun/antibody/Sequences/Rearranged/PDB_VG.html - # https://www.bioc.uzh.ch/plueckthun/antibody/Sequences/Rearranged/PDB_VD.html - - # We gap the CDR1 in a heuristic way using the gaps. - # This means that CDR1 gapping will not always be correct. For example if one grafts a Kappa CDR1 loop onto a Lambda framework - # the gapping patter might now be incorrect. - # Not a fan of being so prescriptive. 
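- # (Sketch of the gapping below, using the heavy-chain deletion order _H defined underneath; illustrative only:) - # a stretch of length L leaves the first max(18-L, 0) entries of the order as gaps, e.g. - #     >>> sorted(_H[:18-16])   # gapped AHo positions for a 16-residue stretch - #     [28, 36] - # so the 16 residues are numbered over AHo 25-42 with 28 and 36 left empty.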
- - # The CDR1 region included here ranges from AHo 25 to AHo 42 inclusive - - # The order in which the two loops are gapped is dependent on the chain type (see alignments in URLs above). - # Not all lengths are defined as not all lengths were crystallised in 2001 (or today). Where no example of the length was - # available the rule followed is to continue gapping the C terminal 'loop', then the N terminal 'loop', then 31 then the fw. - # In all cases I have commented where the gapping is undefined. Note that for alpha chains the gapping rules are inconsistent. - - _L = 28,36,35,37,34,38,27,29,33,39,32,40,26,30,25,31,41,42 - # |-> undefined by AHo. Gapping C terminal loop then N terminal then 31, then fw. - _K = 28,27,36,35,37,34,38,33,39,32,40,29,26,30,25,31,41,42 - # |-> undefined by AHo. Gapping C terminal loop then N terminal then fw. - _H = 28,36,35,37,34,38,27,33,39,32,40,29,26,30,25,31,41,42 - # |-> undefined by AHo. Gapping C terminal loop then N terminal then fw. - # N.B. The header on the alignment image for PDB_VH is offset by 1! - _A = 28,36,35,37,34,38,33,39,27,32,40,29,26,30,25,31,41,42 - # |-> undefined by AHo. Gapping C terminal loop then N terminal then fw. - # N.B The gapping is inconsistent for alpha chains. I follow the paper's statement that most VA have - # one gap at 28 and remove 28 and 27 before removing 40. - _B = 28,36,35,37,34,38,33,39,27,32,40,29,26,30,25,31,41,42 - # |-> undefined by AHo. Gapping C terminal loop then N terminal then 31, then fw. - _D = 28,36,35,37,34,38,27,33,39,32,40,29,26,30,25,31,41,42 - # |-> undefined by AHo. Gapping C terminal loop then N terminal then 31, then fw. - # N.B only two sequence patterns available. - _G = 28,36,35,37,34,38,27,33,39,32,40,29,26,30,25,31,41,42 - # |-> undefined by AHo. Gapping C terminal loop then N terminal then 31, then fw. - # N.B only one sequence patterns available. Delta copied. - - ordered_deletions = { 'L':_L,'K':_K, 'H':_H, 'A':_A, 'B':_B, 'D':_D, 'G':_G } - - length = len( _regions[3] ) - - annotations = [ (i, ' ') for i in sorted( ordered_deletions[chain_type][ max(18-length, 0): ] ) ] - - # Insertions are not described in the AHo scheme but must be included as there is a significant number of CDRH1s that are - # longer than the number of positions. - insertions = max( length-18 , 0 ) - if insertions > 26: - return [], startindex, endindex # Too many insertions. Do not apply numbering. - elif insertions > 0: - # They are placed on residue 36 alphabetically. - insertat = annotations.index( (36, ' ') )+1 # Always 12 - assert insertat == 12, 'AHo numbering failed' - annotations = annotations[:insertat] + [ (36, alphabet[a]) for a in range( insertions ) ] + annotations[insertat:] - - _numbering[3] = [ (annotations[i], _regions[3][i][1]) for i in range(length) ] - - ######### - # CDR 2 # - ######### - # Gaps are placed symetically at 63. - # For VA a second gap is placed at 74 and 75 according to the text in the paper. However, all the reference sequences show a - # gap at 73 and 74 see: - # https://www.bioc.uzh.ch/plueckthun/antibody/Sequences/Rearranged/PDB_VA.html - # and - # https://www.bioc.uzh.ch/plueckthun/antibody/Numbering/Alignment.html - # Either I am mis-interpreting the text in the paper or there is something a little inconsistent here... - # Given that *all* the numbered examples show the VA gap at 73 and 74 on the AAAAA website I have decided to implement this. 
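- # (Sketch:) with the default (non-alpha) order below, an 18-residue stretch drops the first - # 20-18 = 2 entries of the order, so AHo 63 and 62 are left as gaps and the remaining - # positions of 58-77 are numbered in order.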
- # - - # This region describes 58 to 77 inclusive - - if chain_type == 'A': - ordered_deletions = [74,73,63,62,64,61,65,60,66,59,67,58,68,69,70,71,72,75,76,77] - else: - ordered_deletions = [63,62,64,61,65,60,66,59,67,58,68,69,70,71,72,73,74,75,76,77] - - length = len(_regions[5]) - - annotations = [ (i, ' ') for i in sorted( ordered_deletions[ max(20-length, 0): ] ) ] - - # Insertions are not described in the AHo scheme but must be included. - insertions = max( length-20 , 0 ) - if insertions > 26: - return [], startindex, endindex # Too many insertions. Do not apply numbering. - elif insertions > 0: - # They are placed on residue 63 alphabetically. - insertat = annotations.index( (63, ' ') )+1 # Always 6 - assert insertat == 6, 'AHo numbering failed' - annotations = annotations[:insertat] + [ (63, alphabet[a]) for a in range( insertions ) ] + annotations[insertat:] - - _numbering[5] = [ (annotations[i], _regions[5][i][1]) for i in range(length) ] - - ######### - # FW3 ############################################ - # Move deletions onto 86 then 85. Insertions on 85 # - #################################################### - ordered_deletions = [86,85,87,84,88,83,89,82,90,81,91,80,92,79,93,78] - length=len( _regions[7] ) - - annotations = [ (i, ' ') for i in sorted( ordered_deletions[ max(16-length, 0): ] ) ] - - # Insertions are not described in the AHo scheme but must be included. - insertions = max( length-16 , 0 ) - if insertions > 26: - return [], startindex, endindex # Too many insertions. Do not apply numbering. - elif insertions > 0: - # They are placed on residue 85 alphabetically. - insertat = annotations.index( (85, ' ') )+1 # Always 8 - assert insertat == 8, 'AHo numbering failed' - annotations = annotations[:insertat] + [ (85, alphabet[a]) for a in range( insertions ) ] + annotations[insertat:] - - _numbering[7] = [ (annotations[i], _regions[7][i][1]) for i in range(length) ] - - - ######### - # CDR 3 # - ######### - # Deletions on 123. - # Point of the Aho scheme is that they have accounted for all possible positions. - # Assumption is that no more insertions will occur.... - # We'll put insertions on 123 linearly.(i.e.ABCDEF...) if they ever do. - - ordered_deletions = [123,124,122,125,121,126,120,127,119,128,118,129,117,130,116,131,115,132,114,133,113,134,112,135,111, - 136,110,137,109,138,108,107] - - length=len( _regions[9] ) - - annotations = [ (i, ' ') for i in sorted( ordered_deletions[ max(32-length, 0): ] ) ] - - # Insertions are not described in the AHo scheme but must be included. - insertions = max( length-32 , 0 ) - if insertions > 26: - return [], startindex, endindex # Too many insertions. Do not apply numbering. - elif insertions > 0: - # They are placed on residue 123 alphabetically. - insertat = annotations.index( (123, ' ') )+1 # Always 17 - assert insertat == 17, 'AHo numbering failed' - annotations = annotations[:insertat] + [ (123, alphabet[a]) for a in range( insertions ) ] + annotations[insertat:] - - _numbering[9] = [ (annotations[i], _regions[9][i][1]) for i in range(length) ] - - # AHo includes one extra position than IMGT in what it considers the variable domain for light chains. - #If the last state is 148 and there is at least one more residue left, then add the residue to the numbering. 
- numbering = gap_missing( _numbering ) - if len(numbering) > 0: - if numbering[-1][0] == (148, ' ') and numbering[-1][1] != '-' and endindex+1 < len(sequence): - numbering.append( ( (149, ' '), sequence[endindex+1]) ) - endindex +=1 - - return numbering, startindex, endindex - - -########### -# Chothia # -########### - -# Heavy chains -def number_chothia_heavy(state_vector, sequence): - """ - Apply the Chothia numbering scheme for heavy chains - - Rules should be implemented using two strings - the state string and the region string. - - There are 128 states in the HMMs. Treat X as a direct match in Chothia scheme, I is an insertion. - - XXXXXXXXXI XXXXXXXXXXXXX XXXXXXXIIIIXX XXXXXXXXXXXXXXXXXX XXXIXIIXXXX XXXXXXXIXXXXXXXXXXXXXXXXXXIIIXXXXXXXXXX XXXXXXXXIIIXX XXXXXXXXXXX' - 1111111111 2222222222222 3333333333333 444444444444444444 55555555555 666666666666666666666666666666666666666 7777777777777 88888888888' - - Regions - (N.B These do not match up with any particular definition of CDR) - 1 - Put the insertions at Chothia position 6 - 2 - Simple mapping (treat "I" states as inserts and not own match states) - 3 - CDRH1 - 30 (inc) to 34 (exc) put insertions on 31 - 4 - Simple mapping (treat "I" states as inserts and not own match states) - 5 - CDRH2 - 52 (inc) 58 (exc) put insertions on 52 - 6 - Simple mapping (treat "I" states as inserts and not own match states) - 7 - CDRH3 93 (inc) to 103 (exc) put insertion on 100 - 8 - Simple mapping (treat "I" states as inserts and not own match states) - - - Regions 1,3,5 and 7 are renumbered - - """ - - # State string - 'X' means the imgt position exists in the scheme. 'I' means that it should be treated as an insertion of the previous number - state_string = 'XXXXXXXXXIXXXXXXXXXXXXXXXXXXXXIIIIXXXXXXXXXXXXXXXXXXXXXXXIXIIXXXXXXXXXXXIXXXXXXXXXXXXXXXXXXIIIXXXXXXXXXXXXXXXXXXIIIXXXXXXXXXXXXX' - - # Region string - regions that should be treated separately in putting the numbering together - region_string = '11111111112222222222222333333333333333444444444444444455555555555666666666666666666666666666666666666666777777777777788888888888' - - region_index_dict = {"1":0,"2":1,"3":2,"4":3,"5":4,"6":5,"7":6,"8":7} - - # Define how the scheme's numbering differs from IMGT at the start of each region. - # This is updated in the loop below - rels = {0:0, - 1:-1, - 2:-1, - 3:-5, - 4:-5, - 5:-8, - 6:-12, - 7:-15} - - n_regions = 8 - - exclude_deletions = [0,2,4,6] # Don't put deletions in these regions - - _regions, startindex, endindex = _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions) - - - ############### - # Renumbering # - ############### - - _numbering = [ [], _regions[1] , [], _regions[3] , [], _regions[5], [], _regions[7] ] - - # Chothia H region 1 (index 0) - # Insertions are placed at Chothia position 6. - # Count how many we recognised as insertion by the hmm - insertions = len( [ 1 for _ in _regions[0] if _[0][1] != " " ] ) - # We will place all insertion in this region at Chothia position 6. - if insertions: - start = _regions[0][0][0][0] # The starting Chothia number as found by the HMM (could easily start from 2 for example) - # I have a feeling this may be a source of a bug in very unusual cases. Can't break for now. Will catch mistakes in a validate function. 
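- # (Sketch of the annotation layout built below, assuming start == 1 and insertions == 2:) - #     >>> [(p, ' ') for p in range(1, 7)] + [(6, a) for a in 'AB'] + [(7, ' '), (8, ' '), (9, ' ')] - # annotates the region as 1-6, 6A, 6B, 7, 8, 9.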
- length = len( _regions[0] ) - annotations = [ (_, " ") for _ in range(start, 7) ] + [ (6, alphabet[_]) for _ in range(insertions) ] + [(7," "),(8," "),(9," ")] - _numbering[0] = [ (annotations[i], _regions[0][i][1]) for i in range(length) ] - else: - _numbering[0] = _regions[0] - - - # CDR1 - # Chothia H region 3 (index 2) - # put insertions onto 31 - length = len( _regions[2] ) - insertions = max(length - 11, 0) # Pulled back to the cysteine as heavily engineered cdr1's are not playing nicely - - if insertions: - annotations = [(_, " ") for _ in range(23,32)] + [(31, alphabet[i]) for i in range(insertions) ] + [(32," "),(33," ")] - else: - annotations = [(_, " ") for _ in range(23,32)][:length-2] + [(32," "),(33," ")][:length] - - _numbering[2] = [ (annotations[i], _regions[2][i][1]) for i in range(length) ] - - # CDR2 - # Chothia H region 5 (index 4) - # put insertions onto 52 - length = len( _regions[4] ) - # 50 to 57 inclusive - insertions = max(length - 8, 0) # Eight positions can be accounted for, the remainder are insertions - # Delete in the order, 52, 51, 50,53, 54 ,55, 56, 57 - annotations = [(50, " "),(51, " "), (52, " ")][:max(0,length-5)] - annotations += [(52, alphabet[i]) for i in range(insertions) ] - annotations += [(53, " "),(54, " "),(55, " "),(56, " "),(57, " ")][ abs( min(0,length-5) ):] - _numbering[4] = [ (annotations[i], _regions[4][i][1]) for i in range(length) ] - - # FW3 - insertions are annotated on 82. The first three are normal positions and annotated automatically. - # Additional insertions do not occur with the kabat or the chothia numbering scheme. - # It does not make sense to place more than A, B, C on 82 as Martin and AHo work show that this is not a place that accepts - # additional insertions. - # The decision here is to allow the alignment to place additional insertions. This is in contrast to Martin where the region - # is renumbered to place insertions on 72. - - # CDR3 - # Chothia H region 7 (index 6) - # put insertions onto 100 - length = len( _regions[6] ) - if length > 36: return [], startindex, endindex # Too many insertions. Do not apply numbering. - annotations = get_cdr3_annotations(length, scheme="chothia", chain_type="heavy") - _numbering[6] = [ (annotations[i], _regions[6][i][1]) for i in range(length) ] - - # Return the full vector and the start and end indices of the numbered region of the sequence - return gap_missing( _numbering ), startindex, endindex - -# Light chains -def number_chothia_light(state_vector, sequence): - """ - Apply the Chothia numbering scheme for light chains - - Rules should be implemented using two strings - the state string and the region string. - - There are 128 states in the HMMs. Treat X as a direct match in Chothia scheme, I is an insertion. 
- XXXXXXXXXXXXXXXXXXXXXXXXXXXXX IIIIIIX XXXXXXXXXXXXXXXXXXXX XIIIIIIIXXX XXXXXIXXXXXXXIIXXXXXXXXXXXXXXXXXXXXXX XXXXXIIIIXX XXXXXXXXXXXXX - 11111111111111111111111111111 2222222 33333333333333333333 44444444444 5555555555555555555555555555555555555 66666666666 7777777777777 - - - Regions - (N.B These do not match up with any particular definition of CDR) - 1 - Simple mapping (treat "I" states as inserts and not own match states) - 2 - CDRL1 - 24 (inc) to 35 (exc) put insertions on 30 - 3 - Simple mapping (treat "I" states as inserts and not own match states) - 4 - CDRL2 - 51 (inc) 55 (exc) put insertions on 52 - 5 - Simple mapping (treat "I" states as inserts and not own match states) - 6 - CDRL3 89 (inc) to 98 (exc) put insertion on 95 - 7 - Simple mapping (treat "I" states as inserts and not own match states) - - Region 2, 3 and 5 are renumbered - - """ - - # Set up the numbering - - # State string - 'X' means the imgt position exists in the scheme. 'I' means that it should be treated as an insertion of the previous number - state_string = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXIIIIIIXXXXXXXXXXXXXXXXXXXXXXIIIIIIIXXXXXXXXIXXXXXXXIIXXXXXXXXXXXXXXXXXXXXXXXXXXXIIIIXXXXXXXXXXXXXXX' - - # Region string - regions that should be treated separately in putting the numbering together - region_string = '11111111111111111111111222222222222222223333333333333333444444444445555555555555555555555555555555555555666666666666677777777777' - - region_index_dict = {"1":0,"2":1,"3":2,"4":3,"5":4,"6":5,"7":6} - - # Define how the scheme's numbering differs from IMGT at the start of each region. - # This is updated in the loop below - rels = {0:0, - 1: 0, - 2:-6, - 3:-6, - 4:-13, - 5:-16, - 6:-20, - } - - - n_regions = 7 - - exclude_deletions = [1,3,4,5] - - _regions, startindex, endindex = _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions) - - _numbering = [ _regions[0], [], _regions[2], [], _regions[4], [], _regions[6] ] - - - ############### - # Renumbering # - ############### - - # CDR1 - # Chothia L region 2 (index 1) - # put insertions onto 30 - length = len( _regions[1] ) - insertions = max(length - 11, 0) # Eleven positions can be accounted for, the remainder are insertions - # Delete forward from 31 - annotations = [(24, " "),(25, " "), (26, " "), (27, " "), (28, " "),(29, " "),(30, " ")][:max(0,length)] - annotations += [(30, alphabet[i]) for i in range(insertions) ] - annotations += [(31, " "),(32, " "),(33, " "),(34, " ")][ abs( min(0,length-11) ):] - _numbering[1] = [ (annotations[i], _regions[1][i][1]) for i in range(length) ] - - - # CDR2 - # Chothia L region 4 (index 3) - # put insertions onto 52. - length = len( _regions[3] ) - insertions = max( length - 4, 0 ) - if insertions > 0: - annotations = [(51, " "),(52, " ")] + [(52, alphabet[i]) for i in range(insertions) ] + [(53, " "),(54, " ")] - _numbering[3] = [ (annotations[i], _regions[3][i][1]) for i in range(length) ] - else: # How to gap L2 in Chothia/Kabat/Martin is unclear so we let the alignment do it. - _numbering[3] = _regions[3] - - # FW3 - # Insertions on 68. First deletion 68. 
Otherwise default to alignment - length = len( _regions[4] ) - insertions = max(length - 34, 0) - if insertions > 0: # Insertions on 68 - annotations = [(i," ") for i in range(55,69)]+[(68, alphabet[i]) for i in range(insertions) ]+[(i," ") for i in range(69,89)] - _numbering[4] = [ (annotations[i], _regions[4][i][1]) for i in range(length) ] - elif length == 33: # First deletion on 68 - annotations = [(i," ") for i in range(55,68)]+[(i," ") for i in range(69,89)] - _numbering[4] = [ (annotations[i], _regions[4][i][1]) for i in range(length) ] - else: # More deletions - allow alignment to place them - _numbering[4] = _regions[4] - - - # CDR3 - # Chothia L region 6 (index 5) - # put insertions onto 95 - length = len( _regions[5] ) - - if length > 35: return [], startindex, endindex # Too many insertions. Do not apply numbering. - annotations = get_cdr3_annotations(length, scheme="chothia", chain_type="light") - _numbering[5] = [ (annotations[i], _regions[5][i][1]) for i in range(length) ] - - # Return the full vector and the start and end indices of the numbered region of the sequence - - return gap_missing( _numbering ), startindex, endindex - - -######### -# Kabat # -######### - -# Heavy chains -def number_kabat_heavy(state_vector, sequence): - """ - Apply the Kabat numbering scheme for heavy chains - - Rules should be implemented using two strings - the state string and the region string. - - There are 128 states in the HMMs. Treat X as a direct match in Kabat scheme, I is an insertion. - XXXXXXXXXI XXXXXXXXXXXXXXXXXXXX IIIIXXXXXX XXXXXXXXXXXXXXXX XIXII XXXXXXXXXXXIXXXXXXXXXXXXXXXXXXIIIXXXXXXXXXXXX XXXXXXIII XXXXXXXXXXXXX - 1111111111 22222222222222222222 3333333333 4444444444444444 55555 666666666666666666666666666666666666666666666 777777777 8888888888888 - - - Regions - (N.B These do not match up with any particular definition of CDR) - 1 - Put the insertions at Chothia position 6 - 2 - Simple mapping (treat "I" states as inserts and not own match states) - 3 - CDRH1 - 30 (inc) to 36 (exc) put insertions on 35 - 4 - Simple mapping (treat "I" states as inserts and not own match states) - 5 - CDRH2 - 52 (inc) 58 (exc) put insertions on 52 - 6 - Simple mapping (treat "I" states as inserts and not own match states) - 7 - CDRH3 93 (inc) to 103 (exc) put insertion on 100 - 8 - Simple mapping (treat "I" states as inserts and not own match states) - - """ - - # Set up the numbering - - # State string - 'X' means the imgt position exists in the scheme. 'I' means that it should be treated as an insertion of the previous number - state_string = 'XXXXXXXXXIXXXXXXXXXXXXXXXXXXXXIIIIXXXXXXXXXXXXXXXXXXXXXXXIXIIXXXXXXXXXXXIXXXXXXXXXXXXXXXXXXIIIXXXXXXXXXXXXXXXXXXIIIXXXXXXXXXXXXX' - - # Region string - regions that should be treated separately in putting the numbering together - region_string = '11111111112222222222222333333333333333334444444444444455555555555666666666666666666666666666666666666666777777777777788888888888' - - region_index_dict = {"1":0,"2":1,"3":2,"4":3,"5":4,"6":5,"7":6,"8":7} - - # Define how the scheme's numbering differs from IMGT at the start of each region. 
- # This is updated in the loop below - rels = {0:0, - 1:-1, - 2:-1, - 3:-5, - 4:-5, - 5:-8, - 6:-12, - 7:-15} - - n_regions = 8 - - exclude_deletions = [2,4,6] - - _regions, startindex, endindex = _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions) - - - ############### - # Renumbering # - ############### - - # Renumbering required for 0, 2, 4, 6 regions in Chothia heavy - - _numbering = [ [], _regions[1] , [], _regions[3] , [], _regions[5], [], _regions[7] ] - - - # Kabat H region 1 (index 0) - # Insertions are placed at Kabat position 6. - # Count how many we recognised as insertion by the hmm - insertions = len( [ 1 for _ in _regions[0] if _[0][1] != " " ] ) - # We will place all insertion in this region at Kabat position 6. - if insertions: - start = _regions[0][0][0][0] # The starting Kabat number as found by the HMM (could easily start from 2 for example) - # I have a feeling this may be a source of a bug in very unusual cases. Can't break for now. Will catch mistakes in a validate function. - length = len( _regions[0] ) - annotations = [ (_, " ") for _ in range(start, 7) ] + [ (6, alphabet[_]) for _ in range(insertions) ] + [(7," "),(8," "),(9," ")] - _numbering[0] = [ (annotations[i], _regions[0][i][1]) for i in range(length) ] - else: - _numbering[0] = _regions[0] - - - # CDR1 - # Kabat H region 3 (index 2) - # Put insertions onto 35. Delete from 35 backwards - length = len( _regions[2] ) - insertions = max(0,length - 13) - annotations = [(_,' ') for _ in range(23, 36)][:length] - annotations += [(35, alphabet[i]) for i in range(insertions) ] - _numbering[2] = [ (annotations[i], _regions[2][i][1]) for i in range(length) ] - - # CDR2 - # Chothia H region 5 (index 4) - # put insertions onto 52 - length = len( _regions[4] ) - # 50 to 57 inclusive - insertions = max(length - 8, 0) # Eight positions can be accounted for, the remainder are insertions - # Delete in the order, 52, 51, 50,53, 54 ,55, 56, 57 - annotations = [(50, " "),(51, " "), (52, " ")][:max(0,length-5)] - annotations += [(52, alphabet[i]) for i in range(insertions) ] - annotations += [(53, " "),(54, " "),(55, " "),(56, " "),(57, " ")][ abs( min(0,length-5) ):] - _numbering[4] = [ (annotations[i], _regions[4][i][1]) for i in range(length) ] - - # FW3 - insertions are annotated on 82. The first three are normal positions and annotated automatically. - # Additional insertions do not occur with the kabat or the chothia numbering scheme. - # It does not make sense to place more than A, B, C on 82 as Martin and AHo work show that this is not a place that accepts - # additional insertions. - # The decision here is to allow the alignment to place additional insertions. This is in contrast to Martin where the region - # is renumbered to place insertions on 72. - - # CDR3 - # Chothia H region 7 (index 6) - # put insertions onto 100 - length = len( _regions[6] ) - if length > 36: return [], startindex, endindex # Too many insertions. Do not apply numbering. 
- annotations = get_cdr3_annotations(length, scheme="kabat", chain_type="heavy") # Chothia and Kabat the same here - _numbering[6] = [ (annotations[i], _regions[6][i][1]) for i in range(length) ] - - # Return the full vector and the start and end indices of the numbered region of the sequence - return gap_missing( _numbering ), startindex, endindex - -# Light chains -def number_kabat_light(state_vector, sequence): - """ - Apply the Kabat numbering scheme for light chains - - Rules should be implemented using two strings - the state string and the region string. - - There are 128 states in the HMMs. Treat X as a direct match in Kabat scheme, I is an insertion. - XXXXXXXXXXXXXXXXXXXXXXXXXXXXX IIIIIIX XXXXXXXXXXXXXXXXXXXX XIIIIIIIXXX XXXXXIXXXXXXXIIXXXXXXXXXXXXXXXXXXXXXX XXXXXIIIIXX XXXXXXXXXXXXX - 11111111111111111111111111111 2222222 33333333333333333333 44444444444 5555555555555555555555555555555555555 66666666666 7777777777777 - - - Regions - (N.B These do not match up with any particular definition of CDR) - 1 - Simple mapping (treat "I" states as inserts and not own match states) - 2 - CDRL1 - 24 (inc) to 35 (exc) put insertions on 27 - 3 - Simple mapping (treat "I" states as inserts and not own match states) - 4 - CDRL2 - 51 (inc) 55 (exc) put insertions on 52 - 5 - Simple mapping (treat "I" states as inserts and not own match states) - 6 - CDRL3 89 (inc) to 96 (exc) put insertion on 95 - 7 - Simple mapping (treat "I" states as inserts and not own match states) - - """ - - # Set up the numbering - - - # State string - 'X' means the imgt position exists in the scheme. 'I' means that it should be treated as an insertion of the previous number - state_string = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXIIIIIIXXXXXXXXXXXXXXXXXXXXXXIIIIIIIXXXXXXXXIXXXXXXXIIXXXXXXXXXXXXXXXXXXXXXXXXXXXIIIIXXXXXXXXXXXXXXX' - - # Region string - regions that should be treated separately in putting the numbering together - region_string = '11111111111111111111111222222222222222223333333333333333444444444445555555555555555555555555555555555555666666666666677777777777' - - region_index_dict = {"1":0,"2":1,"3":2,"4":3,"5":4,"6":5,"7":6} - - # Define how the scheme's numbering differs from IMGT at the start of each region. - # This is updated in the loop below - rels = {0:0, - 1: 0, - 2:-6, - 3:-6, - 4:-13, - 5:-16, - 6:-20, - } - - n_regions = 7 - - exclude_deletions = [1,3,5] - - _regions, startindex, endindex = _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions) - - _numbering = [ _regions[0], [], _regions[2], [], _regions[4], [], _regions[6] ] - - - ############### - # Renumbering # - ############### - - # CDR1 - # Kabat L region 2 (index 1) - # put insertions onto 27 - length = len( _regions[1] ) - insertions = max(length - 11, 0) # Eleven positions can be accounted for, the remainder are insertions - # Delete forward from 28 - annotations = [(24, " "),(25, " "), (26, " "), (27, " ")][:max(0,length)] - annotations += [(27, alphabet[i]) for i in range(insertions) ] - annotations += [(28, " "),(29, " "),(30, " "),(31, " "),(32, " "),(33, " "),(34, " ")][ abs( min(0,length-11) ):] - _numbering[1] = [ (annotations[i], _regions[1][i][1]) for i in range(length) ] - - # CDR2 - # Chothia L region 4 (index 3) - # put insertions onto 52. 
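- # (Sketch:) a 6-residue L2 region yields insertions == 2 and is annotated - # 51, 52, 52A, 52B, 53, 54 by the layout built below.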
- length = len( _regions[3] ) - insertions = max( length - 4, 0 ) - if insertions > 0: - annotations = [(51, " "),(52, " ")] + [(52, alphabet[i]) for i in range(insertions) ] + [(53, " "),(54, " ")] - _numbering[3] = [ (annotations[i], _regions[3][i][1]) for i in range(length) ] - else: # How to gap L2 in Chothia/Kabat/Martin is unclear so we let the alignment do it. - _numbering[3] = _regions[3] - - - # FW3 - # All insertions are placed by alignment. This is in contrast to Martin (and Chothia) where they are placed on 68. - # The kabat scheme was defined using a sequence alignment alone. In keeping with this, insertions in FW3 are also only placed - # with respect to the sequence alignment (the HMM). - - # CDR3 - # Chothia L region 6 (index 5) - # put insertions onto 95 - length = len( _regions[5] ) - - if length > 35: return [], startindex, endindex # Too many insertions. Do not apply numbering. - annotations = get_cdr3_annotations(length, scheme="kabat", chain_type="light") - _numbering[5] = [ (annotations[i], _regions[5][i][1]) for i in range(length) ] - - return gap_missing( _numbering ), startindex, endindex - - - - -############################# -# Martin (extended Chothia) # -############################# - -# Heavy chains -def number_martin_heavy(state_vector, sequence): - """ - Apply the Martin (extended Chothia) numbering scheme for heavy chains - - Rules should be implemented using two strings - the state string and the region string. - - There are 128 states in the HMMs. Treat X as a direct match in Martin scheme, I is an insertion. - XXXXXXXXXI XXXXXXXXXXXXXXXXXXXX IIIIXX XXXXXXXXXXXXXXXXXXXX XIXII XXXXXXXXXXXIXXXXXXXXIIIXXXXXXXXXXXXXXXXXXXXXX XXXXXXIII XXXXXXXXXXXXX - 1111111111 22222222222222222222 333333 44444444444444444444 55555 666666666666666666666666666666666666666666666 777777777 8888888888888 - - - Regions - (N.B These do not match up with any particular definition of CDR) - 1 - Put the insertions at Chothia position 8 - 2 - Simple mapping (treat "I" states as inserts and not own match states) - 3 - CDRH1 - 30 (inc) to 34 (exc) put insertions on 31 - 4 - Simple mapping (treat "I" states as inserts and not own match states) - 5 - CDRH2 - 52 (inc) 58 (exc) put insertions on 52 - 6 - Simple mapping (treat "I" states as inserts and not own match states) - 7 - CDRH3 93 (inc) to 103 (exc) put insertion on 100 - 8 - Simple mapping (treat "I" states as inserts and not own match states) - - - Regions 1,3,5 and 7 are renumbered - - """ - - # Set up the numbering - - - # State string - 'X' means the imgt position exists in the scheme. 'I' means that it should be treated as an insertion of the previous number - state_string = 'XXXXXXXXXIXXXXXXXXXXXXXXXXXXXXIIIIXXXXXXXXXXXXXXXXXXXXXXXIXIIXXXXXXXXXXXIXXXXXXXXIIIXXXXXXXXXXXXXXXXXXXXXXXXXXXXIIIXXXXXXXXXXXXX' - - # Region string - regions that should be treated separately in putting the numbering together - region_string = '11111111112222222222222333333333333333444444444444444455555555555666666666666666666666666666666666666666777777777777788888888888' - - region_index_dict = {"1":0,"2":1,"3":2,"4":3,"5":4,"6":5,"7":6,"8":7} - - # Define how the scheme's numbering differs from IMGT at the start of each region. 
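-    # (Sketch, assuming _number_regions adds a region's offset to the IMGT state
-    # number: an IMGT state of 40 falling in region index 3, whose offset is -5,
-    # would start from number 35 in this scheme.)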
-    # This is updated in the loop below
-    rels = {0:0,
-            1:-1,
-            2:-1,
-            3:-5,
-            4:-5,
-            5:-8,
-            6:-12,
-            7:-15}
-
-    n_regions = 8
-
-    exclude_deletions = [2,4,5,6]
-
-    _regions, startindex, endindex = _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions)
-
-
-    ###############
-    # Renumbering #
-    ###############
-
-    # Renumbering required for the 0, 2, 4, 5 and 6 regions in Martin heavy
-
-    _numbering = [ [], _regions[1] , [], _regions[3] , [], _regions[5], [], _regions[7] ]
-
-    # Chothia H region 1 (index 0)
-    # Insertions are placed at Chothia position 8.
-    # Count how many we recognised as insertions by the HMM
-    insertions = len( [ 1 for _ in _regions[0] if _[0][1] != " " ] )
-    # We will place all insertions in this region at Chothia position 8.
-    if insertions:
-        start = _regions[0][0][0][0] # The starting Chothia number as found by the HMM (could easily start from 2 for example)
-        # I have a feeling this may be a source of a bug in very unusual cases. Can't break for now. Will catch mistakes in a validate function.
-        length = len( _regions[0] )
-        annotations = [ (_, " ") for _ in range(start, 9) ] + [ (8, alphabet[_]) for _ in range(insertions) ] + [(9," ")]
-        _numbering[0] = [ (annotations[i], _regions[0][i][1]) for i in range(length) ]
-    else:
-        _numbering[0] = _regions[0]
-
-
-    # CDR1
-    # Chothia H region 3 (index 2)
-    # put insertions onto 31
-    length = len( _regions[2] )
-    insertions = max(length - 11, 0) # Pulled back to the cysteine as heavily engineered cdr1's are not playing nicely
-    if insertions:
-        annotations = [(_, " ") for _ in range(23,32)] + [(31, alphabet[i]) for i in range(insertions) ] + [(32," "),(33," ")]
-    else:
-        annotations = [(_, " ") for _ in range(23,32)][:length-2] + [(32," "),(33," ")][:length]
-    _numbering[2] = [ (annotations[i], _regions[2][i][1]) for i in range(length) ]
-
-    # CDR2
-    # Chothia H region 5 (index 4)
-    # put insertions onto 52
-    length = len( _regions[4] )
-    # 50 to 57 inclusive
-    insertions = max(length - 8, 0) # Eight positions can be accounted for, the remainder are insertions
-    # Delete in the order 52, 51, 50, 53, 54, 55, 56, 57
-    annotations = [(50, " "),(51, " "), (52, " ")][:max(0,length-5)]
-    annotations += [(52, alphabet[i]) for i in range(insertions) ]
-    annotations += [(53, " "),(54, " "),(55, " "),(56, " "),(57, " ")][ abs( min(0,length-5) ):]
-    _numbering[4] = [ (annotations[i], _regions[4][i][1]) for i in range(length) ]
-
-    # FW3
-    # Place all insertions on 72 explicitly.
-    # This is in contrast to the Chothia implementation, where 3 insertions are on 82 and then further insertions are placed by the
-    # alignment
-    # Gaps are placed according to the alignment.
-    length = len( _regions[5] )
-    insertions = max(length - 35, 0)
-    if insertions > 0: # Insertions on 72
-        annotations = [(i,' ') for i in range(58,73)]+[(72, alphabet[i]) for i in range(insertions) ]+[(i,' ') for i in range(73,93)]
-        _numbering[5] = [ (annotations[i], _regions[5][i][1]) for i in range(length) ]
-    else: # Deletions - allow the alignment to place them.
-        _numbering[5] = _regions[5]
-
-
-    # CDR3
-    # Chothia H region 7 (index 6)
-    # put insertions onto 100
-    length = len( _regions[6] )
-    if length > 36: return [], startindex, endindex # Too many insertions. Do not apply numbering.
-    annotations = get_cdr3_annotations(length, scheme="chothia", chain_type="heavy")
-    _numbering[6] = [ (annotations[i], _regions[6][i][1]) for i in range(length) ]
-
-    # Return the full vector and the start and end indices of the numbered region of the sequence
-    return gap_missing( _numbering ), startindex, endindex
-
-# Light chains
-def number_martin_light(state_vector, sequence):
-    """
-    Apply the Martin numbering scheme for light chains
-
-    Rules should be implemented using two strings - the state string and the region string.
-
-    There are 128 states in the HMMs. Treat X as a direct match in the Martin scheme; I is an insertion.
-    XXXXXXXXXXXXXXXXXXXXXXXXXXXXX IIIIIIX XXXXXXXXXXXXXXXXXXXX XIIIIIIIXXX XXXXXIXXXXXXXIIXXXXXXXXXXXXXXXXXXXXXX XXXXXIIIIXX XXXXXXXXXXXXX
-    11111111111111111111111111111 2222222 33333333333333333333 44444444444 5555555555555555555555555555555555555 66666666666 7777777777777
-
-
-    Regions - (N.B. These do not match up with any particular definition of CDR)
-     1 - Simple mapping (treat "I" states as inserts and not own match states)
-     2 - CDRL1 - 30 (inc) to 31 (exc) put insertions on 30
-     3 - Simple mapping (treat "I" states as inserts and not own match states)
-     4 - CDRL2 - 51 (inc) 55 (exc) put insertions on 52
-     5 - Simple mapping (treat "I" states as inserts and not own match states)
-     6 - CDRL3 89 (inc) to 96 (exc) put insertions on 95
-     7 - Simple mapping (treat "I" states as inserts and not own match states)
-
-    Regions 2, 3 and 5 are renumbered
-
-    """
-
-    # The Martin and Chothia specifications for light chains are very similar. Martin is more explicit about the location of indels,
-    # but unlike the heavy chain these are additions instead of changes to the Chothia scheme. Thus, Martin light is implemented
-    # as Chothia light.
-    return number_chothia_light(state_vector,sequence)
-
-
-###########
-# Wolfguy #
-###########
-# The Wolfguy numbering scheme is an in-house scheme used at Roche. It has been described publicly in the paper:
-# Prediction of VH-VL domain orientation for antibody variable domain modeling. Bujotzek A. et al. Proteins 2015 83(4) 681-95
-#
-# It is similar in gapping to IMGT and is defined only for heavy and light antibody chains.
-# Unlike other schemes, the numbering denotes both the chain (heavy 101-499, light 501-799) and the region (last two digits of 50
-# or below framework, above 50 CDR). All CDRs of length less than 50 can be handled without the need for insertion codes. Numbering of the
-# framework behaves similarly to IMGT in that all positions are assumed to be accounted for. Framework insertions are placed by
-# the alignment.
-#
-# Numbering of all CDRs is performed symmetrically with the exception of CDRL1. In this case the CDR is numbered according to a
-# pattern specific to the canonical class. This is recognised by length and by sequence similarity to a consensus sequence. If a
-# length has not been observed it is numbered symmetrically.
-
-
-def number_wolfguy_heavy(state_vector, sequence):
-    """
-    Apply the wolfguy numbering scheme for heavy chains
-
-    The scheme numbers the sequence using different segments so that the numbering tells you
-    which part of the antibody the sequence describes.
-
-    XXXXXXXXXIXXXXXXXXXXXXXXXX XXXXXXXXXXXXXX XXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXIX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXX
-    11111111111111111111111111 22222222222222 33333333333333 44444444444444444444 555555555555555555555555555555 6666666666666 77777777777'
-
-    Regions - (N.B. These do not match up with any particular definition of CDR)
-     1 - Simple mapping (treat "I" states as inserts and not own match states)
-     2 - CDRH1 - 155-199 (inc). Gap symmetrically about 175-176.
-     3 - Simple mapping (treat "I" states as inserts and not own match states)
-     4 - CDRH2 - 251-299 (inc). Gap symmetrically about 271-272, then gap back from 294.
-     5 - Simple mapping (treat "I" states as inserts and not own match states)
-     6 - CDRH3 331,332 and 351-399 (inc). Gap according to the fixed deletion order defined in the code below (roughly symmetric about 374).
-     7 - Simple mapping (treat "I" states as inserts and not own match states)
-
-    Start gaps on rhs each time.
-    """
-    # Set up the numbering
-
-    # State string - 'X' means the imgt position exists in the scheme. 'I' means that it should be treated as an insertion of the previous number
-    state_string = 'XXXXXXXXXIXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXIXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
-
-    # Region string - regions that should be treated separately in putting the numbering together
-    region_string = '11111111111111111111111111222222222222223333333333333344444444444444444444555555555555555555555555555555666666666666677777777777'
-
-    region_index_dict = {"1":0,"2":1,"3":2,"4":3,"5":4,"6":5,"7":6}
-
-    # Define how the scheme's numbering differs from IMGT at the start of each region.
-    # This is updated in the loop below
-    rels = {0:100,
-            1:124,
-            2:160,
-            3:196,
-            4:226,
-            5:244,
-            6:283}
-
-    n_regions = 7
-
-    exclude_deletions = [1,3,5]
-
-    _regions, startindex, endindex = _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions)
-
-    ###############
-    # Renumbering #
-    ###############
-
-    # Renumbering required for the 1, 3 and 5 regions in wolfguy heavy
-    _numbering = [ _regions[0], [] , _regions[2], [], _regions[4] , [], _regions[6] ]
-
-    # CDRH1
-    # Delete symmetrically about 175-176. Delete right first.
-    # May have to change this to reflect where the point of symmetry is
-    ordered_deletions = [151]
-    for p1,p2 in zip( list(range(152,176)), list(range(199, 175,-1))): ordered_deletions += [ p1,p2 ]
-    length = len( _regions[1] )
-    annotations = sorted(ordered_deletions[:length])
-    _numbering[1] = [ ((annotations[i]," "), _regions[1][i][1]) for i in range(length) ]
-
-    # CDRH2
-    # Delete symmetrically about 271. Delete right first.
-    # Then delete right from 288
-    ordered_deletions = [251]
-    for p1,p2 in zip( list(range(252,271)), list(range(290, 271,-1))): ordered_deletions += [ p1,p2 ]
-    ordered_deletions.append( 271 )
-    ordered_deletions = list(range( 299, 290, -1)) + ordered_deletions
-    length = len( _regions[3] )
-    annotations = sorted(ordered_deletions[:length])
-    _numbering[3] = [ ((annotations[i]," "), _regions[3][i][1]) for i in range(length) ]
-
-    # CDRH3
-    # Delete symmetrically about 374. Delete right first.
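-    # For illustration (the first ten entries of the deletion order built below):
-    # a ten-residue CDR-H3 keeps positions 331, 332, 351, 352, 353, 395, 396,
-    # 397, 398 and 399, i.e. the loop fills in symmetrically from the two anchors.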
-    # Scheme changes at length 8
-    # Scheme changes at length 12
-    ordered_deletions = []
-    for p1,p2 in zip( list(range(356,374)), list(range(391, 373,-1))): ordered_deletions += [ p1,p2 ]
-    ordered_deletions = [ 354, 394, 355, 393, 392 ] + ordered_deletions
-    ordered_deletions = [331,332] + [ 399, 398, 351, 352, 397, 353, 396, 395 ] + ordered_deletions
-    length = len( _regions[5] )
-
-    if length > len(ordered_deletions): return [], startindex, endindex # Too many insertions. Do not apply numbering.
-    annotations = sorted(ordered_deletions[:length])
-    _numbering[5] = [ ((annotations[i]," "), _regions[5][i][1]) for i in range(length) ]
-
-    # Return the full vector and the start and end indices of the numbered region of the sequence
-    return sum( _numbering, [] ), startindex, endindex
-
-
-def number_wolfguy_light(state_vector, sequence):
-    """
-    Apply the wolfguy numbering scheme for light chains
-
-    The scheme numbers the sequence using different segments so that the numbering tells you
-    which part of the antibody the sequence describes.
-
-    XXXXXXX XXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXX XXXIXXXXXXX XXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXX
-    1111111 AAA BBBBBBBBBBBBB 22222222222222222 333333333333333 44444444444444 55555555555 6666 77777777777777777777 8888888888888 99999999999
-
-    Regions - (N.B. These do not match up with any particular definition of CDR)
-     1 - Simple mapping (treat "I" states as inserts and not own match states)
-     A - Move indels onto 508
-     B - Simple mapping (treat "I" states as inserts and not own match states)
-     2 - CDRL1 - 551-599 (inc). Assign via the matching consensus sequence and length.
-     3 - Simple mapping (treat "I" states as inserts and not own match states)
-     4 - CDRL2 - 651-699 (inc). Gap about 673 then right from 694
-     5 - Simple mapping (treat "I" states as inserts and not own match states)
-     6 - Move indels onto 713 and 714
-     7 - Simple mapping (treat "I" states as inserts and not own match states)
-     8 - CDRL3 751-799 (inc). Gap symmetrically about 775-776
-     9 - Simple mapping (treat "I" states as inserts and not own match states)
-
-    """
-    # Set up the numbering
-
-    # State string - 'X' means the imgt position exists in the scheme. 'I' means that it should be treated as an insertion of the previous number
-    state_string = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXIXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
-
-    # Region string - regions that should be treated separately in putting the numbering together
-    region_string = '1111111AAABBBBBBBBBBBBB222222222222222223333333333333334444444444444455555555555666677777777777777777777888888888888899999999999'
-
-    region_index_dict = {"1":0,"A":1,"B":2,"2":3,"3":4,"4":5,"5":6,"6":7,"7":8,"8":9,"9":10}
-
-    # Define how the scheme's numbering differs from IMGT at the start of each region.
-    # This is updated in the loop below
-    rels = {0:500,
-            1:500,
-            2:500,
-            3:527,
-            4:560,
-            5:595,
-            6:631,
-            7:630,
-            8:630,
-            9:646,
-            10:683}
-
-    n_regions = 11
-
-    exclude_deletions = [1,3,5,7,9]
-
-    _regions, startindex, endindex = _number_regions(sequence, state_vector, state_string , region_string, region_index_dict, rels, n_regions, exclude_deletions)
-
-    ###############
-    # Renumbering #
-    ###############
-
-    # Renumbering required for the 1, 3, 5, 7 and 9 regions in wolfguy light
-    _numbering = [ _regions[0], [], _regions[2], [] , _regions[4], [], _regions[6], [], _regions[8], [], _regions[10] ]
-
-
-    # Gaps in the first section go onto 508 instead of the IMGT 510 equivalent
-    length = len(_regions[1] )
-    annotations = sorted([ (510,' '), (509, ' '), (508, ' ')][ :length ] + [(508,a) for a in alphabet[:max(0, length-3)]])
-    _numbering[1] = [ (annotations[i], _regions[1][i][1]) for i in range(length) ]
-
-    # CDRL1
-    # Number by predicting the canonical
-    length = len(_regions[3] )
-    annotations = _get_wolfguy_L1( _regions[3], length)
-    _numbering[3] = [ ((annotations[i]," "), _regions[3][i][1]) for i in range(length) ]
-
-    # CDRL2
-    # Delete about 673. Finally delete right from 694. Maintain 651 as the last deletion
-    ordered_deletions = []
-    for p1,p2 in zip( list(range(652,673)), list(range(694, 672,-1))): ordered_deletions += [ p2,p1 ]
-    ordered_deletions = [651] + list(range( 699, 694, -1)) + ordered_deletions + [673]
-
-    length = len( _regions[5] )
-    annotations = sorted(ordered_deletions[:length])
-    _numbering[5] = [ ((annotations[i]," "), _regions[5][i][1]) for i in range(length) ]
-
-
-    # The placement of the indel in wolfguy is different to that in IMGT
-    length = len( _regions[7] )
-    insertions = max( 0, length - 4 )
-    annotations = [(711, ' '), (712, ' '), (713, ' '), (714, ' ')][:length] + [ (714, a) for a in alphabet[:insertions] ]
-    _numbering[7] = [ (annotations[i], _regions[7][i][1]) for i in range(length) ]
-
-    # CDRL3
-    # Delete symmetrically about 775. Delete right first. Finally delete 798 and 799
-    ordered_deletions = []
-    for p1,p2 in zip( list(range(751,775)), list(range(799, 775,-1))): ordered_deletions += [ p1,p2 ]
-    ordered_deletions.append( 775 )
-
-    length = len( _regions[9] )
-    if length > len(ordered_deletions): return [], startindex, endindex # Too many insertions. Do not apply numbering.
-    annotations = sorted(ordered_deletions[:length])
-    _numbering[9] = [ ((annotations[i]," "), _regions[9][i][1]) for i in range(length) ]
-
-    # Return the full vector and the start and end indices of the numbered region of the sequence
-    return sum( _numbering, [] ), startindex, endindex
-
-
-def _get_wolfguy_L1(seq, length):
-    """
-    Wolfguy's L1 annotation is based on recognising the length and the sequence pattern defined
-    by a set of rules. If the length has not been characterised, we number symmetrically about the
-    middle of the loop.
-    """
-
-    # These are the annotations for different lengths of L1 according to the wolfguy definitions.
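-    # Matching works by summing BLOSUM62 substitution scores between the loop and
-    # each same-length consensus below; e.g. an 11-mer identical to 'RASQDISSYLA'
-    # scores highest against canonical 11a and receives its annotation list
-    # [551, 552, 553, 556, 561, 562, 571, 596, 597, 598, 599].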
-    L1_sequences = {
-        9: [['9', 'XXXXXXXXX', [551, 552, 554, 556, 563, 572, 597, 598, 599]]],
-        10: [['10', 'XXXXXXXXXX', [551, 552, 553, 556, 561, 562, 571, 597, 598, 599]]],
-        11: [['11a', 'RASQDISSYLA', [551, 552, 553, 556, 561, 562, 571, 596, 597, 598, 599]],
-             ['11b', 'GGNNIGSKSVH', [551, 552, 554, 556, 561, 562, 571, 572, 597, 598, 599]],
-             ['11b.2','SGDQLPKKYAY', [551, 552, 554, 556, 561, 562, 571, 572, 597, 598, 599]]],
-        12: [['12a', 'TLSSQHSTYTIE', [551, 552, 553, 554, 555, 556, 561, 563, 572, 597, 598, 599]],
-             ['12b', 'TASSSVSSSYLH', [551, 552, 553, 556, 561, 562, 571, 595, 596, 597, 598, 599]],
-             ['12c', 'RASQSVxNNYLA', [551, 552, 553, 556, 561, 562, 571, 581, 596, 597, 598, 599]],
-             ['12d', 'rSShSIrSrrVh', [551, 552, 553, 556, 561, 562, 571, 581, 596, 597, 598, 599]]],
-        13: [['13a', 'SGSSSNIGNNYVS', [551, 552, 554, 555, 556, 557, 561, 562, 571, 572, 597, 598, 599]],
-             ['13b', 'TRSSGSLANYYVQ', [551, 552, 553, 554, 556, 561, 562, 563, 571, 572, 597, 598, 599]]],
-        14: [['14a', 'RSSTGAVTTSNYAN', [551, 552, 553, 554, 555, 561, 562, 563, 564, 571, 572, 597, 598, 599]],
-             ['14b', 'TGTSSDVGGYNYVS', [551, 552, 554, 555, 556, 557, 561, 562, 571, 572, 596, 597, 598, 599]]],
-        15: [['15', 'XXXXXXXXXXXXXXX', [551, 552, 553, 556, 561, 562, 563, 581, 582, 594, 595, 596, 597, 598, 599]]],
-        16: [['16', 'XXXXXXXXXXXXXXXX', [551, 552, 553, 556, 561, 562, 563, 581, 582, 583, 594, 595, 596, 597, 598, 599]]],
-        17: [['17', 'XXXXXXXXXXXXXXXXX', [551, 552, 553, 556, 561, 562, 563, 581, 582, 583, 584, 594, 595, 596, 597, 598, 599]]]
-    }
-
-    if length in L1_sequences: # Use the pre-defined motif
-        # Find the maximum scoring canonical form for this length.
-        curr_max = None, -10000
-        for canonical in L1_sequences[length]:
-            sub_score = 0
-            for i in range( length ):
-                try:
-                    sub_score += blosum62[ (seq[i][1].upper(), canonical[1][i].upper() ) ]
-                except KeyError:
-                    sub_score += blosum62[ (canonical[1][i].upper(), seq[i][1].upper() ) ]
-            if sub_score > curr_max[1]:
-                curr_max = canonical, sub_score
-
-        # return the annotations
-        return curr_max[0][2]
-    else: # Use a symmetric numbering about the anchors.
-        ordered_deletions = []
-        for p1,p2 in zip( list(range(551,575)), list(range(599, 575,-1))): ordered_deletions += [ p2,p1 ]
-        ordered_deletions.append(575)
-        return sorted( ordered_deletions[:length] )
-
-def gap_missing( numbering ):
-    '''
-    Place gaps when a number is missing. All schemes except wolfguy are continuously numbered.
-    '''
-    # Gaps placed where a number is not present
-    num = [ ((0,' '),'-') ]
-    for p, a in sum( numbering, [] ):
-        if p[0] > num[-1][0][0]+1:
-            for _i in range( num[-1][0][0]+1, p[0] ):
-                num.append( ((_i, ' '), '-' ) )
-        num.append( (p,a) )
-    return num[1:]
-
-
-######################
-# Annotation of CDR3 #
-######################
-
-def get_cdr3_annotations(length, scheme="imgt", chain_type=""):
-    """
-    Given the length of a CDR3, give back a list of the annotations that should be applied to the sequence.
-
-    This function should be deprecated.
-    """
-    az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-    za = "ZYXWVUTSRQPONMLKJIHGFEDCBA"
-
-    if scheme=="imgt":
-        start, end = 105, 118 # start (inclusive) end (exclusive)
-        annotations = [None for _ in range(max(length,13))]
-        front = 0
-        back = -1
-        assert (length-13) < 50, "Too many insertions for numbering scheme to handle" # We ran out of letters.
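-        # Worked example (follows from the two loops below): a 15-residue IMGT
-        # CDR3 is numbered 105..111, 111A, 112A, 112..117 - the two surplus
-        # residues take insertion codes on 111 and 112, keeping the gap symmetric.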
-        for i in range(min(length,13)):
-            if i%2:
-                annotations[back] = (end+back, " ")
-                back -= 1
-            else:
-                annotations[front] = (start+front, " ")
-                front += 1
-        for i in range(max(0,length-13)): # add insertions onto 111 and 112 in turn
-            if i%2:
-                annotations[back] = (112, za[back+6])
-                back -= 1
-            else:
-                annotations[front] = (111, az[front-7])
-                front += 1
-        return annotations
-
-    elif scheme in [ "chothia", "kabat"] and chain_type=="heavy": # For chothia and kabat
-        # Number forwards from 93
-        insertions = max(length - 10, 0)
-        assert insertions < 27, "Too many insertions for numbering scheme to handle" # We ran out of letters.
-        ordered_deletions = [ (100, ' '), (99,' '), (98,' '), (97,' '), (96,' '), (95,' '), (101,' '),(102,' '),(94,' '), (93,' ') ]
-        annotations = sorted( ordered_deletions[ max(0, 10-length): ] + [ (100,a) for a in az[:insertions ] ] )
-        return annotations
-
-    elif scheme in [ "chothia", "kabat"] and chain_type=="light":
-        # Number forwards from 89
-        insertions = max(length - 9, 0)
-        assert insertions < 27, "Too many insertions for numbering scheme to handle" # We ran out of letters.
-        ordered_deletions = [ (95,' '),(94,' '),(93,' '),( 92,' '),(91,' '),(96,' '),(97,' '),(90,' '),(89,' ') ]
-        annotations = sorted( ordered_deletions[ max(0, 9-length): ] + [ (95,a) for a in az[:insertions ] ] )
-        return annotations
-
-    else:
-        raise AssertionError("Unimplemented scheme")
-
diff --git a/spaces/luost26/DiffAb/diffab/datasets/_base.py b/spaces/luost26/DiffAb/diffab/datasets/_base.py
deleted file mode 100644
index b2cac9ed9ad6591eb7e1240f2e28da1791999431..0000000000000000000000000000000000000000
--- a/spaces/luost26/DiffAb/diffab/datasets/_base.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from torch.utils.data import Dataset, ConcatDataset
-from diffab.utils.transforms import get_transform
-
-
-_DATASET_DICT = {}
-
-
-def register_dataset(name):
-    def decorator(cls):
-        _DATASET_DICT[name] = cls
-        return cls
-    return decorator
-
-
-def get_dataset(cfg):
-    transform = get_transform(cfg.transform) if 'transform' in cfg else None
-    return _DATASET_DICT[cfg.type](cfg, transform=transform)
-
-
-@register_dataset('concat')
-def get_concat_dataset(cfg, transform=None):
-    # get_dataset always passes a `transform` keyword, so accept (and reject) it here too.
-    assert transform is None, 'transform is not supported.'
-    datasets = [get_dataset(d) for d in cfg.datasets]
-    return ConcatDataset(datasets)
-
-
-@register_dataset('balanced_concat')
-class BalancedConcatDataset(Dataset):
-
-    def __init__(self, cfg, transform=None):
-        super().__init__()
-        assert transform is None, 'transform is not supported.'
-        self.datasets = [get_dataset(d) for d in cfg.datasets]
-        self.max_size = max([len(d) for d in self.datasets])
-
-    def __len__(self):
-        return self.max_size * len(self.datasets)
-
-    def __getitem__(self, idx):
-        dataset_idx = idx // self.max_size
-        return self.datasets[dataset_idx][idx % len(self.datasets[dataset_idx])]
diff --git a/spaces/luost26/DiffAb/diffab/utils/protein/writers.py b/spaces/luost26/DiffAb/diffab/utils/protein/writers.py
deleted file mode 100644
index 2889e8e7ebe938f2a054a6d1a84b7f50318d8430..0000000000000000000000000000000000000000
--- a/spaces/luost26/DiffAb/diffab/utils/protein/writers.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-import warnings
-from Bio import BiopythonWarning
-from Bio.PDB import PDBIO
-from Bio.PDB.StructureBuilder import StructureBuilder
-
-from .constants import AA, restype_to_heavyatom_names
-
-
-def save_pdb(data, path=None):
-    """
-    Args:
-        data: A dict that contains: `chain_nb`, `chain_id`, `aa`, `resseq`, `icode`,
-            `pos_heavyatom`, `mask_heavyatom`.
- """ - - def _mask_select(v, mask): - if isinstance(v, str): - return ''.join([s for i, s in enumerate(v) if mask[i]]) - elif isinstance(v, list): - return [s for i, s in enumerate(v) if mask[i]] - elif isinstance(v, torch.Tensor): - return v[mask] - else: - return v - - def _build_chain(builder, aa_ch, pos_heavyatom_ch, mask_heavyatom_ch, chain_id_ch, resseq_ch, icode_ch): - builder.init_chain(chain_id_ch[0]) - builder.init_seg(' ') - - for aa_res, pos_allatom_res, mask_allatom_res, resseq_res, icode_res in \ - zip(aa_ch, pos_heavyatom_ch, mask_heavyatom_ch, resseq_ch, icode_ch): - if not AA.is_aa(aa_res.item()): - print('[Warning] Unknown amino acid type at %d%s: %r' % (resseq_res.item(), icode_res, aa_res.item())) - continue - restype = AA(aa_res.item()) - builder.init_residue( - resname = str(restype), - field = ' ', - resseq = resseq_res.item(), - icode = icode_res, - ) - - for i, atom_name in enumerate(restype_to_heavyatom_names[restype]): - if atom_name == '': continue # No expected atom - if (~mask_allatom_res[i]).any(): continue # Atom is missing - if len(atom_name) == 1: fullname = ' %s ' % atom_name - elif len(atom_name) == 2: fullname = ' %s ' % atom_name - elif len(atom_name) == 3: fullname = ' %s' % atom_name - else: fullname = atom_name # len == 4 - builder.init_atom(atom_name, pos_allatom_res[i].tolist(), 0.0, 1.0, ' ', fullname,) - - warnings.simplefilter('ignore', BiopythonWarning) - builder = StructureBuilder() - builder.init_structure(0) - builder.init_model(0) - - unique_chain_nb = data['chain_nb'].unique().tolist() - for ch_nb in unique_chain_nb: - mask = (data['chain_nb'] == ch_nb) - aa = _mask_select(data['aa'], mask) - pos_heavyatom = _mask_select(data['pos_heavyatom'], mask) - mask_heavyatom = _mask_select(data['mask_heavyatom'], mask) - chain_id = _mask_select(data['chain_id'], mask) - resseq = _mask_select(data['resseq'], mask) - icode = _mask_select(data['icode'], mask) - - _build_chain(builder, aa, pos_heavyatom, mask_heavyatom, chain_id, resseq, icode) - - structure = builder.get_structure() - if path is not None: - io = PDBIO() - io.set_structure(structure) - io.save(path) - return structure diff --git a/spaces/lvwerra/hf-review/app.py b/spaces/lvwerra/hf-review/app.py deleted file mode 100644 index 79b31d0fefd61d892407697243fc1da200a8dd70..0000000000000000000000000000000000000000 --- a/spaces/lvwerra/hf-review/app.py +++ /dev/null @@ -1,100 +0,0 @@ -import gradio as gr - - -import pandas as pd -import json -from datasets import load_dataset -import requests -from huggingface_hub import list_datasets, list_models, list_spaces -from collections import Counter -import numpy as np - - -def compute_ranking(df, column, method="sum", keep="last"): - df_rank = df.groupby("author").aggregate({column: method})[[column]] - df_rank = df_rank.sort_values(by=column) - df_rank.reset_index(drop=True, inplace=True) - df_rank["top_perc"] = df_rank.apply(lambda x: f"{100 * (1-(x.name/len(df_rank))):.2f}", axis=1) - df_rank = df_rank.drop_duplicates(subset=column, keep=keep) - df_rank = df_rank.rename({column: "value"}, axis='columns') - return df_rank - -class NpEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, np.integer): - return int(obj) - if isinstance(obj, np.floating): - return float(obj) - if isinstance(obj, np.ndarray): - return obj.tolist() - return super(NpEncoder, self).default(obj) - -ds = load_dataset("open-source-metrics/model-repos-stats", split="train") -df = ds.to_pandas() - -df_ranks = {} -df_ranks["likes"] = compute_ranking(df, 
"likes") -df_ranks["downloads"] = compute_ranking(df, "downloads_30d") -df_ranks["repos"] = compute_ranking(df, "repo_id", method="count") - -with open("./html_template.html", "r") as f: - template = f.read() - -def create_user_summary(user_name): - summary = {} - - df_user = df.loc[df["author"]==user_name] - - if len(df_user) == 0: - return """

Unfortunately there is not enough data for your report.


""" - - - r = requests.get(f'https://huggingface.co/api/users/{user_name}/likes') - user_datasets = [dataset for dataset in list_datasets(author=user_name)] - user_spaces = [space for space in list_spaces(author=user_name)] - - summary["likes_user_total"] = df_user["likes"].sum() - summary["likes_user_given"] = len(r.json()) - summary["likes_user_top"] = df_ranks["likes"][df_ranks["likes"]["value"]>=summary["likes_user_total"]].iloc[0]["top_perc"] - summary["likes_repo_most"] = df_user.sort_values(by="likes", ascending=False).iloc[0]["repo_id"] - summary["likes_repo_most_n"] = df_user.sort_values(by="likes", ascending=False).iloc[0]["likes"] - - summary["downloads_user_total"] = df_user["downloads_30d"].sum() - summary["downloads_user_top"] = df_ranks["downloads"][df_ranks["downloads"]["value"]>=summary["downloads_user_total"]].iloc[0]["top_perc"] - summary["downlods_repo_most"] = df_user.sort_values(by="downloads_30d", ascending=False).iloc[0]["repo_id"] - summary["downlods_repo_most_n"] = df_user.sort_values(by="downloads_30d", ascending=False).iloc[0]["downloads_30d"] - - summary["repos_model_total"] = len(df_user) - summary["repos_model_top"] = df_ranks["repos"][df_ranks["repos"]["value"]>=summary["repos_model_total"]].iloc[0]["top_perc"] - summary["repos_model_fav_type"] = Counter(df_user["model_type"].dropna()).most_common(1)[0][0] - - summary["repos_datasets_total"] = len(user_datasets) - summary["repos_spaces_total"] = len(user_spaces) - summary["repos_spaces_fav_sdk"] = Counter([getattr(info, "sdk", "n/a") for info in user_spaces]).most_common(1)[0][0] - - return dict_to_html(summary) - - -def dict_to_html(summary): - - report = template - - for key in summary: - report = report.replace("{{" + key + "}}", str(summary[key])) - return report - - -demo = gr.Blocks( - css=".gradio-container {background-color: #000000}" -) -with demo: - with gr.Row(): - gr.HTML(value="""

Enter your HF user name:

""") - with gr.Row(): - username = gr.Textbox(lines=1, max_lines=1, label="User name") - with gr.Row(): - run = gr.Button() - with gr.Row(): - output = gr.HTML(label="Generated code") - event = run.click(create_user_summary, [username], output) -demo.launch() \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/vector.h b/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/vector.h deleted file mode 100644 index ee5cfce6aa8d26a2d6d924361f42bfec99cf8601..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/vector.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! \file thrust/system/cpp/vector.h - * \brief A dynamically-sizable array of elements which reside in memory available to - * Thrust's standard C++ system. - */ - -#pragma once - -#include -#include -#include -#include - -namespace thrust -{ - -// forward declaration of host_vector -template class host_vector; - -namespace system -{ -namespace cpp -{ - -/*! \p cpp::vector is a container that supports random access to elements, - * constant time removal of elements at the end, and linear time insertion - * and removal of elements at the beginning or in the middle. The number of - * elements in a \p cpp::vector may vary dynamically; memory management is - * automatic. The elements contained in a \p cpp::vector reside in memory - * available to the \p cpp system. - * - * \tparam T The element type of the \p cpp::vector. - * \tparam Allocator The allocator type of the \p cpp::vector. Defaults to \p cpp::allocator. - * - * \see http://www.sgi.com/tech/stl/Vector.html - * \see host_vector For the documentation of the complete interface which is - * shared by \p cpp::vector - * \see device_vector - */ -template > -using vector = thrust::detail::vector_base; - -} // end cpp -} // end system - -// alias system::cpp names at top-level -namespace cpp -{ - -using thrust::system::cpp::vector; - -} // end cpp - -} // end thrust diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Face_Enhancement/data/custom_dataset.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/Face_Enhancement/data/custom_dataset.py deleted file mode 100644 index aa0a0d79a5ca7a1816a2089b82e7ef90b28c0f43..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Face_Enhancement/data/custom_dataset.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from data.pix2pix_dataset import Pix2pixDataset -from data.image_folder import make_dataset - - -class CustomDataset(Pix2pixDataset): - """ Dataset that loads images from directories - Use option --label_dir, --image_dir, --instance_dir to specify the directories. - The images in the directories are sorted in alphabetical order and paired in order. 
-    """
-
-    @staticmethod
-    def modify_commandline_options(parser, is_train):
-        parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
-        parser.set_defaults(preprocess_mode="resize_and_crop")
-        load_size = 286 if is_train else 256
-        parser.set_defaults(load_size=load_size)
-        parser.set_defaults(crop_size=256)
-        parser.set_defaults(display_winsize=256)
-        parser.set_defaults(label_nc=13)
-        parser.set_defaults(contain_dontcare_label=False)
-
-        parser.add_argument(
-            "--label_dir", type=str, required=True, help="path to the directory that contains label images"
-        )
-        parser.add_argument(
-            "--image_dir", type=str, required=True, help="path to the directory that contains photo images"
-        )
-        parser.add_argument(
-            "--instance_dir",
-            type=str,
-            default="",
-            help="path to the directory that contains instance maps. Leave blank if it does not exist",
-        )
-        return parser
-
-    def get_paths(self, opt):
-        label_dir = opt.label_dir
-        label_paths = make_dataset(label_dir, recursive=False, read_cache=True)
-
-        image_dir = opt.image_dir
-        image_paths = make_dataset(image_dir, recursive=False, read_cache=True)
-
-        if len(opt.instance_dir) > 0:
-            instance_dir = opt.instance_dir
-            instance_paths = make_dataset(instance_dir, recursive=False, read_cache=True)
-        else:
-            instance_paths = []
-
-        assert len(label_paths) == len(
-            image_paths
-        ), "The #images in %s and %s do not match. Is there something wrong?" % (label_dir, image_dir)
-
-        return label_paths, image_paths, instance_paths
diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/train_mapping.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/train_mapping.py
deleted file mode 100644
index ffff4a5de7622e831989e8cb0daa694325a345b5..0000000000000000000000000000000000000000
--- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/train_mapping.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
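-# Note on resuming: iter.txt (written at the end of each epoch below) stores an
-# "epoch,iteration" pair; e.g. a file containing "12,0" makes --continue_train
-# resume from the start of epoch 12. (Illustrative values.)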
- -import time -from collections import OrderedDict -from options.train_options import TrainOptions -from data.data_loader import CreateDataLoader -from models.mapping_model import Pix2PixHDModel_Mapping -import util.util as util -from util.visualizer import Visualizer -import os -import numpy as np -import torch -import torchvision.utils as vutils -from torch.autograd import Variable -import datetime -import random - - - -opt = TrainOptions().parse() -visualizer = Visualizer(opt) -iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt') -if opt.continue_train: - try: - start_epoch, epoch_iter = np.loadtxt(iter_path , delimiter=',', dtype=int) - except: - start_epoch, epoch_iter = 1, 0 - visualizer.print_save('Resuming from epoch %d at iteration %d' % (start_epoch-1, epoch_iter)) -else: - start_epoch, epoch_iter = 1, 0 - -if opt.which_epoch != "latest": - start_epoch=int(opt.which_epoch) - visualizer.print_save('Notice : Resuming from epoch %d at iteration %d' % (start_epoch - 1, epoch_iter)) - -opt.start_epoch=start_epoch -### temp for continue train unfixed decoder - -data_loader = CreateDataLoader(opt) -dataset = data_loader.load_data() -dataset_size = len(dataset) * opt.batchSize -print('#training images = %d' % dataset_size) - - -model = Pix2PixHDModel_Mapping() -model.initialize(opt) - -path = os.path.join(opt.checkpoints_dir, opt.name, 'model.txt') -fd = open(path, 'w') - -if opt.use_skip_model: - fd.write(str(model.mapping_net)) - fd.close() -else: - fd.write(str(model.netG_A)) - fd.write(str(model.mapping_net)) - fd.close() - -if opt.isTrain and len(opt.gpu_ids) > 1: - model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids) - - - -total_steps = (start_epoch-1) * dataset_size + epoch_iter - -display_delta = total_steps % opt.display_freq -print_delta = total_steps % opt.print_freq -save_delta = total_steps % opt.save_latest_freq -### used for recovering training - -for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1): - epoch_s_t=datetime.datetime.now() - epoch_start_time = time.time() - if epoch != start_epoch: - epoch_iter = epoch_iter % dataset_size - for i, data in enumerate(dataset, start=epoch_iter): - iter_start_time = time.time() - total_steps += opt.batchSize - epoch_iter += opt.batchSize - - # whether to collect output images - save_fake = total_steps % opt.display_freq == display_delta - - ############## Forward Pass ###################### - #print(pair) - losses, generated = model(Variable(data['label']), Variable(data['inst']), - Variable(data['image']), Variable(data['feat']), infer=save_fake) - - # sum per device losses - losses = [ torch.mean(x) if not isinstance(x, int) else x for x in losses ] - loss_dict = dict(zip(model.module.loss_names, losses)) - - # calculate final loss scalar - loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5 - loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0) + loss_dict.get('G_Feat_L2', 0) +loss_dict.get('Smooth_L1', 0)+loss_dict.get('G_Feat_L2_Stage_1',0) - #loss_G = loss_dict['G_Feat_L2'] - - ############### Backward Pass #################### - # update generator weights - model.module.optimizer_mapping.zero_grad() - loss_G.backward() - model.module.optimizer_mapping.step() - - # update discriminator weights - model.module.optimizer_D.zero_grad() - loss_D.backward() - model.module.optimizer_D.step() - - ############## Display results and errors ########## - ### print out errors - if i == 0 or total_steps % opt.print_freq == print_delta: - errors = {k: v.data if 
not isinstance(v, int) else v for k, v in loss_dict.items()}
-            t = (time.time() - iter_start_time) / opt.batchSize
-            visualizer.print_current_errors(epoch, epoch_iter, errors, t, model.module.old_lr)
-            visualizer.plot_current_errors(errors, total_steps)
-
-        ### display output images
-        if save_fake:
-
-            if not os.path.exists(opt.outputs_dir + opt.name):
-                os.makedirs(opt.outputs_dir + opt.name)
-
-            imgs_num = 5
-            if opt.NL_use_mask:
-                mask = data['inst'][:imgs_num]
-                mask = mask.repeat(1,3,1,1)
-                imgs = torch.cat((data['label'][:imgs_num], mask, generated.data.cpu()[:imgs_num], data['image'][:imgs_num]), 0)
-            else:
-                imgs = torch.cat((data['label'][:imgs_num], generated.data.cpu()[:imgs_num], data['image'][:imgs_num]), 0)
-
-            imgs = (imgs+1.)/2.0 ## de-normalize
-
-            try:
-                image_grid = vutils.save_image(imgs, opt.outputs_dir + opt.name + '/' + str(epoch) + '_' + str(total_steps) + '.png',
-                    nrow=imgs_num, padding=0, normalize=True)
-            except OSError as err:
-                print(err)
-
-        if epoch_iter >= dataset_size:
-            break
-
-    # end of epoch
-    epoch_e_t = datetime.datetime.now()
-    iter_end_time = time.time()
-    print('End of epoch %d / %d \t Time Taken: %s' %
-          (epoch, opt.niter + opt.niter_decay, str(epoch_e_t-epoch_s_t)))
-
-    ### save model for this epoch
-    if epoch % opt.save_epoch_freq == 0:
-        print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
-        model.module.save('latest')
-        model.module.save(epoch)
-        np.savetxt(iter_path, (epoch+1, 0), delimiter=',', fmt='%d')
-
-    ### instead of only training the local enhancer, train the entire network after certain iterations
-    if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
-        model.module.update_fixed_params()
-
-    ### linearly decay learning rate after certain iterations
-    if epoch > opt.niter:
-        model.module.update_learning_rate()
\ No newline at end of file
diff --git a/spaces/manhngolibo/manhngo/Dockerfile b/spaces/manhngolibo/manhngo/Dockerfile
deleted file mode 100644
index 302a99b0ca4470f614686e6b647dbbb84eb9a865..0000000000000000000000000000000000000000
--- a/spaces/manhngolibo/manhngo/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM python:3.9
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-COPY . .
-
-CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
diff --git a/spaces/matthoffner/chatbot/components/Chat/PromptList.tsx b/spaces/matthoffner/chatbot/components/Chat/PromptList.tsx
deleted file mode 100644
index 3e5ad100b11223736446b4187e600fd97f193ac7..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/chatbot/components/Chat/PromptList.tsx
+++ /dev/null
@@ -1,38 +0,0 @@
-import { FC, MutableRefObject } from 'react';
-
-import { Prompt } from '@/types/prompt';
-
-interface Props {
-  prompts: Prompt[];
-  activePromptIndex: number;
-  onSelect: () => void;
-  onMouseOver: (index: number) => void;
-  promptListRef: MutableRefObject<HTMLUListElement | null>;
-}
-
-export const PromptList: FC<Props> = ({
-  prompts,
-  activePromptIndex,
-  onSelect,
-  onMouseOver,
-  promptListRef,
-}) => {
-  return (
-    <ul ref={promptListRef}>
-      {prompts.map((prompt, index) => (
-        <li
-          key={index}
-          className={index === activePromptIndex ? 'active' : ''}
-          onClick={(e) => {
-            e.preventDefault();
-            e.stopPropagation();
-            onSelect();
-          }}
-          onMouseEnter={() => onMouseOver(index)}
-        >
-          {prompt.name}
-        </li>
-      ))}
-    </ul>
- ); -}; diff --git a/spaces/mayordp/DeepFakeAI/DeepFakeAI/uis/components/__init__.py b/spaces/mayordp/DeepFakeAI/DeepFakeAI/uis/components/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mee-asukoht/flan-t5-small/README.md b/spaces/mee-asukoht/flan-t5-small/README.md deleted file mode 100644 index aac3f312ca21198b9e17a383fbae941650261687..0000000000000000000000000000000000000000 --- a/spaces/mee-asukoht/flan-t5-small/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Flan T5 Small -emoji: 📊 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false -duplicated_from: mee-asukoht/flan-t5-small-test ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/merve/anonymization/source/anonymization/style-graph-scroll.css b/spaces/merve/anonymization/source/anonymization/style-graph-scroll.css deleted file mode 100644 index 7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/source/anonymization/style-graph-scroll.css +++ /dev/null @@ -1,160 +0,0 @@ -/** { border: 1px solid #f00; }*/ - - -#container{ - position: relative; - width: auto; - margin-left: -25px; - /*margin-bottom: 100px;*/ -} - -#sections{ - width: 330px; - pointer-events: none; -} - -#sections > div{ - background: white; - opacity: .2; - margin-bottom: 400px; - line-height: 1.4em; - transition: opacity .2s; - pointer-events: all; -} -#sections > div:last-child{ - height: 480px; - margin-bottom: 0px; -} -#sections > div.graph-scroll-active{ - opacity: 1; -} - -#graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 580px; -} - -.slider-outer { - display: block; - max-width: 300px; -} - -@media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - #graph{ - width: 100%; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - left:12px; - } - - #sections{ - width: auto; - position: relative; - margin: 0px auto; - } - - #sections > div{ - background: rgba(255,255,255,.8); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -260px; - } - - #sections > div:last-child{ - height: auto; - } - - #sections h3{ - margin-top: .5em; - } - - /* Adjust buttons for mobile. */ - - .button-container{ - text-align: center; - left:0px; - } - - /* Adjust sliders for mobile. */ - input[type="range" i] { - width: 280px; - } - .slider-label-container{ - width: 145px; - /* display: inline-block; */ - } - - .slide-container-heads-prob, .slide-container-population { - text-align: center; - } - - .slider-container { - margin-bottom: 5px; - text-align: center; - width: 300px; - /* display:inline-block; */ - } - - .slider-outer { - text-align: center; - display: flex; - max-width: 300px; - } - - .headsProb, .population { - margin-left: 15px; - } - - .slide-container-population { - margin-bottom: -10px; - } - - .pointer div { - left: 10px; - top: 37px; - } - - /* Adjust post summary test for mobile. 
*/ - .post-summary{ - margin-left: 8px; - margin-bottom: 60px; - margin-top: 40px; - } - -} - -#graph > div{ - margin: 20 35px; -} - - -#end{ - height: 15vh; -} - diff --git a/spaces/merve/fill-in-the-blank/public/uncertainty-calibration/draw_weathergraph.js b/spaces/merve/fill-in-the-blank/public/uncertainty-calibration/draw_weathergraph.js deleted file mode 100644 index 068615fb14b8e5d27869a0d270d8f0c5580e4fcc..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/public/uncertainty-calibration/draw_weathergraph.js +++ /dev/null @@ -1,264 +0,0 @@ -window.drawWeatherGraph = function (graphSel, fig_height, fig_width){ - - var threshold = .4 - - var thresholds = [0, .2, .4, .6, .8, 1].map((val, i) => { - var isLocked = val == 0 || val == 1 - return {val, i, isLocked, origVal: val} - }) - - var c = d3.conventions({ - sel: graphSel.html('').append('div'), - height: fig_height, - totalWidth: fig_width, - margin: {top: 100, bottom: 100} - }); - - var {predictionSel, weatherGroupSel} = (function(){ - c.y.domain([0,9]).clamp(true); - - // x-Axis - c.xAxis.ticks(5).tickFormat(d3.format('.2f')) - c.yAxis.ticks(0) - d3.drawAxis(c) - c.svg.select('.x') - .translate(-40, 1) - .selectAll('line').translate(20, 1) - - // x-Axis label - c.svg.append('text.axis-label') - .translate([c.width/2, -50]) - .at({textAnchor: 'middle'}) - .at({fill: '#000', fontSize: 14}) - .text('Model Score'); - - // Weather icons - var weatherGroupSel = c.svg.appendMany('g.weatherdata', weatherdata) - .translate(d => [c.x(d.score), c.y(d.h)]) - //.call(d3.attachTooltip) - // .on("mouseover", function(d) { - // ttSel.html(""); - // var gtSel = ttSel.append("div").html(`ground truth: ${d.label}`); - // ttSel.classed("tt-text", true); - // }) - - weatherGroupSel.append('text.icon') - .text(function(d,i){return emojis[d.label];}) - .at({fontSize: 18, textAnchor: 'middle', dy: 8}) - - // Add prediction circles - weatherGroupSel.append('circle.prediction') - .at({cx: 0, cy: 0, r: 14, opacity: 0, fillOpacity: 0, stroke: 'red'}); - weatherGroupSel.append('path.prediction') - .at({d: d => ['M', -10, 10, 'L', 10, -10].join(' '), stroke: 'red', opacity: 0}) - - var predictionSel = c.svg.selectAll('.prediction'); - - return {predictionSel, weatherGroupSel} - })() - - var {thresholdSel, messageSel, setThreshold} = (function(){ - var thresholdSel = c.svg.append('g.threshold') - - var thresholdGroupSel = thresholdSel.append('g') - .call(d3.drag().on('drag', - () => renderThreshold(c.x.invert(d3.clamp(0, d3.event.x, c.width)))) - ) - - var thesholdTextSel = thresholdGroupSel.append('g.axis').append('text') - .at({ - textAnchor: 'middle', - dy: '.33em', - y: c.height + 30 - }) - .text('Threshold') - - var rw = 16 - thresholdGroupSel.append('rect') - .at({ - width: rw, - x: -rw/2, - y: -10, - height: c.height + 30, - fillOpacity: .07, - }) - - var pathSel = thresholdGroupSel.append('path') - .at({ - stroke: '#000', - strokeDasharray: '2 2', - fill: 'none', - d: `M 0 -10 V ` + (c.height + 20), - }) - - - var accuracyValBox = thresholdSel.append('rect.val-box') - .at({width: 55, height: 20, x: c.width/2 + 32.5, y: c.height + 65, rx: 3, ry: 3}) - - var accuracySel = thresholdSel.append('text.big-text') - .at({x: c.width/2 - 10, y: c.height + 80, textAnchor: 'middle'}) - - var accuracyValSel = thresholdSel.append('text.val-text') - .at({x: c.width/2 + 60, y: c.height + 80, textAnchor: 'middle'}) - - - var messageSel = thresholdSel.append('text.tmessage') - .at({x: c.width/2, y: c.height + 120, textAnchor: 'middle'}) - - 
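-    // A point counts as correct when its score falls on the same side of the
-    // threshold as its label, so the accuracy shown below is simply
-    //   d3.mean(weatherdata, d => d.score >= t ? d.label == 1 : d.label == 0)
-    // (a restatement of isClassifiedCorrectly in renderThreshold below).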
function renderThreshold(t){ - if (isNaN(t)) return // TODO debug this - - thresholdGroupSel.translate(c.x(t), 0) - - predictionSel.at({opacity: d => isClassifiedCorrectly(d, t) ? 0 : 1}) - - var acc = d3.mean( - weatherdata, - d => isClassifiedCorrectly(d, t) - ) - accuracySel.text('Accuracy: '); - accuracyValSel.text(d3.format('.1%')(acc)) - messageSel.text('Try dragging the threshold to find the highest accuracy.') - thesholdTextSel.text('Threshold: ' + d3.format('.2f')(t)) - - threshold = t - - function isClassifiedCorrectly(d,t) { - return d.score >= t ? d.label == 1 : d.label == 0; - }; - } - - renderThreshold(threshold) - - var timer = null - function setThreshold(newThreshold, duration){ - var interpolateFn = d3.interpolate(threshold, newThreshold) - - if (timer) timer.stop() - timer = d3.timer(ms => { - var t = Math.min(ms/duration, 1) - if (t == 1) timer.stop() - - renderThreshold(interpolateFn(t)) - }) - } - - return {thresholdSel, messageSel, setThreshold} - })() - - function drawTrueLegend(c){ - var truthAxis = c.svg.append('g').translate([fig_width + 40, 1]) - truthAxis.append('text.legend-title').text('Truth') // TODO: Maybe more of a label? "what actually happened?" or just remove this legend - .at({textAnchor: 'middle', fontWeight: 500, x: 20}) - - truthAxis.append('g').translate([20, 40]) - .append('text.legend-text').text('Sunny').parent() - .at({fontSize: 15}) - .append('text').text(emojis[0]) - .at({fontSize: 25, x: -30, y: 5}) - - truthAxis.append('g').translate([20, 80]) - .append('text.legend-text').text('Rainy').parent() - .at({fontSize: 15}) - .append('text').text(emojis[1]) - .at({fontSize: 25, x: -30, y: 5}) - } - drawTrueLegend(c); - - - var {thresholdsGroupSel, renderThresholds, setThresholds} = (function(){ - var valsCache = [] - var drag = d3.drag() - .on('drag', function(){ - var val = d3.clamp(0, c.x.invert(d3.mouse(c.svg.node())[0]), 1) - - // Force thresholds to stay sorted - valsCache[valsCache.activeIndex] = val - _.sortBy(valsCache).forEach((val, i) => thresholds[i].val = val) - - renderThresholds() - }) - .on('start', d => { - valsCache = thresholds.map(d => d.val) - valsCache.activeIndex = d.i - }) - - var thresholdsGroupSel = c.svg.append('g') - - thresholdsGroupSel.append('text.axis-label') - .text('Calibrated Model Score') - .translate([c.width/2, c.height + 50]) - .at({textAnchor: 'middle'}) - .at({fill: '#000', fontSize: 14}) - - thresholdsSel = thresholdsGroupSel.appendMany('g.thresholds', thresholds) - .call(drag) - .st({pointerEvents: d => d.isLocked ? 'none' : ''}) - - thresholdsSel.append('g.axis').append('text') - .at({ - textAnchor: 'middle', - dy: '.33em', - y: c.height + 20 - }) - .text(d => d3.format('.2f')(d.origVal)) - - var rw = 16 - thresholdsSel.append('rect') - .at({ - width: rw, - x: -rw/2, - height: c.height + 10, - fillOpacity: d => d.isLocked ? 
0 : .07, - }) - - var pathSel = thresholdsSel.append('path') - .at({ - stroke: '#000', - strokeDasharray: '2 2', - fill: 'none', - }) - - function renderThresholds(){ - if (thresholds.some(d => isNaN(d.val))) return - - thresholdsSel - .translate(d => c.x(d.val) + .5, 0) - - pathSel.at({ - d: d => [ - 'M', 0, c.height + 10, - 'L', 0, 0, - 'L', c.x(d.origVal - d.val), -12, - ].join(' ') - }) - - if (window.calibrationCurve) calibrationCurve.renderBuckets() - } - - renderThresholds() - - var timer = null - function setThresholds(newThresholds, duration){ - var interpolateFns = thresholds - .map((d, i) => d3.interpolate(d.val, newThresholds[i])) - - if (timer) timer.stop() - timer = d3.timer(ms => { - var t = Math.min(ms/duration, 1) - if (t == 1) timer.stop() - - thresholds.forEach((d, i) => d.val = interpolateFns[i](t)) - - renderThresholds() - }) - } - - return {thresholdsGroupSel, renderThresholds, setThresholds} - })() - - return {c, thresholdSel, messageSel, setThreshold, predictionSel, thresholds, thresholdsGroupSel, renderThresholds, setThresholds, weatherGroupSel}; - -} - -if (window.init) window.init() \ No newline at end of file diff --git a/spaces/merve/fill-in-the-blank/source/private-and-fair/2d-privacy.js b/spaces/merve/fill-in-the-blank/source/private-and-fair/2d-privacy.js deleted file mode 100644 index fc89da57484ca77169f4b7aff1c1f75365bd9093..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/source/private-and-fair/2d-privacy.js +++ /dev/null @@ -1,383 +0,0 @@ -window.state = window.state || { - scoreSteps: 101, - nParams: 11, - nRandLines: 50, - nMaxRand: 0, - nBatches: 4, - learningRate: 22, -} - - -window.pointData = window.pointData || d3.range(100).map(i => { - var color = i % 2 ? 0 : 1 - var color0 = color - var color1 = color - - var σ = .1 - var μ = .2 - if (color){ - var x = d3.randomNormal(1 - μ, σ)() - var y = d3.randomNormal(1 - μ, σ*1)() - } else { - var x = d3.randomNormal(μ, σ)() - var y = d3.randomNormal(μ, σ*1)() - y = d3.clamp(0, y, .4) - } - - x = d3.clamp(.03, x, .97) - y = d3.clamp(.03, y, .97) - - var bucketX = x*(state.nParams - 1) - - if (i == 51){ - x = .25 - y = .55 - color = 0 - color0 = 0 - color1 = 1 - } - - return {i, x, y, bucketX, color, color0, color1} -}) - -var updateAllFns = [] -var updateAll = () => updateAllFns.forEach(fn => fn()) - -var updateCircleFns = [] -var updateCircle = (d) => updateCircleFns.forEach(fn => fn(d)) - -var sel = d3.select('.epoch-graph').html('') - .st({marginTop: 30}) - .at({role: 'graphics-document', 'aria-label': `Grid of charts showing a simple 2d classifer being trained over four epochs. Changing a single outlier point from red to blue makes a big difference in the final model.`}) - -var dbSel = d3.select('.decision-boundry').html('').append('div') - .at({role: 'graphics-document', 'aria-label': `Slides to control the level clipping and noise applied the gradient at each step. 
Increasing the noise enough makes the decision boundries for the models trained on the red and blue outliers overlap.`}) - -var colorTypes = [{key: 'color1'}, {key: 'color0'}] -sel.appendMany('div', colorTypes) - .each(drawColorType) - -drawBatch( - dbSel.append('div').parent().append('div'), - 3, - colorTypes[0], - colorTypes[1] -) - - -function drawColorType(ct){ - function calcBatches(){ - var buckets = d3.nestBy(pointData, d => Math.floor(d.bucketX)) - buckets = _.sortBy(buckets, d => +d.key) - - pointData.forEach(d => { - d.bucketX = d.x*(state.nParams - 1) - }) - - buckets.forEach((bucket, i) => { - bucket.i = i - bucket.x = +bucket.key - - bucket.pointData = pointData.filter(d => Math.abs(d.bucketX - bucket.key) < 1) - - bucket.scores = d3.range(state.scoreSteps).map(i => { - var y = i/(state.scoreSteps - 1) - var pad = 0 - - var score = d3.sum(bucket.pointData, (d, i) => { - // return d[ct.key] == 0 ? d.y < y - pad : d.y > y + pad - - var dif = 1 - Math.abs(d.bucketX - bucket.x) - dif = Math.min(dif, .5) - if (d[ct.key] == 0){ - return d.y < y - pad ? dif : -dif - } else { - return d.y > y + pad ? dif : -dif - } - }) - - return {y, i, score} - }) - - bucket.best = _.maxBy(bucket.scores, d => d.score) - - bucket.scores.forEach(score => { - var nextScoreIndex = score.i - var charge = 0 - - for (var j = 0; j < state.learningRate; j++){ - var dif = bucket.best.score - bucket.scores[nextScoreIndex]?.score - charge += dif || 5 - if (bucket.scores[nextScoreIndex | 0].score == bucket.best.score){ - j = state.learningRate - } else if (charge > 2) { - nextScoreIndex += nextScoreIndex < bucket.best.i ? 1 : -1 - charge = 0 - } - } - - score.nextScoreIndex = nextScoreIndex - }) - - bucket.x = (bucket.i +.5)/(state.nParams - 1) - }) - - var rng = new alea(ct.key) - - // random lines x batches x buckets - var randLines = d3.range(state.nRandLines).map(() => { - return [buckets.map(d => Math.floor(d.x*state.scoreSteps))] - }) - - function calcNextBatch(){ - randLines.forEach(line => { - var next = _.last(line).map((scoreIndex, i) => { - var randInt = Math.round((rng() - .5)*state.nMaxRand) - return d3.clamp( - 0, - buckets[i].scores[scoreIndex | 0].nextScoreIndex + randInt, - state.scoreSteps - 1) - }) - - line.push(next) - }) - } - d3.range(state.nBatches - 1).forEach(calcNextBatch) - - ct.buckets = buckets - ct.randLines = randLines - } - calcBatches() - - var sel = d3.select(this) - - var render = (function(){ - ct.renderFns = [] - - sel - .append('div.chart-title').text(ct.key == 'color1' ? 'Training a model with an isolated red point' : 'Training a model with an isolated blue point') - .st({marginLeft: 10, marginBottom: -18, marginTop: -5}) - .parent() - .appendMany('div', ct.randLines[0]) - .st({display: 'inline-block'}) - .each(function(d, i){ drawBatch(d3.select(this), i, ct)}) - - return () => ct.renderFns.forEach(d => d()) - })() - - updateAllFns.push(() => { - calcBatches() - render() - }) -} - - -function drawBatch(sel, batchIndex, ct, ct2){ - - var size = ct2 ? 300 : 150 - var mScale = ct2 ? 0 : 1 - var c = d3.conventions({ - sel, - width: size, - height: size, - margin: {left: 10*mScale, right: 10*mScale, top: 20*mScale, bottom: ct2 ? 
50 : 20}, - layers: 'scsd', - }) - - var divSel = c.layers[3].st({pointerEvents: 'none'}) - - c.layers[0].append('rect') - .at({width: c.width, height: c.height, fill: '#efefef'}) - - c.svg = c.layers[2] - - c.svg.append('rect') - .at({width: c.width, height: c.height, fill: 'rgba(0,0,0,0)'}) - - c.svg.append('text') - .text('Step ' + (batchIndex + 1)) - .translate([c.width/2, c.height + 13]) - .at({textAnchor: 'middle', fontSize: 10, fill: '#999'}) - .st({opacity: ct2 ? 0 : 1}) - - c.x.domain([0, 1]).clamp(1) - c.y.domain([0, 1]).clamp(1) - - var drag = d3.drag() - .on('start', () => c.svg.classed('dragging', 1)) - .on('end', () => c.svg.classed('dragging', 0)) - .on('drag', function(d){ - d.x = d3.clamp(.03, c.x.invert(d3.event.x), .97) - d.y = d3.clamp(.03, c.y.invert(d3.event.y), .97) - - updateCircle(d) - updateAll() - }) - .subject(function(d){ return {x: c.x(d.x), y: c.y(d.y)} }) - - var circleSel = c.svg.appendMany('circle.point', pointData) - .at({r: 4, fill: d => util.colors[d[ct.key]]}) - .call(drag) - .classed('swapped', d => d.color0 != d.color1) - .translate(d => [c.x(d.x), c.y(d.y)]) - // .call(d3.attachTooltip) - - updateCircleFns.push(d => { - circleSel - .filter(e => e == d) // rendering circles is dropping frames ? - .translate(d => [c.x(d.x), c.y(d.y)]) - }) - - if (ct2){ - var defs = c.svg.append('defs'); - defs.append('linearGradient#red-blue-def') - .append('stop').at({offset: '0%', 'stop-color': util.colors[0]}).parent() - .append('stop').at({offset: '45%', 'stop-color': util.colors[0]}).parent() - .append('stop').at({offset: '55%', 'stop-color': util.colors[1]}).parent() - .append('stop').at({offset: '100%', 'stop-color': util.colors[1]}) - defs.append('linearGradient#blue-red-def') - .append('stop').at({offset: '0%', 'stop-color': util.colors[1]}).parent() - .append('stop').at({offset: '45%', 'stop-color': util.colors[1]}).parent() - .append('stop').at({offset: '55%', 'stop-color': util.colors[0]}).parent() - .append('stop').at({offset: '100%', 'stop-color': util.colors[0]}) - - circleSel - // .at({r: 1.2}) - .filter(d => d.color0 != d.color1) - .st({r: 7, fillOpacity: 1}) - .st({fill: 'url(#red-blue-def)'})//, stroke: 'url(#blue-red-def)'}) - - var gradientClipAnnoSel = c.svg.append('text.annotation') - .translate([c.width + 20, -40]) - .tspans(d3.wordwrap('Completely clipping the gradient stops the model from learning anything from the training data.', 25), 14) - - divSel.append('div.annotation') - .translate([30, c.height + 5]) - .html(` - Models trained with the isolated blue point -
- Models trained with the isolated red point - `) - .st({lineHeight: '1.3em'}) - .selectAll('span').st({fontSize: 20, height: 0, display: 'inline-block', top: 3, position: 'relative', fontWeight: 700}) - - - } - - function getRandLines(){ - return ct2 ? ct.randLines.concat(ct2.randLines) : ct.randLines - } - - var ctx = c.layers[1] - - var lineGen = d3.line() - .x(d => c.x(d.x)) - .y(d => c.y(d.y)) - .curve(d3.curveNatural) - .context(ctx) - - ct.renderFns.push(() => { - var scores = ct.buckets[0].scores - var paddedLineData = getRandLines().map(line => { - var xyData = line[batchIndex].map((scoreIndex, i) => { - return {x: ct.buckets[i].x, y: scores[scoreIndex | 0].y} - }) - - return [ - {x: 0, y: batchIndex*state.learningRate ? xyData[0].y : 0}, - ...xyData, - {x: 1, y: batchIndex*state.learningRate ? _.last(xyData).y : 1} - ] - }) - - ctx.clearRect(-c.margin.left, -c.margin.top, c.width + c.margin.left + c.margin.right, c.height + c.margin.top + c.margin.bottom) - paddedLineData.forEach((d, i) => { - ctx.beginPath() - ctx.lineWidth = .1 - ctx.strokeStyle = !ct2 ? '#000' : i < ct.randLines.length ? util.colors[1] : util.colors[0] - lineGen(d) - ctx.stroke() - }) - - if (ct2){ - gradientClipAnnoSel.st({opacity: state.learningRate == 0 ? 1 : 0}) - } - }) -} - - -function addSliders(){ - var width = 180 - var height = 30 - var color = '#000' - - var sliders = [ - {key: 'nMaxRand', label: 'Random Noise', r: [0, 30]}, - {key: 'learningRate', label: 'Gradient Clip', r: [30, 0]}, - ] - sliders.forEach(d => { - d.value = state[d.key] - d.xScale = d3.scaleLinear().range([0, width]).domain(d.r).clamp(1) - }) - - var svgSel = dbSel.append('div.sliders').lower() - .st({marginTop: 5, marginBottom: 5}) - .appendMany('div.slider-container', sliders) - .append('svg').at({width, height}) - .append('g').translate(120, 0) - - svgSel.append('text.chart-title') - .text(d => d.label) - .at({textAnchor: 'end', dy: '.33em', x: -15}) - - var sliderSel = svgSel - .on('click', function(d){ - d.value = d.xScale.invert(d3.mouse(this)[0]) - renderSliders(d) - }) - .classed('slider', true) - .st({cursor: 'pointer'}) - - var textSel = sliderSel.append('text.slider-label-container') - .at({y: -20, fontWeight: 500, textAnchor: 'middle', x: 180/2}) - - sliderSel.append('rect') - .at({width, height, y: -height/2, fill: 'rgba(0,0,0,0)'}) - - sliderSel.append('path').at({ - d: `M 0 -.5 H ${width}`, - stroke: color, - strokeWidth: 1 - }) - - var leftPathSel = sliderSel.append('path').at({ - d: `M 0 -.5 H ${width}`, - stroke: color, - strokeWidth: 3 - }) - - var drag = d3.drag() - .on('drag', function(d){ - var x = d3.mouse(this)[0] - d.value = d.xScale.invert(x) - - renderSliders(d) - }) - - var circleSel = sliderSel.append('circle').call(drag) - .at({r: 7, stroke: '#000'}) - - function renderSliders(d){ - if (d) state[d.key] = d.value - - circleSel.at({cx: d => d.xScale(d.value)}) - leftPathSel.at({d: d => `M 0 -.5 H ${d.xScale(d.value)}`}) - - updateAll() - } - renderSliders() -} -addSliders() - - -updateAll() diff --git a/spaces/merve/measuring-fairness/public/fill-in-the-blank/index.html b/spaces/merve/measuring-fairness/public/fill-in-the-blank/index.html deleted file mode 100644 index b1ff5d0c943d3457ad18afefa53be4a9d0155f24..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/public/fill-in-the-blank/index.html +++ /dev/null @@ -1,192 +0,0 @@ - - - - - - - - - - - - - - - - - - What Have Language Models Learned? - - - - - - - - - - - - - - - -
- -
- -

What Have Language Models Learned?

-
By asking language models to fill in the blank, we can probe their understanding of the world.
-

Large language models are making it possible for computers to write stories, program a website and turn captions into images.

-

One of the first of these models, BERT, is trained by taking sentences, splitting them into individual words, randomly hiding some of them, and predicting what the hidden words are. After doing this millions of times, BERT has “read” enough Shakespeare to predict how this phrase usually ends:

-
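For readers who want to poke at this outside the page, here is a minimal sketch of the same fill-in-the-blank probing, assuming the Hugging Face checkpoint named in the footnotes; the Hamlet prompt is our own illustrative choice, not necessarily the page's exact text.

```python
# A minimal sketch of masked-word probing, assuming the footnoted checkpoint.
from transformers import pipeline

fill = pipeline("fill-mask", model="bert-large-uncased-whole-word-masking")
for pred in fill("To be or not to be, that is the [MASK]."):
    print(f"{pred['token_str']:>10}  p={pred['score']:.3f}")
```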
- -

This page is hooked up to a version of BERT trained on Wikipedia and books.¹ Try clicking on different words to see how they’d be filled in, or type in another sentence to see what else BERT has picked up on.

-
- -

Cattle or Clothes?

-

Besides Hamlet’s existential dread, the text BERT was trained on also contains more patterns:

-
- -

Cattle and horses aren’t top purchase predictions in every state, though! In New York, some of the most likely words are clothes, books and art:

-
- -

There are more than 30,000 words, punctuation marks and word fragments in BERT’s vocabulary. Every time BERT fills in a hidden word, it assigns each of them a probability. By looking at how slightly different sentences shift those probabilities, we can get a glimpse at how purchasing patterns in different places are understood.

-
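A rough sketch of that comparison technique follows; the two place-based prompts are assumptions for illustration, and the page's exact sentences may differ.

```python
# Change one word in the prompt and compare how the top predictions shift.
from transformers import pipeline

fill = pipeline("fill-mask", model="bert-large-uncased-whole-word-masking")
for state in ("Texas", "New York"):
    preds = fill(f"In {state} they like to buy [MASK].", top_k=5)
    print(state, [p["token_str"] for p in preds])
```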
- -

You can edit these sentences. Or try one of these comparisons to get started:

-

To the extent that a computer program can “know” something, what does BERT know about where you live?

-

What’s in a Name?

-

This technique can also probe what associations BERT has learned about different groups of people. For example, it predicts people named Elsie are older than people named Lauren:

-
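One hedged way to reproduce this kind of name probing is to ask for a birth year and compare the distributions; the sentence template below is an assumption, not the page's exact prompt.

```python
# Compare the birth years BERT implies for two names.
from transformers import pipeline

fill = pipeline("fill-mask", model="bert-large-uncased-whole-word-masking")
for name in ("Elsie", "Lauren"):
    preds = fill(f"{name} was born in [MASK].", top_k=3)
    print(name, [(p["token_str"], round(p["score"], 3)) for p in preds])
```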
- -

It’s also learned that people named Jim have more typically masculine jobs than people named Jane:

-
- -

These aren’t just spurious correlations — Elsies really are more likely to be older than Laurens. And occupations the model associates with feminine names are held by a higher percentage of women.

-

Should we be concerned about these correlations? BERT was trained to fill in blanks in Wikipedia articles and books — it does a great job at that! The problem is that the internal representations of language these models have learned are used for much more – by some measures, they’re the best way we have of getting computers to understand and manipulate text.

-

We wouldn’t hesitate to call a conversation partner or recruiter who blithely assumed that doctors are men sexist, but that’s exactly what BERT might do if heedlessly incorporated into a chatbot or HR software:

-
- -

Adjusting for assumptions like this isn’t trivial. Why machine learning systems produce a given output still isn’t well understood – determining if a credit model built on top of BERT rejected a loan application because of gender discrimination might be quite difficult.

-

Deploying large language models at scale also risks amplifying and perpetuating today’s harmful stereotypes. When prompted with “Two Muslims walked into a…”, for example, GPT-3 typically finishes the sentence with descriptions of violence.

-

How Can We Fix This?

-

One conceptually straightforward approach: reduce unwanted correlations from the training data to mitigate model bias.

-

Last year a version of BERT called Zari was trained with an additional set of generated sentences. For every sentence with a gendered noun, like boy or aunt, another sentence that replaced the noun with its gender-partner was added to the training data: in addition to “The lady doth protest too much,” Zari was also trained on “The gentleman doth protest too much.”

-
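A toy sketch of that counterfactual augmentation is below; the swap list is a small illustrative assumption, while Zari's actual training used a curated set of gendered pairs.

```python
# Duplicate each training sentence with gendered nouns swapped.
SWAPS = {"boy": "girl", "girl": "boy", "aunt": "uncle", "uncle": "aunt",
         "lady": "gentleman", "gentleman": "lady"}

def gender_swap(sentence: str) -> str:
    out = []
    for word in sentence.split():
        core = word.strip(".,!?\"'")           # peel off punctuation
        swapped = SWAPS.get(core.lower(), core)
        if core[:1].isupper():                 # keep capitalization
            swapped = swapped.capitalize()
        out.append(word.replace(core, swapped, 1))
    return " ".join(out)

corpus = ["The lady doth protest too much."]
augmented = corpus + [gender_swap(s) for s in corpus]
print(augmented[1])  # The gentleman doth protest too much.
```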
- -

Unlike BERT, Zari assigns nurses and doctors an equal probability of being a “she” or a “he” after being trained on the swapped sentences. This approach hasn’t removed all the gender correlations; because names weren’t swapped, Zari’s association between masculine names and doctors has only slightly decreased from BERT’s. And the retraining doesn’t change how the model understands nonbinary gender.

-

Something similar happened with other attempts to remove gender bias from models’ representations of words. It’s possible to mathematically define bias and perform “brain surgery” on a model to remove it, but language is steeped in gender. Large models can have billions of parameters in which to learn stereotypes — slightly different measures of bias have found the retrained models only shifted the stereotypes around to be undetectable by the initial measure.

-

As with other applications of machine learning, it’s helpful to focus instead on the actual harms that could occur. Tools like AllenNLP, LMdiff and the Language Interpretability Tool make it easier to interact with language models to find where they might be falling short. Once those shortcomings are spotted, task-specific mitigation measures can be simpler to apply than modifying the entire model.

-

It’s also possible that as models grow more capable, they might be able to explain and perform some of this debiasing themselves. Instead of forcing the model to tell us the gender of “the doctor,” we could let it respond with uncertainty that’s shown to the user, along with controls to override its assumptions.

-

Credits

-

Adam Pearce // July 2021

-

Thanks to Ben Wedin, Emily Reif, James Wexler, Fernanda Viégas, Ian Tenney, Kellie Webster, Kevin Robinson, Lucas Dixon, Ludovic Peran, Martin Wattenberg, Michael Terry, Tolga Bolukbasi, Vinodkumar Prabhakaran, Xuezhi Wang, Yannick Assogba, and Zan Armstrong for their help with this piece.

-

Footnotes

-

The BERT model used on this page is the Hugging Face version of bert-large-uncased-whole-word-masking. “BERT” also refers to a type of model architecture; hundreds of BERT models have been trained and published. The model and chart code used here are available on GitHub.

-

Notice that “1800”, “1900” and “2000” are some of the top predictions, though. People aren’t actually more likely to be born at the start of a century, but in BERT’s training corpus of books and Wikipedia articles, round numbers are more common.

-

Comparing BERT and Zari in this interface requires carefully tracking tokens during a transition. The BERT Difference Plots colab has ideas for extensions to systematically look at differences between the models’ output.

-

This analysis shouldn’t stop once a model is deployed — as language and model usage shift, it’s important to continue studying and mitigating potential harms.

-

Appendix: Differences Over Time

-

In addition to looking at how predictions for men and women are different for a given sentence, we can also chart how those differences have changed over time:

-
- -

The convergence in more recent years suggests another potential mitigation technique: using a prefix to steer the model away from unwanted correlations while preserving its understanding of natural language.

-

Using “In $year” as the prefix is quite limited, though, as it doesn’t handle gender-neutral pronouns and potentially increases other correlations. However, it may be possible to find a better prefix that mitigates a specific type of bias with just a couple of dozen examples.

-
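A sketch of what that prefix probing might look like; the occupation sentence is an assumed template, and the `targets` argument restricts scoring to the two pronouns.

```python
# Compare he/she probabilities under different year prefixes.
from transformers import pipeline

fill = pipeline("fill-mask", model="bert-large-uncased-whole-word-masking")
for year in (1908, 2018):
    preds = fill(f"In {year}, [MASK] worked as a doctor.", targets=["he", "she"])
    print(year, {p["token_str"]: round(p["score"], 4) for p in preds})
```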
- -

Closer examination of these differences in differences also shows there’s a limit to the facts we can pull out of BERT this way.

-

Below, the top row of charts shows how predicted differences in occupations between men and women change between 1908 and 2018. The rightmost chart shows the he/she difference in 1908 against the he/she difference in 2018.

-

The flat slope of the rightmost chart indicates that the he/she difference has decreased for each job by about the same amount. But in reality, shifts in occupation weren’t nearly so smooth, and some occupations, like accounting, switched from being majority male to majority female.

-
- -

This reality-prediction mismatch could be caused by lack of training data, model size or the coarseness of the probing method. There’s an immense amount of general knowledge inside of these models — with a little bit of focused training, they can even become expert trivia players.

-

More Explorables

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/mithril-security/blind_chat/src/lib/types/WebSearch.ts b/spaces/mithril-security/blind_chat/src/lib/types/WebSearch.ts deleted file mode 100644 index 7416f01f1a2c7ea9b94f525aec473312bc3deefd..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/blind_chat/src/lib/types/WebSearch.ts +++ /dev/null @@ -1,36 +0,0 @@ -import type { Conversation } from "./Conversation"; -import type { Timestamps } from "./Timestamps"; - -export interface WebSearch extends Timestamps { - prompt: string; - - searchQuery: string; - results: string[]; - knowledgeGraph: string; - answerBox: string; - summary: string; - - messages: WebSearchMessage[]; -} - -export type WebSearchMessageUpdate = { - type: "update"; - message: string; - args?: string[]; -}; - -export type WebSearchMessageError = { - type: "error"; - message: string; - args?: string[]; -}; - -export type WebSearchMessageResult = { - type: "result"; - id: string; -}; - -export type WebSearchMessage = - | WebSearchMessageUpdate - | WebSearchMessageResult - | WebSearchMessageError; diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/transformer/transformer_predictor.py b/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/transformer/transformer_predictor.py deleted file mode 100644 index 72378abe29c01809a00fa1b87d275258ee9c91fa..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/transformer/transformer_predictor.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py -# Copyright (c) Meta Platforms, Inc. All Rights Reserved - -import fvcore.nn.weight_init as weight_init -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.layers import Conv2d - -from .position_encoding import PositionEmbeddingSine -from .transformer import Transformer - - -class TransformerPredictor(nn.Module): - @configurable - def __init__( - self, - in_channels, - mask_classification=True, - *, - num_classes: int, - hidden_dim: int, - num_queries: int, - nheads: int, - dropout: float, - dim_feedforward: int, - enc_layers: int, - dec_layers: int, - pre_norm: bool, - deep_supervision: bool, - mask_dim: int, - enforce_input_project: bool, - ): - """ - NOTE: this interface is experimental. 
- Args: - in_channels: channels of the input features - mask_classification: whether to add mask classifier or not - num_classes: number of classes - hidden_dim: Transformer feature dimension - num_queries: number of queries - nheads: number of heads - dropout: dropout in Transformer - dim_feedforward: feature dimension in feedforward network - enc_layers: number of Transformer encoder layers - dec_layers: number of Transformer decoder layers - pre_norm: whether to use pre-LayerNorm or not - deep_supervision: whether to add supervision to every decoder layers - mask_dim: mask feature dimension - enforce_input_project: add input project 1x1 conv even if input - channels and hidden dim is identical - """ - super().__init__() - - self.mask_classification = mask_classification - - # positional encoding - N_steps = hidden_dim // 2 - self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True) - - transformer = Transformer( - d_model=hidden_dim, - dropout=dropout, - nhead=nheads, - dim_feedforward=dim_feedforward, - num_encoder_layers=enc_layers, - num_decoder_layers=dec_layers, - normalize_before=pre_norm, - return_intermediate_dec=deep_supervision, - ) - - self.num_queries = num_queries - self.transformer = transformer - hidden_dim = transformer.d_model - - self.query_embed = nn.Embedding(num_queries, hidden_dim) - - if in_channels != hidden_dim or enforce_input_project: - self.input_proj = Conv2d(in_channels, hidden_dim, kernel_size=1) - weight_init.c2_xavier_fill(self.input_proj) - else: - self.input_proj = nn.Sequential() - self.aux_loss = deep_supervision - - # output FFNs - if self.mask_classification: - self.class_embed = nn.Linear(hidden_dim, num_classes + 1) - self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3) - - @classmethod - def from_config(cls, cfg, in_channels, mask_classification): - ret = {} - ret["in_channels"] = in_channels - ret["mask_classification"] = mask_classification - - ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES - ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM - ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES - # Transformer parameters: - ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS - ret["dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT - ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD - ret["enc_layers"] = cfg.MODEL.MASK_FORMER.ENC_LAYERS - ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS - ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM - ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION - ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ - - ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM - - return ret - - def forward(self, x, mask_features): - pos = self.pe_layer(x) - - src = x - mask = None - hs, memory = self.transformer( - self.input_proj(src), mask, self.query_embed.weight, pos - ) - - if self.mask_classification: - outputs_class = self.class_embed(hs) - out = {"pred_logits": outputs_class[-1]} - else: - out = {} - - if self.aux_loss: - # [l, bs, queries, embed] - mask_embed = self.mask_embed(hs) - outputs_seg_masks = torch.einsum( - "lbqc,bchw->lbqhw", mask_embed, mask_features - ) - out["pred_masks"] = outputs_seg_masks[-1] - out["aux_outputs"] = self._set_aux_loss( - outputs_class if self.mask_classification else None, outputs_seg_masks - ) - else: - # FIXME h_boxes takes the last one computed, keep this in mind - # [bs, queries, embed] - mask_embed = self.mask_embed(hs[-1]) - outputs_seg_masks = torch.einsum( - "bqc,bchw->bqhw", mask_embed, mask_features - ) - 
out["pred_masks"] = outputs_seg_masks - return out - - @torch.jit.unused - def _set_aux_loss(self, outputs_class, outputs_seg_masks): - # this is a workaround to make torchscript happy, as torchscript - # doesn't support dictionary with non-homogeneous values, such - # as a dict having both a Tensor and a list. - if self.mask_classification: - return [ - {"pred_logits": a, "pred_masks": b} - for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1]) - ] - else: - return [{"pred_masks": b} for b in outputs_seg_masks[:-1]] - - -class MLP(nn.Module): - """Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x diff --git a/spaces/mohamedemam/QA_GeneraToR/README.md b/spaces/mohamedemam/QA_GeneraToR/README.md deleted file mode 100644 index cada189d19abf2ec5fb974eadd42628ed3faa43e..0000000000000000000000000000000000000000 --- a/spaces/mohamedemam/QA_GeneraToR/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: QA GeneraTor -emoji: 👀 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.41.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py b/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py deleted file mode 100644 index 3465731eb3e55047c44d1b336a97e99cb3a89a53..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py +++ /dev/null @@ -1,899 +0,0 @@ -from typing import NamedTuple, List -from urllib.parse import urlparse -import os, sys -import subprocess -from subprocess import check_call, check_output -import glob -import wget -import re -import multiprocessing as mp -from functools import partial -import pathlib -from collections import OrderedDict - -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. 
Exitting..."') - sys.exit(-1) - -# scripts and data locations -CWD = os.getcwd() -UTILS = f"{CWD}/utils" - -MOSES = f"{UTILS}/mosesdecoder" -SGM_TOOL = f'{MOSES}/scripts/ems/support/input-from-sgm.perl' - -TMX2CORPUS = f"{UTILS}/tmx2corpus" -TMX_TOOL = f'python {TMX2CORPUS}/tmx2corpus.py' - -to_data_path = f'{WORKDIR_ROOT}/wmt' -download_to = f'{to_data_path}/downloads' -manually_downloads = f'{to_data_path}/downloads' -extract_to = f'{to_data_path}/extracted' -#DESTDIR=${WORKDIR_ROOT}/ML50/raw/ -raw_data = f'{WORKDIR_ROOT}/ML50/raw' -#### - -class DLDataset(NamedTuple): - name: str - train_urls: List[str] - valid_urls: List[str] - test_urls: List[str] - train_files_patterns: List[str] = [] - valid_files_patterns: List[str] = [] - test_files_patterns: List[str] = [] - - - -def bar_custom(current, total, width=80): - print("Downloading: %d%% [%d / %d] Ks" % (current / total * 100, current / 1000, total / 1000), end='\r') - -def get_downloaded_file(dl_folder, url): - if isinstance(url, tuple): - url, f = url - else: - url_f = urlparse(url) - # f = os.path.split(url_f.path)[-1] - f = '_'.join(url_f.path.split('/')[1:]) - return url, f"{dl_folder}/{f}" - -def download_parts_and_combine(dl_folder, urls, filename): - parts = [] - for url_record in urls: - url, part_file = get_downloaded_file(dl_folder, url_record) - if os.path.exists(part_file): - print(f'{part_file} has already been downloaded so skip') - else: - part_file = wget.download(url, part_file, bar=bar_custom) - parts.append(part_file) - - def get_combine_cmd(parts): - #default as tar.gz.?? - return f'cat {" ".join(parts)} > {filename}' - - combine_cmd = get_combine_cmd(parts) - call(combine_cmd, debug=True) - return filename - -def download_a_url(dl_folder, url): - url, filename = get_downloaded_file(dl_folder, url) - if os.path.exists(filename): - print(f'{filename} has already been downloaded so skip') - return filename - - print(f'downloading {url} to {filename}') - if isinstance(url, list) or isinstance(url, tuple): - download_parts_and_combine(dl_folder, url, filename) - else: - wget.download(url, filename, bar=bar_custom) - print(f'dowloaded: {filename}') - return filename - -def download_files(dl_folder, urls, completed_urls={}): - for url_record in urls: - url, _ = get_downloaded_file(dl_folder, url_record) - filename = download_a_url(dl_folder, url_record) - completed_urls[str(url)] = filename - return completed_urls - -def check_need_manual_downalod(dl_folder, to_manually_download_urls): - to_be_manually_dowloaded = [] - manually_completed_urls = {} - for url_record, instruction in to_manually_download_urls: - url, filename = get_downloaded_file(dl_folder, url_record) - if not os.path.exists(filename): - print(f'{url} need to be download manually, please download it manually following {instruction}; and copy it to {filename}') - to_be_manually_dowloaded.append((url, filename)) - else: - manually_completed_urls[url] = filename - # if len(to_be_manually_dowloaded) > 0: - # raise ValueError('Missing files that need to be downloaded manually; stop the process now.') - return to_be_manually_dowloaded - -def download_dataset(to_folder, dl_dataset, completed_urls={}): - download_files(to_folder, dl_dataset.train_urls, completed_urls) - download_files(to_folder, dl_dataset.valid_urls, completed_urls) - download_files(to_folder, dl_dataset.test_urls, completed_urls) - print('completed downloading') - return completed_urls - -def call(cmd, debug=False): - if debug: - print(cmd) - check_call(cmd, shell=True) - - -def 
get_extract_name(file_path): - path = os.path.split(file_path) - return path[-1] + '_extract' #.split('.')[0] - -def extract_file(downloaded_file, extract_folder, get_extract_name=get_extract_name, debug=False): - extract_name = get_extract_name(downloaded_file) - extract_to = f'{extract_folder}/{extract_name}' - os.makedirs(extract_to, exist_ok=True) - if os.path.exists(f'{extract_to}/DONE'): - print(f'{downloaded_file} has already been extracted to {extract_to} so skip') - return extract_to - def get_extract_cmd(filename): - if filename.endswith('.tgz') or filename.endswith('tar.gz'): - return f'tar xzfv {filename} -C {extract_to}' - elif filename.endswith('.gz.tar'): - return f'tar xfv {filename} -C {extract_to}; (cd {extract_to}; gzip -d *.gz; [ $? -eq 0 ] || gzip -d */*.gz)' - elif filename.endswith('.tar'): - return f'tar xfv {filename} -C {extract_to}' - elif filename.endswith('.gz'): - return f'cp {filename} {extract_to}; (cd {extract_to}; gzip -d *.gz)' - elif filename.endswith('.zip'): - return f'unzip {filename} -d {extract_to}' - extract_cmd = get_extract_cmd(downloaded_file) - print(f'extracting {downloaded_file}') - if isinstance(extract_cmd, list): - for c in extract_cmd: - call(c, debug=debug) - else: - call(extract_cmd, debug=debug) - call(f'echo DONE > {extract_to}/DONE') - return extract_to - - -def extract_all_files( - completed_urls, extract_folder, - get_extract_name=get_extract_name, - completed_extraction={}, - debug=False): - extracted_folders = OrderedDict() - for url, downloaded_file in set(completed_urls.items()): - if downloaded_file in completed_extraction: - print(f'{downloaded_file} is already extracted; so skip') - continue - folder = extract_file(downloaded_file, extract_folder, get_extract_name, debug) - extracted_folders[url] = folder - return extracted_folders - - -def my_glob(folder): - for p in [f'{folder}/*', f'{folder}/*/*', f'{folder}/*/*/*']: - for f in glob.glob(p): - yield f - - -def sgm2raw(sgm, debug): - to_file = sgm[0:len(sgm) - len('.sgm')] - if os.path.exists(to_file): - debug and print(f'{sgm} already converted to {to_file}; so skip') - return to_file - cmd = f'{SGM_TOOL} < {sgm} > {to_file}' - call(cmd, debug) - return to_file - -def tmx2raw(tmx, debug): - to_file = tmx[0:len(tmx) - len('.tmx')] - to_folder = os.path.join(*os.path.split(tmx)[:-1]) - if os.path.exists(f'{to_folder}/bitext.en'): - debug and print(f'{tmx} already extracted to {to_file}; so skip') - return to_file - cmd = f'(cd {to_folder}; {TMX_TOOL} {tmx})' - call(cmd, debug) - return to_file - -CZENG16_REGEX = re.compile(r'.*?data.plaintext-format/0[0-9]train$') -WMT19_WIKITITLES_REGEX = re.compile(r'.*?wikititles-v1.(\w\w)-en.tsv.gz') -TSV_REGEX = re.compile(r'.*?(\w\w)-(\w\w).tsv$') - - - -def cut_wikitles(wiki_file, debug): - # different languages have different file names: - if wiki_file.endswith('wiki/fi-en/titles.fi-en'): - to_file1 = f'{wiki_file}.fi' - to_file2 = f'{wiki_file}.en' - BACKSLASH = '\\' - cmd1 = f"cat {wiki_file} | sed 's/|||/{BACKSLASH}t/g' |cut -f1 |awk '{{$1=$1}};1' > {to_file1}" - cmd2 = f"cat {wiki_file} | sed 's/|||/{BACKSLASH}t/g' |cut -f2 |awk '{{$1=$1}};1' > {to_file2}" -# elif WMT19_WIKITITLES_REGEX.match(wiki_file): -# src = WMT19_WIKITITLES_REGEX.match(wiki_file).groups()[0] -# to_file1 = f'{wiki_file}.{src}' -# to_file2 = f'{wiki_file}.en' -# cmd1 = f"cat {wiki_file} | cut -f1 |awk '{{$1=$1}};1' > {to_file1}" -# cmd2 = f"cat {wiki_file} | cut -f2 |awk '{{$1=$1}};1' > {to_file2}" - else: - return None - if os.path.exists(to_file1) and 
os.path.exists(to_file2): - debug and print(f'{wiki_file} already processed to {to_file1} and {to_file2}; so skip') - return wiki_file - - call(cmd1, debug=debug) - call(cmd2, debug=debug) - return wiki_file - -def cut_tsv(file, debug): - m = TSV_REGEX.match(file) - if m is None: - raise ValueError(f'{file} is not matching tsv pattern') - src = m.groups()[0] - tgt = m.groups()[1] - - to_file1 = f'{file}.{src}' - to_file2 = f'{file}.{tgt}' - cmd1 = f"cat {file} | cut -f1 |awk '{{$1=$1}};1' > {to_file1}" - cmd2 = f"cat {file} | cut -f2 |awk '{{$1=$1}};1' > {to_file2}" - if os.path.exists(to_file1) and os.path.exists(to_file2): - debug and print(f'{file} already processed to {to_file1} and {to_file2}; so skip') - return file - - call(cmd1, debug=debug) - call(cmd2, debug=debug) - return file - - -def convert_file_if_needed(file, debug): - if file.endswith('.sgm'): - return sgm2raw(file, debug) - elif file.endswith('.tmx'): - return tmx2raw(file, debug) - elif file.endswith('wiki/fi-en/titles.fi-en'): - return cut_wikitles(file, debug) -# elif WMT19_WIKITITLES_REGEX.match(file): -# return cut_wikitles(file, debug) - elif file.endswith('.tsv'): - return cut_tsv(file, debug) - elif CZENG16_REGEX.match(file): - return convert2czeng17(file, debug) - else: - return file - - -def convert_files_if_needed(extracted_foldrs, my_glob=my_glob, debug=False): - return { - url: list(sorted(set(convert_file_if_needed(f, debug)) for f in sorted(set(my_glob(folder))))) - for url, folder in extracted_foldrs.items() - } - -def match_patt(file_path, file_pattern, src, tgt, lang): - return file_pattern.format(src=src, tgt=tgt, lang=lang) in file_path - -def match_patts(file_path, file_patterns, src, tgt, lang): - for file_pattern in file_patterns: - params = { k: v for k, v in [('src', src), ('tgt', tgt), ('lang', lang)] if k in file_pattern} - matching = file_pattern.format(**params) - - if isinstance(file_pattern, tuple): - pattern, directions = file_pattern - if f'{src}-{tgt}' in directions and matching in file_path: - return True - else: - if matching in file_path: - return True - return False - -def extracted_glob(extracted_folder, file_patterns, src, tgt, lang): - def get_matching_pattern(file_pattern): - params = { - k: v - for k, v in [('src', src), ('tgt', tgt), ('lang', lang)] - if '{' + k + '}' in file_pattern - } - file_pattern = re.sub(r'{src:(.*?)}', r'\1' if lang == src else '', file_pattern) - file_pattern = re.sub(r'{tgt:(.*?)}', r'\1' if lang == tgt else '', file_pattern) - file_pattern = file_pattern.format(**params) - return file_pattern - for file_pattern in file_patterns: - if isinstance(file_pattern, tuple): - file_pattern, lang_pairs = file_pattern - if f'{src}-{tgt}' not in lang_pairs: - continue -# print('working on pattern: ', file_pattern, lang_pairs ) - matching_pattern = get_matching_pattern(file_pattern) - if matching_pattern is None: - continue - glob_patterns = f'{extracted_folder}/{matching_pattern}' -# print('glob_patterns: ', glob_patterns) - for f in glob.glob(glob_patterns): - yield f - -# for debug usage -def all_extracted_files(split, src, tgt, extracted_folders, split_urls): - def get_url(url): - if isinstance(url, tuple): - url, downloaded_file = url - return url - return [ - f - for url in split_urls - for f in my_glob(extracted_folders[str(get_url(url))]) - ] - -def concat_files(split, src, tgt, extracted_folders, split_urls, path_patterns, to_folder, debug=False): -# if debug: -# print('extracted files to be filtered by patterns: ', -# 
'\n\t'.join(sorted(all_extracted_files(split, src, tgt, extracted_folders, split_urls)))) - for lang in [src, tgt]: - to_file = f'{to_folder}/{split}.{src}-{tgt}.{lang}' - s_src, s_tgt, s_lang = src.split('_')[0], tgt.split('_')[0], lang.split('_')[0] - files = [] - for url in split_urls: - if isinstance(url, tuple): - url, downloaded_file = url - if str(url) not in extracted_folders: - print(f'warning: {url} not in extracted files') - for extracted_file in set( - extracted_glob( - extracted_folders[str(url)], path_patterns, - s_src, s_tgt, s_lang)): - files.append(extracted_file) - if len(files) == 0: - print('warning: ', f'No files found for split {to_file}') - continue - files = sorted(set(files)) - print(f'concating {len(files)} files into {to_file}') - cmd = ['cat'] + [f'"{f}"' for f in files] + [f'>{to_file}'] - cmd = " ".join(cmd) - call(cmd, debug=debug) - -UTILS = os.path.join(pathlib.Path(__file__).parent, 'utils') -LID_MODEL = f'{download_to}/lid.176.bin' -LID_MULTI = f'{UTILS}/fasttext_multi_filter.py' - -def lid_filter(split, src, tgt, from_folder, to_folder, debug=False): - if not os.path.exists(LID_MODEL): - call(f'wget -nc https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin -O {LID_MODEL}') - from_prefix = f'{from_folder}/{split}.{src}-{tgt}' - to_prefix = f'{to_folder}/{split}.{src}-{tgt}' - if os.path.exists(f'{from_prefix}.{src}') and os.path.exists(f'{from_prefix}.{tgt}'): - s_src, s_tgt = src.split('_')[0], tgt.split('_')[0] - cmd = ( - f'python {LID_MULTI} --model {LID_MODEL} --inputs {from_prefix}.{src} {from_prefix}.{tgt} ' - f'--langs {s_src} {s_tgt} --outputs {to_prefix}.{src} {to_prefix}.{tgt}' - ) - print(f'filtering {from_prefix}') - call(cmd, debug=debug) - -def concat_into_splits(dl_dataset, src, tgt, extracted_folders, to_folder, debug): - to_folder_tmp = f"{to_folder}_tmp" - os.makedirs(to_folder_tmp, exist_ok=True) - concat_files('train', src, tgt, - extracted_folders, - split_urls=dl_dataset.train_urls, - path_patterns=dl_dataset.train_files_patterns, - to_folder=to_folder_tmp, debug=debug) - lid_filter('train', src, tgt, to_folder_tmp, to_folder, debug) - - concat_files('valid', src, tgt, - extracted_folders, - split_urls=dl_dataset.valid_urls, - path_patterns=dl_dataset.valid_files_patterns, - to_folder=to_folder, debug=debug) - concat_files('test', src, tgt, - extracted_folders, - split_urls=dl_dataset.test_urls, - path_patterns=dl_dataset.test_files_patterns, - to_folder=to_folder, debug=debug) - - -def download_multi(dl_folder, extract_folder, urls, num_processes=8, debug=False): - pool = mp.Pool(processes=num_processes) - download_f = partial(download_a_url, dl_folder) - downloaded_files = pool.imap_unordered(download_f, urls) - pool.close() - pool.join() - -BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ") -def run_eval_bleu(cmd): - output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip() - print(output) - bleu = -1.0 - for line in output.strip().split('\n'): - m = BLEU_REGEX.search(line) - if m is not None: - bleu = m.groups()[0] - bleu = float(bleu) - break - return bleu - -def check_wmt_test_bleu(raw_folder, wmt_lang_pairs): - not_matchings = [] - for wmt, src_tgts in wmt_lang_pairs: - for src_tgt in src_tgts: - print(f'checking test bleus for: {src_tgt} at {wmt}') - src, tgt = src_tgt.split('-') - ssrc, stgt = src[:2], tgt[:2] - if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'): - # reversed direction may have different test set - test_src = f'{raw_folder}/test.{tgt}-{src}.{src}' - else: - 
test_src = f'{raw_folder}/test.{src}-{tgt}.{src}' - cmd1 = f'cat {test_src} | sacrebleu -t "{wmt}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""' - test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}' - cmd2 = f'cat {test_tgt} | sacrebleu -t "{wmt}" -l {ssrc}-{stgt}; [ $? -eq 0 ] || echo ""' - bleu1 = run_eval_bleu(cmd1) - if bleu1 != 100.0: - not_matchings.append(f'{wmt}:{src_tgt} source side not matching: {test_src}') - bleu2 = run_eval_bleu(cmd2) - if bleu2 != 100.0: - not_matchings.append(f'{wmt}:{src_tgt} target side not matching: {test_tgt}') - return not_matchings - -def download_and_extract( - to_folder, lang_pairs, dl_dataset, - to_manually_download_urls, - completed_urls={}, completed_extraction={}, - debug=False): - - dl_folder = f'{to_folder}/downloads' - extract_folder = f'{to_folder}/extracted' - raw_folder = f'{to_folder}/raw' - lid_filtered = f'{to_folder}/lid_filtered' - - os.makedirs(extract_folder, exist_ok=True) - os.makedirs(raw_folder, exist_ok=True) - os.makedirs(lid_filtered, exist_ok=True) - - - to_be_manually_dowloaded = check_need_manual_downalod(dl_folder, to_manually_download_urls) - - completed_urls = download_dataset( - dl_folder, dl_dataset, completed_urls) - if debug: - print('completed urls: ', completed_urls) - - - extracted_folders = extract_all_files( - completed_urls, - extract_folder=extract_folder, - completed_extraction=completed_extraction, - debug=debug) - if debug: - print('download files have been extracted to folders: ', extracted_folders) - - converted_files = convert_files_if_needed(extracted_folders, debug=False) - for src_tgt in lang_pairs: - print(f'working on {dl_dataset.name}: {src_tgt}') - src, tgt = src_tgt.split('-') - concat_into_splits(dl_dataset, - src=src, tgt=tgt, - extracted_folders=extracted_folders, - to_folder=raw_folder, debug=debug) - print('completed data into: ', raw_folder) - -def download_czang16(download_to, username=None): - wgets = [ - f'wget --user={username} --password=czeng -P {download_to} http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar' - for i in range(10)] - cmds = [] - for i, cmd in enumerate(wgets): - filename = f'{download_to}/data-plaintext-format.{i}.tar' - if os.path.exists(filename): - print(f'{filename} has already been downloaded; so skip') - continue - cmds.append(cmd) - if cmds and username is None: - raise ValueError('No czeng username is given; please register at http://ufal.mff.cuni.cz/czeng/czeng16 to obtain username to download') - for cmd in cmds: - call(cmd) - print('done with downloading czeng1.6') - -def download_czeng17_script(download_to, extract_folder, debug=False): - url = 'http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip' - filename = f'{download_to}/convert_czeng16_to_17.pl.zip' - extract_to = f'{extract_folder}/{get_extract_name(filename)}' - script_path = f'{extract_to}/convert_czeng16_to_17.pl' - - if not os.path.exists(script_path): - wget.download(url, filename, bar=bar_custom) - extract_to = extract_file(f'{download_to}/convert_czeng16_to_17.pl.zip', extract_folder, get_extract_name=get_extract_name, debug=debug) - return script_path - -czeng17_script_path = "" -def convert2czeng17(file, debug): - en_file = f'{file}.en' - cs_file = f'{file}.cs' - - if not os.path.exists(en_file) or not os.path.exists(cs_file): - cs_cmd = f'cat {file} | perl {czeng17_script_path} | cut -f3 > {cs_file}' - en_cmd = f'cat {file} | perl {czeng17_script_path} | cut -f4 > {en_file}' - call(cs_cmd, debug) - call(en_cmd, debug) - else: - print(f'already 
extracted: {en_file} and {cs_file}') - return file - -def extract_czeng17(extract_folder, debug=False): - url = 'http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip' - filename = f'{download_to}/convert_czeng16_to_17.pl.zip' - extract_to = f'{extract_folder}/{get_extract_name(filename)}' - script_path = f'{extract_to}/convert_czeng16_to_17.pl' - - if not os.path.exists(script_path): - wget.download(url, filename, bar=bar_custom) - extract_to = extract_file(f'{download_to}/convert_czeng16_to_17.pl.zip', extract_folder, get_extract_name=get_extract_name, debug=debug) - return script_path - -######### -# definitions of wmt data sources -# for es-en -# Punctuation in the official test sets will be encoded with ASCII characters (not complex Unicode characters) as much as possible. You may want to normalize your system's output before submission. You are able able to use a rawer version of the test sets that does not have this normalization. -# script to normalize punctuation: http://www.statmt.org/wmt11/normalize-punctuation.perl -wmt13_es_en = DLDataset( - name='wmt13_es-en', - train_urls=[ - 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz', - 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', - 'http://www.statmt.org/wmt13/training-parallel-un.tgz', - 'http://www.statmt.org/wmt13/training-parallel-nc-v8.tgz', - ], - valid_urls=[ - ('http://www.statmt.org/wmt13/dev.tgz', 'wmt13_dev.tgz') - ], - test_urls=[ - ('http://www.statmt.org/wmt13/test.tgz', 'wmt13_test.tgz') - ], - train_files_patterns=[ - ('*/europarl-v7.{src}-{tgt}.{lang}', ['es-en']), - ('*commoncrawl.{src}-{tgt}.{lang}', ['es-en']), - ('*/news-commentary-v8.{src}-{tgt}.{lang}', ['es-en']), - ('un/*undoc.2000.{src}-{tgt}.{lang}', ['es-en']), - ] , - valid_files_patterns=[ - ('dev/newstest2012.{lang}', ['es-en']) - ], - test_files_patterns=[ - ('test/newstest*.{lang}', ['es-en']) - ], -) - -wmt14_de_fr_en = DLDataset( - name='wmt14_de_fr_en', - train_urls=[ - 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz', - 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', - 'http://www.statmt.org/wmt13/training-parallel-un.tgz', - 'http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz', - ('http://www.statmt.org/wmt10/training-giga-fren.tar', 'training-giga-fren.gz.tar'), #it is actuall a gz.tar - ], - valid_urls=[ - ('http://www.statmt.org/wmt14/dev.tgz', 'wmt14_dev.tgz'), - ], - test_urls=[ - ('http://www.statmt.org/wmt14/test-full.tgz', 'wmt14_test_full.tgz'), # cleaned test sets - ], - train_files_patterns=[ - ('*/europarl-v7.{src}-{tgt}.{lang}', ['fr-en', 'de-en']), - ('*commoncrawl.{src}-{tgt}.{lang}', ['fr-en', 'de-en']), - ('*/*news-commentary-v9.{src}-{tgt}.{lang}', ['fr-en', 'de-en']), - ('un/undoc.2000.{src}-{tgt}.{lang}', ['fr-en']), - ('*giga-{src}{tgt}*{lang}', ['fr-en']) - ], - valid_files_patterns=[ - ('dev/newstest2013.{lang}', ['fr-en', 'de-en']) - ], - test_files_patterns=[ - ('test-full/newstest*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['en-de', 'de-en', 'fr-en', 'en-fr']), - ], -) - -# pip install git+https://github.com/amake/tmx2corpus.git -wmt16_ro_en = DLDataset( - name='wmt16_ro-en', - train_urls=[ - ('http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz', 'wmt16_training-parallel-ep-v8.tgz'), - ('http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-ro.tmx.gz', 'en-ro.tmx.gz'), - ], - valid_urls=[ - ('http://data.statmt.org/wmt16/translation-task/dev-romanian-updated.tgz', 'wmt16_dev.tgz') - ], - test_urls=[ - 
('http://data.statmt.org/wmt16/translation-task/test.tgz', 'wmt16_test.tgz') - ], - train_files_patterns=[ - ('*/*europarl-v8.{src}-{tgt}.{lang}', ['ro-en']), - ('bitext.{lang}', ['ro-en']) #setimes from tmux - ] , - valid_files_patterns=[ - ('dev/newsdev2016*{src}{tgt}*.{lang}', ['ro-en', 'ro-en']) - ], - test_files_patterns=[ - ('test/newstest*{src}{tgt}*.{lang}', ['ro-en', 'en-ro']) - ], -) - -cwmt_wmt_instruction = 'cwmt download instruction at: http://nlp.nju.edu.cn/cwmt-wmt' -wmt17_fi_lv_tr_zh_en_manual_downloads = [ - # fake urls to have unique keys for the data - ( ('http://nlp.nju.edu.cn/cwmt-wmt/CASIA2015.zip', 'CASIA2015.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2011.zip', 'CASICT2011.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2015.zip', 'CASICT2015.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2015.zip', 'Datum2015.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2017.zip', 'Datum2017.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/NEU2017.zip', 'NEU2017.zip'), cwmt_wmt_instruction), -] -wmt17_fi_lv_tr_zh_en = DLDataset( - name='wmt17_fi_lv_tr_zh_en', - train_urls=[ - ('http://data.statmt.org/wmt17/translation-task/training-parallel-ep-v8.tgz', 'wmt17_training-parallel-ep-v8.tgz'), - 'http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz', - 'http://www.statmt.org/wmt15/wiki-titles.tgz', - ('http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-tr.tmx.gz', 'en-tr.tmx.gz'), - ('http://data.statmt.org/wmt17/translation-task/rapid2016.tgz', 'wmt17_rapid2016.tgz'), - 'http://data.statmt.org/wmt17/translation-task/leta.v1.tgz', - 'http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz', - 'http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz', - (('https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.00', - 'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.01',), 'UNv1.0.en-zh.tar.gz'), - #manually download files: - ('http://nlp.nju.edu.cn/cwmt-wmt/CASIA2015.zip', 'CASIA2015.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2011.zip', 'CASICT2011.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2015.zip', 'CASICT2015.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2015.zip', 'Datum2015.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2017.zip', 'Datum2017.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/NEU2017.zip', 'NEU2017.zip'), - ], - valid_urls=[ - ('http://data.statmt.org/wmt17/translation-task/dev.tgz', 'wmt17_dev.tgz'), - ], - test_urls=[ - #NEW: Improved translations for zh test sets - ('http://data.statmt.org/wmt17/translation-task/test-update-1.tgz', 'wmt17_test_zh_en.tgz'), - ('http://data.statmt.org/wmt17/translation-task/test.tgz', 'wmt17_test_others.tgz') - ], - train_files_patterns=[ - ('casict*/cas*{src:ch}{tgt:en}.txt', ['zh-en', 'zh-en'] ), - ('casia*/cas*{src:ch}{tgt:en}.txt', ['zh-en', 'zh-en'] ), - ('dataum*/Book*{src:cn}{tgt:en}.txt', ['zh-en', 'zh-en']), - ('neu*/NEU*{src:cn}{tgt:en}.txt', ['zh-en', 'zh-en'] ), - ('*/*UNv1.0.en-zh.{src:zh}{tgt:en}', ['zh-en']), - ('training/*news-commentary-v12.{src}-{tgt}.{lang}', ['zh-en', ]), - - ('*/*europarl-v8.{src}-{tgt}.{lang}', ['fi-en', 'lv-en']), - ('wiki/fi-en/titles.{src}-{tgt}.{lang}', ['fi-en', ]), - ('rapid2016.{tgt}-{src}.{lang}', ['fi-en', 'lv-en']), - ('*/leta.{lang}', ['lv-en']), - ('*/dcep.{lang}', ['lv-en']), - ('*/farewell.{lang}', ['lv-en']), - ('bitext.{lang}', ['tr-en']), - ] , - 
valid_files_patterns=[ - ('dev/newsdev2017*{src}{tgt}-{src:src}{tgt:ref}.{lang}', - [ - 'fi-en', 'lv-en', 'tr-en', 'zh-en', - 'en-fi', 'en-lv', 'en-tr', 'en-zh' - ]), - ('dev/newstest2016*{src}{tgt}-{src:src}{tgt:ref}.{lang}', - [ - 'fi-en', 'tr-en', - 'en-fi', 'en-tr', - ]), - ], - test_files_patterns=[ - ('test/newstest2017-{src}{tgt}-{src:src}{tgt:ref}.{lang}', - [ - 'fi-en', 'lv-en', 'tr-en', - 'en-fi', 'en-lv', 'en-tr', - ]), - ('newstest2017-{src}{tgt}-{src:src}{tgt:ref}.{lang}', - [ - 'zh-en', - 'en-zh' - ]), - ], -) - -czeng_instruction = 'download instruction at: http://ufal.mff.cuni.cz/czeng/czeng16' -#alternative: use the prepared data but detokenize it? -wmt18_cs_et_en_manual_downloads = [ -#for cs, need to register and download; Register and download CzEng 1.6. -#Better results can be obtained by using a subset of sentences, released under a new version name CzEng 1.7. - # ((f'http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar', - # f'data-plaintext-format.{i}.tar'), czeng_instruction) - # for i in range(10) -] - -wmt18_cs_et_en = DLDataset( - name='wmt18_cs_et_en', - train_urls=[ - 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz', - 'http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz', - 'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz', - 'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-et.zipporah0-dedup-clean.tgz', - 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', - 'http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz', - ('http://data.statmt.org/wmt18/translation-task/rapid2016.tgz', 'wmt18_rapid2016.tgz'), - # (tuple( - # (f'http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar', - # f'data-plaintext-format.{i}.tar') - # for i in range(10) - # ), - # 'czeng16_data_plaintext.gz.tar'), - ], - valid_urls=[ - ('http://data.statmt.org/wmt18/translation-task/dev.tgz', 'wmt18_dev.tgz'), - ], - test_urls=[ - ('http://data.statmt.org/wmt18/translation-task/test.tgz', 'wmt18_test.tgz'), - ], - train_files_patterns=[ - # ('*/*europarl-v7.{src}-{tgt}.{lang}', ['cs-en']), - ('*/*europarl-v8.{src}-{tgt}.{lang}', ['et-en']), - # ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['cs-en', 'et-en']), - ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['et-en']), - # ('*commoncrawl.{src}-{tgt}.{lang}', ['cs-en']), - # ('*/news-commentary-v13.{src}-{tgt}.{lang}', ['cs-en']), - # ('data.plaintext-format/*train.{lang}', ['cs-en']), - ('rapid2016.{tgt}-{src}.{lang}', ['et-en']), - ] , - valid_files_patterns=[ - ('dev/newsdev2018*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['et-en']), - # ('dev/newstest2017*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['cs-en']) - ], - test_files_patterns=[ - ('test/newstest2018-{src}{tgt}-{src:src}{tgt:ref}.{lang}', - # ['cs-en', 'et-en']), - ['et-en']), - ] -) - -ru_en_yandex_instruction = 'Yandex Corpus download instruction at: https://translate.yandex.ru/corpus?lang=en' -wmt19_ru_gu_kk_lt_manual_downloads = [ - (('https://translate.yandex.ru/corpus?lang=en', 'wmt19_1mcorpus.zip'), ru_en_yandex_instruction) -] -wmt19_ru_gu_kk_lt = DLDataset( - name='wmt19_ru_gu_kk_lt', - train_urls=[ - 'http://www.statmt.org/europarl/v9/training/europarl-v9.lt-en.tsv.gz', - 'https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-lt.bicleaner07.tmx.gz', - 
'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz', - 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', - 'http://data.statmt.org/news-commentary/v14/training/news-commentary-v14-wmt19.en-kk.tsv.gz', - 'http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.en-ru.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.kk-en.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.ru-en.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.kk-en.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.lt-en.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.gu-en.tsv.gz', - (('https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.00', - 'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.01', - 'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.02',), - 'wmt19_UNv1.0.en-ru.tar.gz'), - 'https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-lt.tmx.zip', - ('https://translate.yandex.ru/corpus?lang=en', 'wmt19_1mcorpus.zip'), - ], - valid_urls=[ - ('http://data.statmt.org/wmt19/translation-task/dev.tgz', 'wmt19_dev.tgz'), - ], - test_urls=[ - ('http://data.statmt.org/wmt19/translation-task/test.tgz', 'wmt19_test.tgz'), - ], - train_files_patterns=[ - ('*europarl-v9.{src}-{tgt}.tsv.{lang}', ['lt-en']), - #paracrawl - ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['ru-en']), - ('bitext.{lang}', ['lt-en',]), - ('*commoncrawl.{src}-{tgt}.{lang}', ['ru-en',]), - ('*news-commentary-v14-wmt19.{tgt}-{src}.tsv.{lang}', ['kk-en', ]), - ('*news-commentary-v14.{tgt}-{src}.tsv.{lang}', ['ru-en']), - #yandex - ('corpus.{tgt}_{src}.1m.{lang}', ['ru-en']), - ('wikititles_v1_wikititles-v1.{src}-{tgt}.tsv.{lang}', ['ru-en', 'kk-en', 'lt-en', 'gu-en']), - ('*/UNv1.0.{tgt}-{src}.{lang}', ['ru-en']), - #rapid - ('bitext.{lang}', ['lt-en']) - ], - valid_files_patterns=[ - ('dev/newsdev2019*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['gu-en', 'kk-en', 'lt-en']), - ('dev/newstest2018*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['ru-en']), - ], - test_files_patterns=[ - ('sgm/newstest2019-{src}{tgt}-{src:src}{tgt:ref}.{lang}', - ['ru-en', 'gu-en', 'kk-en', 'lt-en', 'en-ru', 'en-gu', 'en-kk', 'en-lt']), - ] -) - - -######### - -if __name__ == "__main__": - # speed up the downloads with multiple processing - dl_folder = f'{to_data_path}/downloads' - extract_folder = f'{to_data_path}/extracted' - - urls = [ - url - for dataset in [wmt13_es_en, wmt14_de_fr_en, wmt16_ro_en, wmt18_cs_et_en, wmt19_ru_gu_kk_lt] - for urls in [dataset.train_urls, dataset.valid_urls, dataset.test_urls] - for url in urls - ] - urls = set(urls) - download_multi(dl_folder, extract_folder, urls, num_processes=8, debug=True) - - # check manually downlaods - to_manually_download_urls = ( - wmt17_fi_lv_tr_zh_en_manual_downloads + wmt18_cs_et_en_manual_downloads + wmt19_ru_gu_kk_lt_manual_downloads - ) - to_be_manually_dowloaded = check_need_manual_downalod(dl_folder, to_manually_download_urls) - if len(to_be_manually_dowloaded) > 0: - print('Missing files that need to be downloaded manually; stop the process now.') - exit(-1) - - completed_urls = {} - completed_extraction = {} - def work_on_wmt(directions, wmt_data): - download_and_extract( - to_data_path, - directions, - wmt_data, - to_manually_download_urls=to_manually_download_urls, - completed_urls=completed_urls, completed_extraction=completed_extraction, debug=True) - - 
work_on_wmt( - ['es_XX-en_XX'], - wmt13_es_en,) - work_on_wmt( - [ - 'fr_XX-en_XX', 'en_XX-fr_XX', - # 'en_XX-de_DE', 'de_DE-en_XX', - ], - wmt14_de_fr_en,) - work_on_wmt( - ['ro_RO-en_XX', 'en_XX-ro_XX'], - wmt16_ro_en,) - work_on_wmt( - [ - # 'zh_CN-en_XX', - 'lv_LV-en_XX', 'fi_FI-en_XX', 'tr_TR-en_XX', - #in case the reversed directions have different train/valid/test data - # 'en_XX-zh_CN', - 'en_XX-lv_LV', 'en_XX-fi_FI', 'en_XX-tr_TR', - ], - wmt17_fi_lv_tr_zh_en, ) - # czeng17_script_path = download_czeng17_script(download_to, extract_to, debug=False) - # cz_username = None - work_on_wmt( - [ - # 'cs_CZ-en_XX', - 'et_EE-en_XX'], - wmt18_cs_et_en,) - work_on_wmt( - [ - # 'ru_RU-en_XX', 'en_XX-ru_RU', - 'gu_IN-en_XX', 'kk_KZ-en_XX', 'lt_LT-en_XX', - #in case the reversed directions have different train/valid/test data - 'en_XX-gu_IN', 'en_XX-kk_KZ', 'en_XX-lt_LT' - ], - wmt19_ru_gu_kk_lt,) - - not_matching = check_wmt_test_bleu( - f'{to_data_path}/raw', - [ - ('wmt13', ['es_XX-en_XX']), - ('wmt14/full', ['fr_XX-en_XX',]), - ('wmt16', ['ro_RO-en_XX',]), - # ('wmt17/improved', ['zh_CN-en_XX']), - ('wmt17', [ 'lv_LV-en_XX', 'fi_FI-en_XX', 'tr_TR-en_XX']), - ('wmt18', ['cs_CZ-en_XX', 'et_EE-en_XX']), - ('wmt19', ['gu_IN-en_XX', 'kk_KZ-en_XX', 'lt_LT-en_XX']), - #'ru_RU-en_XX', - ] - ) - if len(not_matching) > 0: - print('the following datasets do not have matching test datasets:\n\t', '\n\t'.join(not_matching)) - diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/offset_tokens_dataset.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/data/offset_tokens_dataset.py deleted file mode 100644 index 6fabbdcdaa1a8f70d8d8c07db4cd53754503c194..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/offset_tokens_dataset.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . 
import BaseWrapperDataset - - -class OffsetTokensDataset(BaseWrapperDataset): - def __init__(self, dataset, offset): - super().__init__(dataset) - self.offset = offset - - def __getitem__(self, idx): - return self.dataset[idx] + self.offset diff --git a/spaces/mueller-franzes/medfusion-app/tests/dataset/test_dataset_3d.py b/spaces/mueller-franzes/medfusion-app/tests/dataset/test_dataset_3d.py deleted file mode 100644 index 54166501fca10d85c99d09fe94c36532e93f5713..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/tests/dataset/test_dataset_3d.py +++ /dev/null @@ -1,25 +0,0 @@ -from medical_diffusion.data.datasets import SimpleDataset3D - -import matplotlib.pyplot as plt -from pathlib import Path -from torchvision.utils import save_image -import torch - -path_out = Path().cwd()/'results'/'test' -path_out.mkdir(parents=True, exist_ok=True) - - -ds = SimpleDataset3D( - crawler_ext='nii.gz', - image_resize=None, - image_crop=None, - path_root='/mnt/hdd/datasets/breast/DUKE/dataset_lr_256_256_32', - use_znorm=False -) - -image = ds[0]['source'] # [C, D, H, W] - -image = image.swapaxes(0, 1) # [D, C, H, W] -> treat D as Batch Dimension -image = image/2+0.5 - -save_image(image, path_out/'test.png') \ No newline at end of file diff --git a/spaces/muellerzr/accelerate-presentation/README.md b/spaces/muellerzr/accelerate-presentation/README.md deleted file mode 100644 index 76922404a71bb554f7a04cdfe64cccb441234bb9..0000000000000000000000000000000000000000 --- a/spaces/muellerzr/accelerate-presentation/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Accelerate Presentation -emoji: 🔥 -colorFrom: green -colorTo: blue -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mun-ahmd/HairType/app.py b/spaces/mun-ahmd/HairType/app.py deleted file mode 100644 index 26c4e47342c64b799dc82703148fc86a759e3c25..0000000000000000000000000000000000000000 --- a/spaces/mun-ahmd/HairType/app.py +++ /dev/null @@ -1,28 +0,0 @@ -import gradio as gr -from fastcore.all import * -from fastai.vision.all import * - -learn_inf = load_learner("export.pkl") - -def Perform(img): - global learn_inf - pred, pred_idx, probs = learn_inf.predict(PILImage.create(img)) - return f"Hair Type: {pred}\nProbability: {(100*probs[pred_idx]):.2f}%" - -uploadTab = gr.Interface( - fn=Perform, - inputs=[gr.Image(shape=(520,520), source="upload")], - outputs=["text"], - title="Hair Type Detector" -) -clickTab = gr.Interface( - fn=Perform, - inputs=[gr.Image(shape=(520,520), source="webcam")], - outputs=["text"], - title="Hair Type Detector" -) -tface = gr.TabbedInterface( - [uploadTab, clickTab], - tab_names=["upload","click"] -) -tface.launch() diff --git a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/data/aug.py b/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/data/aug.py deleted file mode 100644 index b1246250924e79511b58cd3d7ab79de8012f8949..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/data/aug.py +++ /dev/null @@ -1,84 +0,0 @@ -from albumentations import DualIAATransform, to_tuple -import imgaug.augmenters as iaa - -class IAAAffine2(DualIAATransform): - """Place a regular grid of points on the input and randomly move the neighbourhood of these point around - via affine transformations. 
- - Note: This class introduce interpolation artifacts to mask if it has values other than {0;1} - - Args: - p (float): probability of applying the transform. Default: 0.5. - - Targets: - image, mask - """ - - def __init__( - self, - scale=(0.7, 1.3), - translate_percent=None, - translate_px=None, - rotate=0.0, - shear=(-0.1, 0.1), - order=1, - cval=0, - mode="reflect", - always_apply=False, - p=0.5, - ): - super(IAAAffine2, self).__init__(always_apply, p) - self.scale = dict(x=scale, y=scale) - self.translate_percent = to_tuple(translate_percent, 0) - self.translate_px = to_tuple(translate_px, 0) - self.rotate = to_tuple(rotate) - self.shear = dict(x=shear, y=shear) - self.order = order - self.cval = cval - self.mode = mode - - @property - def processor(self): - return iaa.Affine( - self.scale, - self.translate_percent, - self.translate_px, - self.rotate, - self.shear, - self.order, - self.cval, - self.mode, - ) - - def get_transform_init_args_names(self): - return ("scale", "translate_percent", "translate_px", "rotate", "shear", "order", "cval", "mode") - - -class IAAPerspective2(DualIAATransform): - """Perform a random four point perspective transform of the input. - - Note: This class introduce interpolation artifacts to mask if it has values other than {0;1} - - Args: - scale ((float, float): standard deviation of the normal distributions. These are used to sample - the random distances of the subimage's corners from the full image's corners. Default: (0.05, 0.1). - p (float): probability of applying the transform. Default: 0.5. - - Targets: - image, mask - """ - - def __init__(self, scale=(0.05, 0.1), keep_size=True, always_apply=False, p=0.5, - order=1, cval=0, mode="replicate"): - super(IAAPerspective2, self).__init__(always_apply, p) - self.scale = to_tuple(scale, 1.0) - self.keep_size = keep_size - self.cval = cval - self.mode = mode - - @property - def processor(self): - return iaa.PerspectiveTransform(self.scale, keep_size=self.keep_size, mode=self.mode, cval=self.cval) - - def get_transform_init_args_names(self): - return ("scale", "keep_size") diff --git a/spaces/myscale/ChatData/helper.py b/spaces/myscale/ChatData/helper.py deleted file mode 100644 index 5144a5ccd3575faa185c19f8158973a954c2ed18..0000000000000000000000000000000000000000 --- a/spaces/myscale/ChatData/helper.py +++ /dev/null @@ -1,506 +0,0 @@ - -import json -import time -import hashlib -from typing import Dict, Any -import re -import pandas as pd -from os import environ -import streamlit as st -import datetime - -from sqlalchemy import Column, Text, create_engine, MetaData -from langchain.agents import AgentExecutor -try: - from sqlalchemy.orm import declarative_base -except ImportError: - from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker -from clickhouse_sqlalchemy import ( - Table, make_session, get_declarative_base, types, engines -) -from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain -from langchain_experimental.retrievers.vector_sql_database import VectorSQLDatabaseChainRetriever -from langchain.utilities.sql_database import SQLDatabase -from langchain.chains import LLMChain -from sqlalchemy import create_engine, MetaData -from langchain.prompts import PromptTemplate, ChatPromptTemplate, \ - SystemMessagePromptTemplate, HumanMessagePromptTemplate -from langchain.prompts.prompt import PromptTemplate -from langchain.chat_models import ChatOpenAI -from langchain.schema import BaseRetriever -from langchain import OpenAI -from 
langchain.chains.query_constructor.base import AttributeInfo, VirtualColumnName -from langchain.retrievers.self_query.base import SelfQueryRetriever -from langchain.retrievers.self_query.myscale import MyScaleTranslator -from langchain.embeddings import HuggingFaceInstructEmbeddings, SentenceTransformerEmbeddings -from langchain.vectorstores import MyScaleSettings -from chains.arxiv_chains import MyScaleWithoutMetadataJson -from langchain.schema import Document -from langchain.prompts.prompt import PromptTemplate -from langchain.prompts.chat import MessagesPlaceholder -from langchain.agents.openai_functions_agent.agent_token_buffer_memory import AgentTokenBufferMemory -from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent -from langchain.schema import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage -from langchain.memory import SQLChatMessageHistory -from langchain.memory.chat_message_histories.sql import \ - BaseMessageConverter, DefaultMessageConverter -from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict -from langchain.agents.agent_toolkits import create_retriever_tool -from prompts.arxiv_prompt import combine_prompt_template, _myscale_prompt -from chains.arxiv_chains import ArXivQAwithSourcesChain, ArXivStuffDocumentChain -from chains.arxiv_chains import VectorSQLRetrieveCustomOutputParser -environ['TOKENIZERS_PARALLELISM'] = 'true' -environ['OPENAI_API_BASE'] = st.secrets['OPENAI_API_BASE'] - -# query_model_name = "gpt-3.5-turbo-instruct" -query_model_name = "text-davinci-003" -chat_model_name = "gpt-3.5-turbo-16k" - - -OPENAI_API_KEY = st.secrets['OPENAI_API_KEY'] -OPENAI_API_BASE = st.secrets['OPENAI_API_BASE'] -MYSCALE_USER = st.secrets['MYSCALE_USER'] -MYSCALE_PASSWORD = st.secrets['MYSCALE_PASSWORD'] -MYSCALE_HOST = st.secrets['MYSCALE_HOST'] -MYSCALE_PORT = st.secrets['MYSCALE_PORT'] - -COMBINE_PROMPT = ChatPromptTemplate.from_strings( - string_messages=[(SystemMessagePromptTemplate, combine_prompt_template), - (HumanMessagePromptTemplate, '{question}')]) - -def hint_arxiv(): - st.info("We provides you metadata columns below for query. Please choose a natural expression to describe filters on those columns.\n\n" - "For example: \n\n" - "*If you want to search papers with complex filters*:\n\n" - "- What is a Bayesian network? 
Please use articles published later than Feb 2018 and with more than 2 categories and whose title like `computer` and must have `cs.CV` in its category.\n\n" - "*If you want to ask questions based on papers in database*:\n\n" - "- What is PageRank?\n" - "- Did Geoffrey Hinton wrote paper about Capsule Neural Networks?\n" - "- Introduce some applications of GANs published around 2019.\n" - "- 请根据 2019 年左右的文章介绍一下 GAN 的应用都有哪些\n" - "- Veuillez présenter les applications du GAN sur la base des articles autour de 2019 ?\n" - "- Is it possible to synthesize room temperature super conductive material?") - - -def hint_sql_arxiv(): - st.info("You can retrieve papers with button `Query` or ask questions based on retrieved papers with button `Ask`.", icon='💡') - st.markdown('''```sql -CREATE TABLE default.ChatArXiv ( - `abstract` String, - `id` String, - `vector` Array(Float32), - `metadata` Object('JSON'), - `pubdate` DateTime, - `title` String, - `categories` Array(String), - `authors` Array(String), - `comment` String, - `primary_category` String, - VECTOR INDEX vec_idx vector TYPE MSTG('fp16_storage=1', 'metric_type=Cosine', 'disk_mode=3'), - CONSTRAINT vec_len CHECK length(vector) = 768) -ENGINE = ReplacingMergeTree ORDER BY id -```''') - - -def hint_wiki(): - st.info("We provides you metadata columns below for query. Please choose a natural expression to describe filters on those columns.\n\n" - "For example: \n\n" - "- Which company did Elon Musk found?\n" - "- What is Iron Gwazi?\n" - "- What is a Ring in mathematics?\n" - "- 苹果的发源地是那里?\n") - - -def hint_sql_wiki(): - st.info("You can retrieve papers with button `Query` or ask questions based on retrieved papers with button `Ask`.", icon='💡') - st.markdown('''```sql -CREATE TABLE wiki.Wikipedia ( - `id` String, - `title` String, - `text` String, - `url` String, - `wiki_id` UInt64, - `views` Float32, - `paragraph_id` UInt64, - `langs` UInt32, - `emb` Array(Float32), - VECTOR INDEX vec_idx emb TYPE MSTG('fp16_storage=1', 'metric_type=Cosine', 'disk_mode=3'), - CONSTRAINT emb_len CHECK length(emb) = 768) -ENGINE = ReplacingMergeTree ORDER BY id -```''') - - -sel_map = { - 'Wikipedia': { - "database": "wiki", - "table": "Wikipedia", - "hint": hint_wiki, - "hint_sql": hint_sql_wiki, - "doc_prompt": PromptTemplate( - input_variables=["page_content", "url", "title", "ref_id", "views"], - template="Title for Doc #{ref_id}: {title}\n\tviews: {views}\n\tcontent: {page_content}\nSOURCE: {url}"), - "metadata_cols": [ - AttributeInfo( - name="title", - description="title of the wikipedia page", - type="string", - ), - AttributeInfo( - name="text", - description="paragraph from this wiki page", - type="string", - ), - AttributeInfo( - name="views", - description="number of views", - type="float" - ), - ], - "must_have_cols": ['id', 'title', 'url', 'text', 'views'], - "vector_col": "emb", - "text_col": "text", - "metadata_col": "metadata", - "emb_model": lambda: SentenceTransformerEmbeddings( - model_name='sentence-transformers/paraphrase-multilingual-mpnet-base-v2',), - "tool_desc": ("search_among_wikipedia", "Searches among Wikipedia and returns related wiki pages"), - }, - 'ArXiv Papers': { - "database": "default", - "table": "ChatArXiv", - "hint": hint_arxiv, - "hint_sql": hint_sql_arxiv, - "doc_prompt": PromptTemplate( - input_variables=["page_content", "id", "title", "ref_id", - "authors", "pubdate", "categories"], - template="Title for Doc #{ref_id}: {title}\n\tAbstract: {page_content}\n\tAuthors: {authors}\n\tDate of Publication: 
{pubdate}\n\tCategories: {categories}\nSOURCE: {id}"), - "metadata_cols": [ - AttributeInfo( - name=VirtualColumnName(name="pubdate"), - description="The year the paper is published", - type="timestamp", - ), - AttributeInfo( - name="authors", - description="List of author names", - type="list[string]", - ), - AttributeInfo( - name="title", - description="Title of the paper", - type="string", - ), - AttributeInfo( - name="categories", - description="arxiv categories to this paper", - type="list[string]" - ), - AttributeInfo( - name="length(categories)", - description="length of arxiv categories to this paper", - type="int" - ), - ], - "must_have_cols": ['title', 'id', 'categories', 'abstract', 'authors', 'pubdate'], - "vector_col": "vector", - "text_col": "abstract", - "metadata_col": "metadata", - "emb_model": lambda: HuggingFaceInstructEmbeddings( - model_name='hkunlp/instructor-xl', - embed_instruction="Represent the question for retrieving supporting scientific papers: "), - "tool_desc": ("search_among_scientific_papers", "Searches among scientific papers from ArXiv and returns research papers"), - } -} - -def build_embedding_model(_sel): - """Build embedding model - """ - with st.spinner("Loading Model..."): - embeddings = sel_map[_sel]["emb_model"]() - return embeddings - - -def build_chains_retrievers(_sel: str) -> Dict[str, Any]: - """build chains and retrievers - - :param _sel: selected knowledge base - :type _sel: str - :return: _description_ - :rtype: Dict[str, Any] - """ - metadata_field_info = sel_map[_sel]["metadata_cols"] - retriever = build_self_query(_sel) - chain = build_qa_chain(_sel, retriever, name="Self Query Retriever") - sql_retriever = build_vector_sql(_sel) - sql_chain = build_qa_chain(_sel, sql_retriever, name="Vector SQL") - - return { - "metadata_columns": [{'name': m.name.name if type(m.name) is VirtualColumnName else m.name, 'desc': m.description, 'type': m.type} for m in metadata_field_info], - "retriever": retriever, - "chain": chain, - "sql_retriever": sql_retriever, - "sql_chain": sql_chain - } - -def build_self_query(_sel: str) -> SelfQueryRetriever: - """Build self querying retriever - - :param _sel: selected knowledge base - :type _sel: str - :return: retriever used by chains - :rtype: SelfQueryRetriever - """ - with st.spinner(f"Connecting DB for {_sel}..."): - myscale_connection = { - "host": MYSCALE_HOST, - "port": MYSCALE_PORT, - "username": MYSCALE_USER, - "password": MYSCALE_PASSWORD, - } - config = MyScaleSettings(**myscale_connection, - database=sel_map[_sel]["database"], - table=sel_map[_sel]["table"], - column_map={ - "id": "id", - "text": sel_map[_sel]["text_col"], - "vector": sel_map[_sel]["vector_col"], - "metadata": sel_map[_sel]["metadata_col"] - }) - doc_search = MyScaleWithoutMetadataJson(st.session_state[f"emb_model_{_sel}"], config, - must_have_cols=sel_map[_sel]['must_have_cols']) - - with st.spinner(f"Building Self Query Retriever for {_sel}..."): - metadata_field_info = sel_map[_sel]["metadata_cols"] - retriever = SelfQueryRetriever.from_llm( - OpenAI(model_name=query_model_name, openai_api_key=OPENAI_API_KEY, temperature=0), - doc_search, "Scientific papers indexes with abstracts. 
All in English.", metadata_field_info, - use_original_query=False, structured_query_translator=MyScaleTranslator()) - return retriever - -def build_vector_sql(_sel: str)->VectorSQLDatabaseChainRetriever: - """Build Vector SQL Database Retriever - - :param _sel: selected knowledge base - :type _sel: str - :return: retriever used by chains - :rtype: VectorSQLDatabaseChainRetriever - """ - with st.spinner(f'Building Vector SQL Database Retriever for {_sel}...'): - engine = create_engine( - f'clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/{sel_map[_sel]["database"]}?protocol=https') - metadata = MetaData(bind=engine) - PROMPT = PromptTemplate( - input_variables=["input", "table_info", "top_k"], - template=_myscale_prompt, - ) - output_parser = VectorSQLRetrieveCustomOutputParser.from_embeddings( - model=st.session_state[f'emb_model_{_sel}'], must_have_columns=sel_map[_sel]["must_have_cols"]) - sql_query_chain = VectorSQLDatabaseChain.from_llm( - llm=OpenAI(model_name=query_model_name, openai_api_key=OPENAI_API_KEY, temperature=0), - prompt=PROMPT, - top_k=10, - return_direct=True, - db=SQLDatabase(engine, None, metadata, max_string_length=1024), - sql_cmd_parser=output_parser, - native_format=True - ) - sql_retriever = VectorSQLDatabaseChainRetriever( - sql_db_chain=sql_query_chain, page_content_key=sel_map[_sel]["text_col"]) - return sql_retriever - -def build_qa_chain(_sel: str, retriever: BaseRetriever, name: str="Self-query") -> ArXivQAwithSourcesChain: - """_summary_ - - :param _sel: selected knowledge base - :type _sel: str - :param retriever: retriever used by chains - :type retriever: BaseRetriever - :param name: display name, defaults to "Self-query" - :type name: str, optional - :return: QA chain interacts with user - :rtype: ArXivQAwithSourcesChain - """ - with st.spinner(f'Building QA Chain with {name} for {_sel}...'): - chain = ArXivQAwithSourcesChain( - retriever=retriever, - combine_documents_chain=ArXivStuffDocumentChain( - llm_chain=LLMChain( - prompt=COMBINE_PROMPT, - llm=ChatOpenAI(model_name=chat_model_name, - openai_api_key=OPENAI_API_KEY, temperature=0.6), - ), - document_prompt=sel_map[_sel]["doc_prompt"], - document_variable_name="summaries", - - ), - return_source_documents=True, - max_tokens_limit=12000, - ) - return chain - -@st.cache_resource -def build_all() -> Dict[str, Any]: - """build all resources - - :return: sel_map_obj - :rtype: Dict[str, Any] - """ - sel_map_obj = {} - for k in sel_map: - st.session_state[f'emb_model_{k}'] = build_embedding_model(k) - sel_map_obj[k] = build_chains_retrievers(k) - return sel_map_obj - -def create_message_model(table_name, DynamicBase): # type: ignore - """ - Create a message model for a given table name. - - Args: - table_name: The name of the table to use. - DynamicBase: The base class to use for the model. - - Returns: - The model class. 
- - """ - - # Model decleared inside a function to have a dynamic table name - class Message(DynamicBase): - __tablename__ = table_name - id = Column(types.Float64) - session_id = Column(Text) - msg_id = Column(Text, primary_key=True) - type = Column(Text) - addtionals = Column(Text) - message = Column(Text) - __table_args__ = ( - engines.ReplacingMergeTree( - partition_by='session_id', - order_by=('id', 'msg_id')), - {'comment': 'Store Chat History'} - ) - - return Message - -class DefaultClickhouseMessageConverter(DefaultMessageConverter): - """The default message converter for SQLChatMessageHistory.""" - - def __init__(self, table_name: str): - self.model_class = create_message_model(table_name, declarative_base()) - - def to_sql_model(self, message: BaseMessage, session_id: str) -> Any: - tstamp = time.time() - msg_id = hashlib.sha256(f"{session_id}_{message}_{tstamp}".encode('utf-8')).hexdigest() - return self.model_class( - id=tstamp, - msg_id=msg_id, - session_id=session_id, - type=message.type, - addtionals=json.dumps(message.additional_kwargs), - message=json.dumps({ - "type": message.type, - "additional_kwargs": {"timestamp": tstamp}, - "data": message.dict()}) - ) - def from_sql_model(self, sql_message: Any) -> BaseMessage: - msg_dump = json.loads(sql_message.message) - msg = messages_from_dict([msg_dump])[0] - msg.additional_kwargs = msg_dump["additional_kwargs"] - return msg - - def get_sql_model_class(self) -> Any: - return self.model_class - - -def create_agent_executor(name, session_id, llm, tools, **kwargs): - name = name.replace(" ", "_") - conn_str = f'clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}' - chat_memory = SQLChatMessageHistory( - session_id, - connection_string=f'{conn_str}/chat?protocol=https', - custom_message_converter=DefaultClickhouseMessageConverter(name)) - memory = AgentTokenBufferMemory(llm=llm, chat_memory=chat_memory) - - _system_message = SystemMessage( - content=( - "Do your best to answer the questions. " - "Feel free to use any tools available to look up " - "relevant information. Please keep all details in query " - "when calling search functions." 
- ) - ) - prompt = OpenAIFunctionsAgent.create_prompt( - system_message=_system_message, - extra_prompt_messages=[MessagesPlaceholder(variable_name="history")], - ) - agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt) - return AgentExecutor( - agent=agent, - tools=tools, - memory=memory, - verbose=True, - return_intermediate_steps=True, - **kwargs - ) - -@st.cache_resource -def build_tools(): - """build all resources - - :return: sel_map_obj - :rtype: Dict[str, Any] - """ - sel_map_obj = {} - for k in sel_map: - if f'emb_model_{k}' not in st.session_state: - st.session_state[f'emb_model_{k}'] = build_embedding_model(k) - if "sel_map_obj" not in st.session_state: - st.session_state["sel_map_obj"] = {} - if k not in st.session_state.sel_map_obj: - st.session_state["sel_map_obj"][k] = {} - if "langchain_retriever" not in st.session_state.sel_map_obj[k] or "vecsql_retriever" not in st.session_state.sel_map_obj[k]: - st.session_state.sel_map_obj[k].update(build_chains_retrievers(k)) - sel_map_obj[k] = { - "langchain_retriever_tool": create_retriever_tool(st.session_state.sel_map_obj[k]["retriever"], *sel_map[k]["tool_desc"],), - "vecsql_retriever_tool": create_retriever_tool(st.session_state.sel_map_obj[k]["sql_retriever"], *sel_map[k]["tool_desc"],), - } - return sel_map_obj - -@st.cache_resource(max_entries=1) -def build_agents(username): - chat_llm = ChatOpenAI(model_name=chat_model_name, temperature=0.6, openai_api_base=OPENAI_API_BASE, openai_api_key=OPENAI_API_KEY) - agents = {} - cnt = 0 - p = st.progress(0.0, "Building agents with different knowledge base...") - for k in [*sel_map.keys(), 'ArXiv + Wikipedia']: - for m, n in [("langchain_retriever_tool", "Self-querying retriever"), ("vecsql_retriever_tool", "Vector SQL")]: - if k == 'ArXiv + Wikipedia': - tools = [st.session_state.tools[k][m] for k in sel_map.keys()] - elif k == 'Null': - tools = [] - else: - tools = [st.session_state.tools[k][m]] - if k not in agents: - agents[k] = {} - agents[k][n] = create_agent_executor( - "chat_memory", - username, - chat_llm, - tools=tools, - ) - cnt += 1/6 - p.progress(cnt, f"Building with Knowledge Base {k} via Retriever {n}...") - p.empty() - return agents - - -def display(dataframe, columns_=None, index=None): - if len(dataframe) > 0: - if index: - dataframe.set_index(index) - if columns_: - st.dataframe(dataframe[columns_]) - else: - st.dataframe(dataframe) - else: - st.write("Sorry 😵 we didn't find any articles related to your query.\n\nMaybe the LLM is too naughty that does not follow our instruction... 
\n\nPlease try again and use verbs that may match the datatype.", unsafe_allow_html=True) \ No newline at end of file diff --git a/spaces/nandodeomkar/Project/README.md b/spaces/nandodeomkar/Project/README.md deleted file mode 100644 index 8d5b9861c62630ffc4848337f4d743c391e33c25..0000000000000000000000000000000000000000 --- a/spaces/nandodeomkar/Project/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Project -emoji: 📉 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ncduy/emotion-classifier/app.py b/spaces/ncduy/emotion-classifier/app.py deleted file mode 100644 index 52900aac1f223ffeb80814570b64259e57ed6b6c..0000000000000000000000000000000000000000 --- a/spaces/ncduy/emotion-classifier/app.py +++ /dev/null @@ -1,39 +0,0 @@ -import gradio as gr -from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline - -class Emotionclass: - def __init__(self, model: str): - self.model = AutoModelForSequenceClassification.from_pretrained(model) - self.tokenizer = AutoTokenizer.from_pretrained(model) - self.pipeline = pipeline( - "text-classification", - model=self.model, - tokenizer=self.tokenizer, - return_all_scores=True, - ) - - def predict(self, input: str): - output = self.pipeline(input)[0] - result = { - "sad": output[0]["score"], - "joy": output[1]["score"], - "love": output[2]["score"], - "anger": output[3]["score"], - "fear": output[4]["score"], - "surprise": output[5]["score"], - } - return result - -if __name__ == "__main__": - model = Emotionclass("ncduy/bert-base-cased-finetuned-emotion") - iface = gr.Interface( - fn=model.predict, - inputs=gr.inputs.Textbox( - lines=3, - placeholder="type here ...", - label="Input", - ), - outputs="label", - title="Emotion Classifier", - ) - iface.launch() \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Facebookpasswordrevealeronline.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Facebookpasswordrevealeronline.md deleted file mode 100644 index ddd69afb3df60b52cfb6156156c18fc0602ab24c..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Facebookpasswordrevealeronline.md +++ /dev/null @@ -1,103 +0,0 @@ - -

Facebook Password Revealer Online: Is It Possible and Safe?

-

Have you ever wondered if there is a way to reveal someone's Facebook password online? Maybe you forgot your own password and need to access your account urgently. Maybe you want to check on your partner, friend, or colleague's Facebook activity. Maybe you just want to have some fun or satisfy your curiosity.

-

Whatever your reason, you may have come across some tools or websites that claim to be able to reveal anyone's Facebook password by simply entering their email address or phone number. But are these tools really effective and reliable? And more importantly, are they safe and legal to use?

-

facebookpasswordrevealeronline


DOWNLOADhttps://urlcod.com/2uIa7o



-

In this article, we will explore what Facebook password revealer online is, why someone would want to use it, what risks and challenges come with using such a tool, and what alternatives and best practices exist for Facebook security.

-

What is Facebook Password Revealer Online?

-

Facebook password revealer online is a term that refers to any tool or website that claims to be able to reveal someone's Facebook password by entering their email address or phone number. These tools or websites usually promise to provide the password in a matter of minutes or seconds, without requiring any download or installation.

-

Some examples of such tools or websites are:

-

-
    -
  • Forgot password | Can't log in | Facebook: This is the official Facebook page for recovering your account if you forgot your password. However, some people may mistake it for a tool that can reveal anyone's password.
  • Pass Revelator | Find passwords from Facebook, Instagram, GMail...: This is a website that claims to be able to retrieve all your lost passwords from various social media platforms, including Facebook. It claims to use advanced algorithms and techniques to hack into the accounts.
  • Facebook Password Sniper: This is another website that claims to be able to hack any Facebook account by using a method called Rainbow Tables. It asks you to enter the profile URL of the target account and complete a verification process.
-

How do these tools or websites work and what do they require from the user? The answer may vary depending on the specific tool or website, but generally they follow these steps:

-
    -
  1. The user enters the email address or phone number of the target account.
  2. The tool or website attempts to connect to the Facebook server and find the password associated with the email address or phone number.
  3. The tool or website displays the password to the user or asks them to complete a survey, a captcha, or a payment to access the password.
-

While these tools or websites may seem tempting and convenient, they are not as simple and harmless as they appear. In fact, they may pose serious risks and challenges for both the user and the target account.

-

Why Would Someone Want to Use Facebook Password Revealer Online?

-

There are many possible scenarios and motivations for someone to use Facebook password revealer online. Some of them are:

-

To access their own account if they forgot their password

-

One of the most common reasons for someone to use Facebook password revealer online is to access their own account if they forgot their password. This may happen if they have not logged in for a long time, if they have changed their password recently, or if they have lost their phone or email access.

-

In this case, the user may think that using a tool or a website that can reveal their password is easier and faster than using Facebook's official features and tools for password recovery. However, this is not true, as we will see later.

-

To access someone else's account for personal or professional reasons

-

Another reason for someone to use Facebook password revealer online is to access someone else's account for personal or professional reasons. This may happen if they want to check on their partner, friend, or colleague's Facebook activity, messages, photos, or contacts. They may want to do this out of curiosity, jealousy, suspicion, or distrust.

-

In this case, the user may think that using a tool or a website that can reveal someone else's password is harmless and justified. However, this is not true, as we will see later.

-

To prank or spy on someone for fun or curiosity

-

A third reason for someone to use Facebook password revealer online is to prank or spy on someone for fun or curiosity. This may happen if they want to play a joke on their friend, family member, or co-worker by posting something funny, embarrassing, or inappropriate on their Facebook account. They may also want to see what someone else is doing on Facebook without their knowledge or consent.

-

In this case, the user may think that using a tool or a website that can reveal someone else's password is amusing and harmless. However, this is not true, as we will see later.

-

What are the Risks and Challenges of Using Facebook Password Revealer Online?

-

Using Facebook password revealer online may seem like an easy and convenient way to access any Facebook account. However, it comes with many risks and challenges that may outweigh any potential benefits. Some of these are:

-

It may not work or be accurate

-

One of the main risks of using Facebook password revealer online is that it may not work or be accurate. There is no guarantee that these tools or websites can actually reveal the correct password of any Facebook account. They may fail to connect to the Facebook server, they may provide a wrong or outdated password, or they may display a fake or random password.

-

In some cases, these tools or websites may even be scams that are designed to trick the user into providing their own personal information, such as their email address, phone number, credit card details, or even their own Facebook password. They may then use this information to hack into the user's account, steal their identity, money, or data, or sell it to third parties.

-

It may be illegal or unethical

-

Another risk of using Facebook password revealer online is that it may be illegal or unethical. Depending on the laws and regulations of your country or region, accessing someone else's Facebook account without their permission may constitute a crime of hacking, cyberstalking, identity theft, fraud, invasion of privacy, harassment, defamation, or other offenses. You may face legal consequences such as fines, lawsuits, arrests, imprisonment, or other penalties.

-

Even if you are not breaking any laws by using Facebook password revealer online, you are still violating the moral and social norms of respecting other people's privacy and personal boundaries. You are also betraying the trust and confidence of your partner, friend, or colleague, who may feel hurt, angry, or betrayed by your actions. You also risk damaging your reputation, relationships, and credibility.

-

It may compromise your own security and privacy

-

A third risk of using Facebook password revealer online is that it may compromise your own security and privacy. By using these tools or websites, you are exposing yourself to various threats and vulnerabilities that may harm your device, data, or identity. Some of these are:

-
    -
  • Malware: These tools or websites may contain malicious software that can infect your device and cause it to malfunction, crash, or leak your information. They may also hijack your browser, redirect you to unwanted or harmful websites, or display annoying or inappropriate ads.
  • Phishing: These tools or websites may try to trick you into providing your personal information, such as your email address, phone number, credit card details, or even your own Facebook password. They may then use this information to hack into your account, steal your identity, money, or data, or sell it to third parties.
  • Spyware: These tools or websites may monitor your online activity, such as the websites you visit, the messages you send, the photos you view, or the passwords you enter. They may then use this information to spy on you, blackmail you, extort you, or expose you.
-

By compromising your own security and privacy, you are not only putting yourself at risk, but also anyone else who is connected to you, such as your family, friends, co-workers, or customers.

-

It may violate Facebook's terms and policies

-

A fourth risk of using Facebook password revealer online is that it may violate Facebook's terms and policies. Facebook is a social media platform that has its own rules and regulations that govern how its users can access and use its services. By using these tools or websites, you are breaking these rules and regulations and risking losing access to your account or facing other consequences.

-

Some of the terms and policies that you may violate by using Facebook password revealer online are:

-
    -
  • Facebook Terms of Service: This is the agreement that you accept when you create a Facebook account. It states that you are responsible for keeping your password secure and confidential and that you will not share it with anyone else. It also states that you will not access or attempt to access an account that does not belong to you.
  • Facebook Data Policy: This is the policy that explains how Facebook collects, uses, and shares your data. It states that you have control over who can see your information and how you can manage your privacy settings. It also states that Facebook will protect your data from unauthorized access or misuse.
  • Facebook Community Standards: This is the policy that outlines what kind of content and behavior is acceptable on Facebook. It states that you will respect other people's rights and dignity and that you will not engage in any harmful or abusive activities. It also states that Facebook will remove any content or accounts that violate these standards.
-

By violating Facebook's terms and policies, you are not only disrespecting Facebook as a platform, but also its users as a community.

-

What are Some Alternatives and Best Practices for Facebook Security?

-

As we have seen, using Facebook password revealer online is not a good idea for anyone who wants to access any Facebook account. It is ineffective, risky, illegal, and unethical. It may not only harm the target account, but also your own account, device, data, identity, reputation, relationships, and credibility. It may also violate Facebook's terms and policies and cause you to lose access to your account or face other consequences.

So, what are some alternatives and best practices for Facebook security? How can you access your own account if you forgot your password? How can you protect your account from unauthorized access or misuse? How can you respect other people's privacy and personal boundaries? Here are some possible solutions and tips:

Use Facebook's official features and tools for password recovery and account protection

-

The first and best alternative for Facebook security is to use Facebook's official features and tools for password recovery and account protection. These are the features and tools that Facebook provides to help you access and secure your account. They are easy, fast, reliable, and safe to use.

-

Some of these features and tools are:

-
    -
  • Forgot password | Can't log in | Facebook: This is the official Facebook page for recovering your account if you forgot your password. You can enter your email address, phone number, username, or full name and Facebook will send you a link or a code to reset your password. You can also use your trusted contacts or your identity document to verify your identity.
  • Login & Password | Facebook Help Center: This is the official Facebook page for managing your login and password. You can change or update your password, log out of your account from other devices, review your login history, and see where you're logged in.
  • Hacked Accounts | Facebook Help Center: This is the official Facebook page for dealing with hacked accounts. You can report a hacked account, secure a compromised account, recover a deleted account, or prevent hacking.
-

By using these features and tools, you can access and secure your account without relying on any third-party tools or websites that may be unreliable or harmful.

-

Use a strong and unique password and change it regularly

-

The second best practice for Facebook security is to use a strong and unique password and change it regularly. A strong and unique password is one that is hard to guess or crack by anyone else. It should be at least eight characters long, include uppercase and lowercase letters, numbers, and symbols, and avoid common words or phrases. A unique password is one that is different from any other passwords that you use for other accounts or services.

-

By using a strong and unique password, you can prevent anyone from accessing your account by guessing or cracking your password. You should also change your password regularly, at least every few months, to keep it fresh and secure.
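
If coming up with such a password is hard, let a machine do it. As a rough illustration (a generic sketch, not a Facebook feature), here is how a strong random password can be generated with Python's standard secrets module:

```python
import secrets
import string

def generate_password(length: int = 16) -> str:
    # Draw from letters, digits, and symbols with a cryptographically
    # secure random source (never use random.random() for passwords).
    alphabet = string.ascii_letters + string.digits + string.punctuation
    return "".join(secrets.choice(alphabet) for _ in range(length))

print(generate_password())  # prints a different 16-character password every run
```

A password manager will do the same job and remember the result for you.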

-

Enable two-factor authentication and login alerts

-

The third best practice for Facebook security is to enable two-factor authentication and login alerts. Two-factor authentication is a feature that adds an extra layer of security to your login process. It requires you to enter a code that is sent to your phone or email address after you enter your password. This way, even if someone knows your password, they won't be able to access your account without the code.

-

Login alerts are a feature that notifies you whenever someone logs into your account from a new device or browser. You can choose to receive these alerts by email, text message, or notification. This way, you can quickly detect any suspicious or unauthorized login attempts and take action accordingly.

-

By enabling two-factor authentication and login alerts, you can enhance the security of your account and prevent anyone from accessing it without your knowledge or consent.
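
For the curious, the six-digit codes produced by authenticator apps are not magic: they follow a published standard called TOTP (RFC 6238). A minimal sketch in Python, where the Base32 secret is a well-known documentation placeholder rather than a real account key:

```python
import base64
import hashlib
import hmac
import struct
import time

def totp(secret_b32: str, period: int = 30, digits: int = 6) -> str:
    # HMAC-SHA1 over the current 30-second time step (RFC 6238 / RFC 4226).
    key = base64.b32decode(secret_b32, casefold=True)
    counter = struct.pack(">Q", int(time.time()) // period)
    digest = hmac.new(key, counter, hashlib.sha1).digest()
    offset = digest[-1] & 0x0F  # dynamic truncation (RFC 4226, section 5.3)
    code = struct.unpack(">I", digest[offset:offset + 4])[0] & 0x7FFFFFFF
    return str(code % 10 ** digits).zfill(digits)

print(totp("JBSWY3DPEHPK3PXP"))  # placeholder secret; prints a 6-digit code
```

Because the code changes every 30 seconds and is derived from a secret only you and Facebook hold, a stolen password alone is not enough to log in.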

-

Avoid phishing and malware attacks

-

The fourth best practice for Facebook security is to avoid phishing and malware attacks.

Phishing is a type of attack that tries to trick you into providing your personal information, such as your email address, phone number, credit card details, or even your own Facebook password. Attackers may do this by sending you fake or spoofed emails, messages, or websites that look like they are from Facebook or other trusted sources. They may also ask you to click on a link, download a file, or fill out a form.

Malware is a type of software that can infect your device and cause it to malfunction, crash, or leak your information. It may also hijack your browser, redirect you to unwanted or harmful websites, or display annoying or inappropriate ads. Malware usually spreads by hiding in files, attachments, or links that you download or click on.

By avoiding phishing and malware attacks, you can protect your personal information and your device from being stolen or damaged.
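
One concrete habit that defeats most phishing attempts: before typing your password, check that the address really is facebook.com and not a look-alike. A toy sketch of that check in Python (the evil.example address is invented for illustration):

```python
from urllib.parse import urlparse

TRUSTED_HOSTS = {"facebook.com", "www.facebook.com", "m.facebook.com"}

def looks_like_real_facebook(url: str) -> bool:
    # Compare the full hostname: "facebook.com.evil.example" must not pass.
    host = (urlparse(url).hostname or "").lower()
    return host in TRUSTED_HOSTS

print(looks_like_real_facebook("https://www.facebook.com/login"))           # True
print(looks_like_real_facebook("https://facebook.com.evil.example/login"))  # False
```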

Be wary of strangers and suspicious links or messages

-

The fifth best practice for Facebook security is to be wary of strangers and suspicious links or messages. You may receive requests, messages, or notifications from people you don't know or trust on Facebook. They may claim to be your friend, family member, co-worker, customer, or someone else who needs your help or wants to offer you something. They may also send you links or attachments that look interesting or useful. However, these may be attempts to trick you into providing your personal information, clicking on a malicious link, downloading a harmful file, or accessing a fake website. They may also be trying to infect your device with malware, spy on your activity, blackmail you, extort you, or expose you.

By being wary of strangers and suspicious links or messages, you can avoid falling for these scams and protect yourself from being harmed.

-

Conclusion

-

Facebook password revealer online is a term that refers to any tool or website that claims to be able to reveal someone's Facebook password by entering their email address or phone number. However, using such a tool or website is not a good idea for anyone who wants to access any Facebook account. It is ineffective, risky, illegal, and unethical. It may not only harm the target account, but also your own account, device, data, identity, reputation, relationships, and credibility. It may also violate Facebook's terms and policies and cause you to lose access to your account or face other consequences.

-

Instead of using Facebook password revealer online, you should use Facebook's official features and tools for password recovery and account protection. You should also use a strong and unique password and change it regularly. You should enable two-factor authentication and login alerts. You should avoid phishing and malware attacks. And you should be wary of strangers and suspicious links or messages.

-

By following these alternatives and best practices for Facebook security, you can access and secure your account without relying on any third-party tools or websites that may be unreliable or harmful. You can also respect other people's privacy and personal boundaries and avoid any legal or ethical issues.

-

Do you have any questions or comments about Facebook password revealer online? Feel free to share them below.

-

FAQs

-

Q: Can I use Facebook password revealer online to access my own account if I forgot my password?

-

A: No, you should not use Facebook password revealer online to access your own account if you forgot your password. Instead, you should use Facebook's official features and tools for password recovery and account protection. You can enter your email address, phone number, username, or full name and Facebook will send you a link or a code to reset your password. You can also use your trusted contacts or your identity document to verify your identity. You can find more information on how to recover your account here.

-

Q: Can I use Facebook password revealer online to access someone else's account for personal or professional reasons?

-

A: No, you should not use Facebook password revealer online to access someone else's account for personal or professional reasons. This is not only ineffective and risky, but also illegal and unethical. You may be breaking the laws and regulations of your country or region, as well as Facebook's terms and policies. You may also be violating the privacy and personal boundaries of the other person, who may feel hurt, angry, or betrayed by your actions. You may also damage your reputation, relationships, and credibility.

-

Instead of using Facebook password revealer online to access someone else's account for personal or professional reasons, you should respect their privacy and personal boundaries and avoid any harmful or abusive activities. If you have a legitimate reason to access someone else's account, such as for work or legal purposes, you should obtain their consent and authorization first. You should also follow the ethical and professional standards of your field or industry.

-

Q: Can I use Facebook password revealer online to prank or spy on someone for fun or curiosity?

-

A: No, you should not use Facebook password revealer online to prank or spy on someone for fun or curiosity. This is not only ineffective and risky, but also illegal and unethical. You may be breaking the laws and regulations of your country or region, as well as Facebook's terms and policies. You may also be violating the privacy and personal boundaries of the other person, who may feel hurt, angry, or violated by your actions. You may also expose yourself to legal or social consequences.

-

Instead of using Facebook password revealer online to prank or spy on someone for fun or curiosity, you should find other ways to have fun or satisfy your curiosity that do not involve harming or disrespecting anyone else. You should also be aware of the potential consequences of your actions and take responsibility for them.

-

Q: How can I protect my account from Facebook password revealer online?

-

A: You can protect your account from Facebook password revealer online by following some best practices for Facebook security. These include:

-
    -
  • Using a strong and unique password and changing it regularly
  • Enabling two-factor authentication and login alerts
  • Avoiding phishing and malware attacks
  • Being wary of strangers and suspicious links or messages
-

By following these best practices, you can prevent anyone from accessing your account by using Facebook password revealer online or any other method.

-

Q: Is there any legitimate use for Facebook password revealer online?

-

A: No, there is no legitimate use for Facebook password revealer online. It is a tool or a website that claims to be able to reveal anyone's Facebook password by entering their email address or phone number. However, it is ineffective, risky, illegal, and unethical. It may not only harm the target account, but also your own account, device, data, identity, reputation, relationships, and credibility. It may also violate Facebook's terms and policies and cause you to lose access to your account or face other consequences. There is no reason or justification for using Facebook password revealer online. It is a tool or a website that should be avoided and reported by anyone who cares about their own security and privacy, as well as the security and privacy of others.
-
-
\ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Igo Primo 2.0 Wince.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Igo Primo 2.0 Wince.md deleted file mode 100644 index b66c76c4eb96fb6649e48d4811f9f8d7ec3840ed..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Igo Primo 2.0 Wince.md +++ /dev/null @@ -1,44 +0,0 @@ - -

How to Install and Use Igo Primo 2.0 Wince on Your GPS Device

-

Igo Primo 2.0 Wince is a popular navigation software for Windows CE devices. It offers features such as voice guidance, 3D maps, speed camera alerts, lane assistance, and more. If you want to use Igo Primo 2.0 Wince on your GPS device, here are the steps you need to follow:

-

Igo Primo 2.0 Wince


Download Zip 🆗 https://urlcod.com/2uIc3m



-
    -
  1. Download the Igo Primo 2.0 Wince software from a reliable source. You can find official versions and cracked versions online, but be careful of malware and viruses. You can also check the GPS Power forum for links and tips.
  2. Extract the downloaded file to your computer. You should see a folder named Igo or Primo with subfolders such as content, license, save, etc.
  3. Copy the Igo or Primo folder to your GPS device's memory card. Make sure you have enough space on your card and back up any important files before copying.
  4. Insert the memory card into your GPS device and turn it on. You may need to change the navigation path in your device's settings to point to the Igo or Primo executable file.
  5. Run the Igo or Primo software and enjoy your navigation experience. You can customize the settings, skins, languages, voices, maps, and more according to your preferences.
-

Igo Primo 2.0 Wince is compatible with most Windows CE devices with resolutions of 800x480, 480x272, 480x234, or 320x240. It supports multiple languages and regions, including Europe, North America, South America, Asia, Africa, and Australia. You can also find special editions for truck drivers, campers, bikers, and other users.

-

If you have any questions or problems with Igo Primo 2.0 Wince, you can visit the GPS Zone RO or GPS Power forums for help and advice from other users and experts. You can also find updates, patches, addons, and mods for Igo Primo 2.0 Wince on these forums.

-

Igo Primo 2.0 Wince is a great navigation software for Windows CE devices that can make your travels easier and safer. Try it today and see for yourself!

- -

What are the advantages of Igo Primo 2.0 Wince?

-

Igo Primo 2.0 Wince has many advantages over other navigation software for Windows CE devices. Some of the advantages are:

-
    -
  • It has a user-friendly interface that is easy to navigate and customize.
  • It has a fast and accurate routing algorithm that calculates the best route for your destination.
  • It has a large and updated database of maps, points of interest, speed cameras, traffic information, and more.
  • It has a realistic 3D view of the terrain, buildings, landmarks, and junctions.
  • It has a voice guidance system that speaks the street names and directions in your language.
  • It has a lane assistance feature that shows you which lane to take at complex intersections.
  • It has a speed limit and speed camera alert feature that warns you of the legal speed and the location of speed cameras.
  • It has a truck mode feature that takes into account the dimensions and weight of your vehicle and avoids roads that are unsuitable for trucks.
  • It has a camper mode feature that helps you find campsites, parking areas, gas stations, and other facilities for campers.
  • It has a bike mode feature that optimizes the route for cyclists and avoids highways and busy roads.
-

Igo Primo 2.0 Wince is a versatile and powerful navigation software that can suit any type of user and vehicle. It can help you save time, money, and fuel by providing you with the best navigation solution.

-

- -

How to update Igo Primo 2.0 Wince?

-

Igo Primo 2.0 Wince is constantly updated with new features, bug fixes, and improvements. To update Igo Primo 2.0 Wince, you need to follow these steps:

-
    -
  1. Download the latest update file from a reliable source. You can find official updates and cracked updates online, but be careful of malware and viruses. You can also check the GPS Power forum for links and tips.
  2. Extract the downloaded file to your computer. You should see a folder named update or similar with subfolders such as content, license, save, etc.
  3. Copy the update folder to your GPS device's memory card. Make sure you overwrite any existing files and folders with the same name.
  4. Insert the memory card into your GPS device and turn it on. The update process should start automatically or you may need to run the update executable file manually.
  5. Wait for the update process to finish and restart your GPS device. You should see the new version number on the start screen of Igo Primo 2.0 Wince.
-

Igo Primo 2.0 Wince is regularly updated with new maps, points of interest, speed cameras, traffic information, and more. To get the most out of Igo Primo 2.0 Wince, you should update it frequently and enjoy the latest features and improvements.

-
-
\ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Quantum Chemistry Donald A Mcquarrie Pdf Download !FULL!.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Quantum Chemistry Donald A Mcquarrie Pdf Download !FULL!.md deleted file mode 100644 index 1caa9ac12385ef0893ddbeee78d964dad787a304..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Quantum Chemistry Donald A Mcquarrie Pdf Download !FULL!.md +++ /dev/null @@ -1,15 +0,0 @@ -
-

Quantum Chemistry by Donald A. McQuarrie: A Review and Download Guide

-

Quantum chemistry is the branch of chemistry that deals with the application of quantum mechanics to the study of molecules and their properties. Quantum chemistry is a fascinating and challenging subject that requires a solid mathematical background and a good understanding of the physical principles behind the quantum phenomena.

-

Quantum Chemistry Donald A Mcquarrie Pdf Download


Download ►►►►► https://urlcod.com/2uI9PF



-

One of the most popular and respected textbooks on quantum chemistry is Quantum Chemistry by Donald A. McQuarrie, a professor emeritus at the University of California, Davis. This book, first published in 1983 and updated in 2008, covers all the essential topics of quantum chemistry, such as the Schrödinger equation, the hydrogen atom, the harmonic oscillator, angular momentum, perturbation theory, variational methods, molecular orbital theory, Hartree-Fock theory, configuration interaction, coupled-cluster theory, density functional theory, and many more.

-

The book is written in a clear and engaging style, with numerous examples, exercises, and problems to help students master the concepts and techniques of quantum chemistry. The book also includes appendices on mathematical methods, physical constants, atomic and molecular data, and solutions to selected problems.

-

If you are looking for a comprehensive and accessible introduction to quantum chemistry, you can download a PDF version of Quantum Chemistry by Donald A. McQuarrie from the following link: https://pdfroom.com/books/quantum-chemistry/bWx5amw85BJ. This link will take you to a website that provides free access to various books in PDF format. You can also preview the book before downloading it.

-

-

We hope you enjoy reading Quantum Chemistry by Donald A. McQuarrie and learn a lot from it. If you have any questions or feedback about the book or this article, please feel free to leave a comment below.

- -

One of the distinctive features of Quantum Chemistry by Donald A. McQuarrie is the use of physical models and analogies to help students visualize and understand the quantum concepts. For example, the book uses a spinning top to illustrate the quantization of angular momentum, a spring to explain the harmonic oscillator, and a particle on a ring to introduce molecular orbital theory. The book also provides historical and biographical notes on the scientists who contributed to the development of quantum chemistry, such as Planck, Einstein, Bohr, Schrödinger, Heisenberg, Pauli, Dirac, Born, and many others.
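
To give a flavour of what these models deliver, here are the standard quantized-energy results they lead to (textbook formulas quoted for reference, not excerpts from the book):

```latex
E_v = \left(v + \tfrac{1}{2}\right)\hbar\omega,
      \quad v = 0, 1, 2, \ldots
      \quad \text{(harmonic oscillator)}

\hat{L}^2 \, Y_l^m = \hbar^2 \, l(l+1) \, Y_l^m,
      \quad l = 0, 1, 2, \ldots
      \quad \text{(quantized angular momentum)}

E_m = \frac{\hbar^2 m^2}{2I},
      \quad m = 0, \pm 1, \pm 2, \ldots
      \quad \text{(particle on a ring, moment of inertia } I\text{)}
```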

-

Another strength of Quantum Chemistry by Donald A. McQuarrie is the inclusion of numerous applications of quantum chemistry to real-world problems, such as spectroscopy, bonding, molecular structure, reactivity, and reaction mechanisms. The book shows how quantum chemistry can be used to explain and predict the behavior of atoms and molecules in various situations, such as electronic transitions, vibrational and rotational motions, magnetic resonance, molecular orbitals, hybridization, resonance structures, Hückel theory, molecular symmetry, group theory, and more.
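
As a small taste of how such applications work in practice, Hückel theory reduces the pi system of butadiene to a 4x4 matrix whose eigenvalues give the orbital energies. A sketch of that calculation (assuming numpy is available; alpha and beta are the usual Coulomb and resonance integrals):

```python
import numpy as np

# Hückel connectivity matrix for butadiene: alpha on the diagonal is set
# to 0 and beta between bonded neighbours to 1, so energies come out as
# E = alpha + x * beta, where x are the eigenvalues below.
H = np.array([
    [0.0, 1.0, 0.0, 0.0],
    [1.0, 0.0, 1.0, 0.0],
    [0.0, 1.0, 0.0, 1.0],
    [0.0, 0.0, 1.0, 0.0],
])

x = np.linalg.eigvalsh(H)
print(np.round(x, 3))  # [-1.618 -0.618  0.618  1.618]
# Since beta is negative, the bonding orbitals are the ones with
# x = +1.618 and +0.618; both are doubly occupied in the ground state.
```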

-

The PDF version of Quantum Chemistry by Donald A. McQuarrie that you can download from the link above is a high-quality scan of the original book. It has 704 pages and is 80.08 MB in size. You can read it on your computer or mobile device using any PDF reader software. You can also print it out if you prefer a hard copy. However, please note that this PDF is intended for personal use only and not for distribution or commercial purposes.

-
-
\ No newline at end of file diff --git a/spaces/niizam/sovits-models/inference/__init__.py b/spaces/niizam/sovits-models/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/engine/launch.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/engine/launch.py deleted file mode 100644 index 7052c5040e4d9e6553a1b371518cb53fb056524e..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/engine/launch.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -from datetime import timedelta -import torch -import torch.distributed as dist -import torch.multiprocessing as mp - -from detectron2.utils import comm - -__all__ = ["DEFAULT_TIMEOUT", "launch"] - -DEFAULT_TIMEOUT = timedelta(minutes=30) - - -def _find_free_port(): - import socket - - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - # Binding to port 0 will cause the OS to find an available port for us - sock.bind(("", 0)) - port = sock.getsockname()[1] - sock.close() - # NOTE: there is still a chance the port could be taken by other processes. - return port - - -def launch( - main_func, - # Should be num_processes_per_machine, but kept for compatibility. - num_gpus_per_machine, - num_machines=1, - machine_rank=0, - dist_url=None, - args=(), - timeout=DEFAULT_TIMEOUT, -): - """ - Launch multi-process or distributed training. - This function must be called on all machines involved in the training. - It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine. - - Args: - main_func: a function that will be called by `main_func(*args)` - num_gpus_per_machine (int): number of processes per machine. When - using GPUs, this should be the number of GPUs. - num_machines (int): the total number of machines - machine_rank (int): the rank of this machine - dist_url (str): url to connect to for distributed jobs, including protocol - e.g. "tcp://127.0.0.1:8686". - Can be set to "auto" to automatically select a free port on localhost - timeout (timedelta): timeout of the distributed workers - args (tuple): arguments passed to main_func - """ - world_size = num_machines * num_gpus_per_machine - if world_size > 1: - # https://github.com/pytorch/pytorch/pull/14391 - # TODO prctl in spawned processes - - if dist_url == "auto": - assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs." - port = _find_free_port() - dist_url = f"tcp://127.0.0.1:{port}" - if num_machines > 1 and dist_url.startswith("file://"): - logger = logging.getLogger(__name__) - logger.warning( - "file:// is not a reliable init_method in multi-machine jobs. 
Prefer tcp://" - ) - - mp.start_processes( - _distributed_worker, - nprocs=num_gpus_per_machine, - args=( - main_func, - world_size, - num_gpus_per_machine, - machine_rank, - dist_url, - args, - timeout, - ), - daemon=False, - ) - else: - main_func(*args) - - -def _distributed_worker( - local_rank, - main_func, - world_size, - num_gpus_per_machine, - machine_rank, - dist_url, - args, - timeout=DEFAULT_TIMEOUT, -): - has_gpu = torch.cuda.is_available() - if has_gpu: - assert num_gpus_per_machine <= torch.cuda.device_count() - global_rank = machine_rank * num_gpus_per_machine + local_rank - try: - dist.init_process_group( - backend="NCCL" if has_gpu else "GLOO", - init_method=dist_url, - world_size=world_size, - rank=global_rank, - timeout=timeout, - ) - except Exception as e: - logger = logging.getLogger(__name__) - logger.error("Process group URL: {}".format(dist_url)) - raise e - - # Setup the local process group. - comm.create_local_process_group(num_gpus_per_machine) - if has_gpu: - torch.cuda.set_device(local_rank) - - # synchronize is needed here to prevent a possible timeout after calling init_process_group - # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 - comm.synchronize() - - main_func(*args) diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/utils/__init__.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/nomic-ai/BelleGroup_school_math_0.25M/index.html b/spaces/nomic-ai/BelleGroup_school_math_0.25M/index.html deleted file mode 100644 index 7f179599987ebf3909f5e1bdd467f3d51c918b80..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/BelleGroup_school_math_0.25M/index.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - BelleGroup/school_math_0.25M - - - - -
- -
- - - \ No newline at end of file diff --git a/spaces/obsei/obsei-demo/app.py b/spaces/obsei/obsei-demo/app.py deleted file mode 100644 index 297015c3f3b23d4df5b427f2a7d17241bff2d43c..0000000000000000000000000000000000000000 --- a/spaces/obsei/obsei-demo/app.py +++ /dev/null @@ -1,96 +0,0 @@ -from utils import * - -current_path = pathlib.Path(__file__).parent.absolute().as_posix() -configuration = get_obsei_config(current_path, "config.yaml") -logo_url = "https://raw.githubusercontent.com/obsei/obsei-resources/master/logos/obsei_200x200.png" - -st.set_page_config(page_title="Obsei Demo", layout="wide", page_icon=logo_url) - -st.title("Obsei Demo").markdown( - get_icon_name("Obsei Demo", logo_url, 60, 35), unsafe_allow_html=True -) - -st.success( - """ -Please ⭐ the repo and share the feedback at https://github.com/obsei/obsei?utm_source=hfspace - """ -) -st.warning( - """ -**Note:** Demo run will require some secure information based on source or sink selected, -if you don't trust this environment please close the app. -""" -) - -( - pipeline_col, - spinner_col, - execute_col, - download_python_col, - download_yaml_col, -) = st.columns([2, 2, 1, 1, 1]) - -col_map = dict() -col_map["source"], col_map["analyzer"], col_map["sink"] = st.columns([1, 1, 1]) - -selected = {} -name_map = {"source": "Observer", "analyzer": "Analyzer", "sink": "Informer"} - -for node_name, col in col_map.items(): - item_list = [k for k in configuration[node_name].keys()] - selected[node_name] = col.selectbox(f"Select {name_map[node_name]}", item_list) - -icons = [get_icon_name(None, configuration[k][v]["_icon_"]) for k, v in selected.items()] -pipeline_col.header("Pipeline").markdown( - f"**Pipeline:** {icons[0]} ➡➡ {icons[1]} ➡➡ {icons[2]}", - unsafe_allow_html=True, -) - -generate_config = {} -log_component = {} -for node_name, node_value in selected.items(): - type_config = configuration[node_name][node_value] - if node_name == "analyzer": - type_list = [] - for config_key in type_config.keys(): - if config_key != "_icon_": - type_list.append(config_key) - selected_type = col_map[node_name].selectbox(f"{name_map[node_name]} Type", type_list) - type_config = type_config[selected_type] - - config = None - if "config" in type_config: - config = type_config["config"] - if type_config["_help_"] is not None: - with col_map[node_name].expander("Config Help Info", False): - help_area = "\n".join(type_config["_help_"]) - st.code(f"{help_area}") - - config_expander = None - if config is not None: - config_expander = col_map[node_name].expander(f"Configure {name_map[node_name]}", False) - render_config(config, config_expander) - - if node_name == "analyzer" and node_name in type_config and len(type_config[node_name]) > 1: - config_expander = config_expander or col_map[node_name].expander(f"Configure {name_map[node_name]}", False) - render_config(type_config["analyzer"], config_expander) - - generate_config[node_name] = type_config[node_name] - generate_config[f"{node_name}_config"] = config - - log_expander = col_map[node_name].expander(f"{name_map[node_name]} Logs", True) - log_component[node_name] = log_expander.empty() - log_component[node_name].write("Run \"🚀 Execute\" first") - -python_code = generate_python(generate_config) -yaml_code = generate_yaml(generate_config) - -execute_button = execute_col.button("🚀 Execute") -if execute_button: - execute_workflow(generate_config, spinner_col, log_component) - -with download_python_col: - download_button(python_code, "generated-code.py", "🐍 Download (.py)") - -with 
download_yaml_col:
-    download_button(yaml_code, "generated-config.yaml", "📖 Download (.yaml)")
diff --git a/spaces/oliver2023/chatgpt-on-wechat/plugins/plugin.py b/spaces/oliver2023/chatgpt-on-wechat/plugins/plugin.py
deleted file mode 100644
index 289b4f84d88ba676efdff0493ef37c0256ed0d41..0000000000000000000000000000000000000000
--- a/spaces/oliver2023/chatgpt-on-wechat/plugins/plugin.py
+++ /dev/null
@@ -1,6 +0,0 @@
-class Plugin:
-    def __init__(self):
-        self.handlers = {}
-
-    def get_help_text(self, **kwargs):
-        return "暂无帮助信息"  # Chinese for "No help information available yet"
\ No newline at end of file
diff --git "a/spaces/oskarvanderwal/MT-bias-demo/results/simple_tan\303\241r_de.html" "b/spaces/oskarvanderwal/MT-bias-demo/results/simple_tan\303\241r_de.html"
deleted file mode 100644
index 835aee1a5c592b288e2ab25b8e07c76a233b5bc2..0000000000000000000000000000000000000000
--- "a/spaces/oskarvanderwal/MT-bias-demo/results/simple_tan\303\241r_de.html"
+++ /dev/null
@@ -1,46 +0,0 @@
-
0th instance:

Source Saliency Heatmap
x: Generated tokens, y: Attributed tokens

|         | ▁Er   | ▁ist  | ▁Lehrer | .      | </s>   |
|---------|-------|-------|---------|--------|--------|
| ▁Ő      | 0.806 | 0.025 | 0.111   | 0.158  | 0.229  |
| ▁tanár  | 0.545 | 0.373 | 0.673   | 0.551  | 0.516  |
| .       | 0.231 | 0.101 | -0.018  | 0.814  | 0.507  |
| </s>    | 0.0   | 0.0   | 0.0     | 0.0    | 0.0    |

0th instance:

Target Saliency Heatmap
x: Generated tokens, y: Attributed tokens

|         | ▁Er | ▁ist  | ▁Lehrer | .      | </s>   |
|---------|-----|-------|---------|--------|--------|
| ▁Er     |     | 0.922 | 0.704   | 0.048  | -0.534 |
| ▁ist    |     |       | 0.196   | 0.041  | -0.037 |
| ▁Lehrer |     |       |         | -0.072 | 0.343  |
| .       |     |       |         |        | -0.138 |
| </s>    |     |       |         |        |        |
- diff --git a/spaces/p-baleine/metaanalyser/examples/programming testing arxiv.md b/spaces/p-baleine/metaanalyser/examples/programming testing arxiv.md deleted file mode 100644 index 58adebe52bd0cffc5880e17d6a75d588126d288e..0000000000000000000000000000000000000000 --- a/spaces/p-baleine/metaanalyser/examples/programming testing arxiv.md +++ /dev/null @@ -1,156 +0,0 @@ -# A Systematic Review of Programming Testing Arxiv - -This systematic review explores the field of programming testing arxiv, which aims to solve the problem of software errors. It covers the historical background of the field and its future development. The review includes five papers that discuss new approaches for generating test cases, benchmark datasets for machine learning research, novel techniques for program synthesis, feasibility and effectiveness of test case generation for program repair, and combinatorial testing for deep learning systems. - -## Table of contents - -1. Introduction: This section provides an overview of the field of programming testing arxiv and its importance in solving the problem of software errors. -2. Metamorphic Testing for Generating Next Test Cases: This section discusses the concept of metamorphic testing and its potential to reveal software errors that are left undetected in successful test cases. It also explores the limitations of current software testing techniques and proposes a novel test case selection technique. - 1. Background: This subsection provides a brief history of software testing and its importance in software development. - 2. Metamorphic Testing: This subsection explains the concept of metamorphic testing and how it can be used to generate new test cases from successful ones. - 3. Limitations of Current Software Testing Techniques: This subsection explores the limitations of current software testing techniques and how metamorphic testing can augment their effectiveness. - 4. Proposed Test Case Selection Technique: This subsection discusses the proposed test case selection technique and how it can help uncover software errors in the production phase. -3. Codexglue for Machine Learning Research: This section introduces CodeXGLUE, a benchmark dataset for machine learning research in program understanding and generation. It also discusses the impact of benchmark datasets on accelerating research in programming language tasks. - 1. Background: This subsection provides a brief history of benchmark datasets and their importance in machine learning research. - 2. CodeXGLUE: This subsection introduces CodeXGLUE, a benchmark dataset for machine learning research in program understanding and generation. - 3. Impact of Benchmark Datasets on Accelerating Research: This subsection discusses the impact of benchmark datasets on accelerating research in programming language tasks. -4. Neuro-Symbolic Program Synthesis: This section proposes a novel technique, Neuro-Symbolic Program Synthesis, to overcome the limitations of neural architectures for program induction. It also explains how the proposed technique can automatically construct computer programs in a domain-specific language that are consistent with a set of input-output examples provided at test time. - 1. Background: This subsection provides a brief history of neural architectures for program induction and their limitations. - 2. Neuro-Symbolic Program Synthesis: This subsection explains the concept of Neuro-Symbolic Program Synthesis and how it can overcome the limitations of neural architectures for program induction. 
- 3. Automatically Constructing Computer Programs: This subsection explains how the proposed technique can automatically construct computer programs in a domain-specific language that are consistent with a set of input-output examples provided at test time. -5. Test Case Generation for Program Repair: This section investigates the feasibility and effectiveness of test case generation in alleviating the overfitting issue in test suite based program repair techniques. It also proposes two approaches for using test case generation to improve test suite based repair. - 1. Background: This subsection provides a brief history of program repair techniques and their limitations. - 2. Feasibility and Effectiveness of Test Case Generation: This subsection investigates the feasibility and effectiveness of test case generation in alleviating the overfitting issue in test suite based program repair techniques. - 3. Proposed Approaches for Using Test Case Generation: This subsection proposes two approaches for using test case generation to improve test suite based repair. -6. Combinatorial Testing for Deep Learning Systems: This section explores the challenges of testing deep learning systems and proposes combinatorial testing as a promising avenue for testing such systems. It also adapts the concept of combinatorial testing and proposes a set of coverage criteria for deep learning systems. - 1. Background: This subsection provides a brief history of deep learning systems and their challenges in testing. - 2. Combinatorial Testing: This subsection explains the concept of combinatorial testing and how it can be used to reduce the testing space while obtaining relatively high defect detection abilities. - 3. Coverage Criteria for Deep Learning Systems: This subsection proposes a set of coverage criteria for deep learning systems. -7. Conclusion: This section summarizes the main findings of the systematic review and discusses future research directions in programming testing arxiv. - -## Introduction - -Software errors can cause significant damage to the production system and lead to financial losses. Therefore, software testing is an essential part of software development to ensure the quality of the software. However, the limitations of current software testing techniques have been observed [^1]. Firstly, successful test cases that do not reveal software errors are usually not further investigated, although they may contain useful information for revealing software errors. Secondly, errors may still exist in the software even after extensive testing in the development phase, and techniques for uncovering software errors in the production phase are seldom addressed in the literature. Thirdly, the availability of test oracles is pragmatically unattainable in most situations, but it is generally assumed in conventional software testing techniques. - -To address these limitations, the field of programming testing arxiv has emerged, which aims to develop new techniques for generating test cases, benchmark datasets for machine learning research, novel techniques for program synthesis, and testing deep learning systems [^1][^2][^3][^4][^5]. This systematic review explores the field of programming testing arxiv and its importance in solving the problem of software errors. It covers the historical background of the field and its future development. 
The review includes five papers that discuss new approaches for generating test cases, benchmark datasets for machine learning research, novel techniques for program synthesis, feasibility and effectiveness of test case generation for program repair, and combinatorial testing for deep learning systems. - -## Metamorphic Testing for Generating Next Test Cases - -Metamorphic testing is a new approach for generating next test cases that aims to reveal software errors that are left undetected in successful test cases [^1]. The current artifacts of software testing have limitations, including the assumption that successful test cases that do not reveal software errors are not further investigated. However, these successful test cases may still contain useful information for revealing software errors [^1]. Additionally, errors may still exist in the software even after extensive testing has been conducted in the development phase, and techniques for uncovering software errors in the production phase are seldom addressed in the literature [^1]. - -Metamorphic testing proposes a novel test case selection technique that derives new test cases from the successful ones, augmenting the effectiveness of existing test selection strategies [^1]. The selection aims to reveal software errors that are possibly left undetected in successful test cases generated using some existing strategies [^1]. The proposed technique can also help uncover software errors in the production phase and can be used in the absence of test oracles [^1]. - -The limitations of current software testing techniques can be overcome by using metamorphic testing, which generates new test cases from successful ones and helps uncover software errors that are left undetected [^1]. - -### Background: This subsection provides a brief history of software testing and its importance in software development. - -Software testing is an essential part of software development that aims to identify and correct errors in software systems. As stated by [^1], software testing involves constructing a set of test cases according to some predefined selection criteria and examining the software against these test cases. The importance of software testing lies in its ability to ensure the quality and reliability of software systems. However, despite the extensive testing conducted during the development phase, errors may still exist in the software [^1]. These errors, if left undetected, may eventually cause damage to the production system. Therefore, the study of techniques for uncovering software errors in the production phase is crucial. Additionally, the availability of test oracles is pragmatically unattainable in most situations [^1]. However, the availability of test oracles is generally assumed in conventional software testing techniques. Therefore, new approaches for generating test cases and augmenting the effectiveness of existing test selection strategies are needed to improve the quality and reliability of software systems. - -### Metamorphic Testing - -Metamorphic testing is a new approach for generating next test cases from successful ones [^1]. The technique aims to reveal software errors that are possibly left undetected in successful test cases generated using some existing strategies. The proposed technique augments the effectiveness of existing test selection strategies and helps uncover software errors in the production phase. 
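To make the idea concrete, a minimal sketch may help (this example is ours, not one from [^1]). Suppose the program under test computes the sine function: even without an oracle supplying the true value of sin(x), the identity sin(x) = sin(π − x) turns every successful source test case into a follow-up test case whose output must agree with the original output.

```python
import math

def program_under_test(x):
    # Stand-in for the implementation being tested, e.g. a third-party sin().
    return math.sin(x)

def metamorphic_sine_test(x, tolerance=1e-9):
    """Derive a follow-up test case from a successful one via the
    metamorphic relation sin(x) == sin(pi - x); no exact oracle is needed."""
    source_output = program_under_test(x)
    followup_output = program_under_test(math.pi - x)
    assert abs(source_output - followup_output) <= tolerance, \
        f"metamorphic relation violated at x={x}"

for x in [0.0, 0.5, 1.2, 2.0]:  # previously successful source test cases
    metamorphic_sine_test(x)
```

A violated relation signals a defect even though neither individual output could be judged correct in isolation.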
The selection of new test cases is based on the output of the successful test cases, and the technique can be used in the absence of test oracles [^1]. Metamorphic testing can be used with other test case selection strategies and can be combined with program checkers to suggest further testing [^1]. The approach requires problem domain knowledge, and domain-specific methodologies should be developed to facilitate its application [^1].
-
-### Limitations of Current Software Testing Techniques
-
-Current software testing techniques have limitations that can result in undetected errors in software systems [^1][^5]. Successful test cases that do not reveal software errors are usually not further investigated, even though they may contain useful information for revealing software errors [^1]. Additionally, errors may still exist in the software even after extensive testing has been conducted in the development phase, and techniques for uncovering software errors in the production phase are seldom addressed in the literature [^1]. Furthermore, the availability of test oracles is pragmatically unattainable in most situations, but it is generally assumed in conventional software testing techniques [^1]. However, metamorphic testing can augment the effectiveness of existing test selection strategies by deriving new test cases from successful ones, which aims at revealing software errors that are possibly left undetected in successful test cases [^1]. The proposed technique can also help uncover software errors in the production phase and can be used in the absence of test oracles [^1].
-
-### Proposed Test Case Selection Technique
-
-In software testing, a set of test cases is constructed according to some predefined selection criteria. The software is then examined against these test cases. However, an error-revealing test case is considered useful, while a successful test case that does not reveal software errors is usually not further investigated. Whether these successful test cases still contain useful information for revealing software errors has not been properly studied [^1]. To address this issue, a novel test case selection technique is proposed that derives new test cases from the successful ones. The selection aims at revealing software errors that are possibly left undetected in successful test cases which may be generated using some existing strategies. As such, the proposed technique augments the effectiveness of existing test selection strategies. The technique also helps uncover software errors in the production phase and can be used in the absence of test oracles [^1].
-
-## Codexglue for Machine Learning Research
-
-This section introduces CodeXGLUE, a benchmark dataset for machine learning research in program understanding and generation. CodeXGLUE includes a collection of 10 tasks across 14 datasets and a platform for model evaluation and comparison [^2]. The availability of such data and baselines can help the development and validation of new methods that can be applied to various program understanding and generation problems. Benchmark datasets have a significant impact on accelerating research in programming language tasks [^2].
-
-### Background: A Brief History of Benchmark Datasets and Their Importance in Machine Learning Research
-
-Benchmark datasets have played a significant role in accelerating research in programming language tasks and machine learning.
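Such benchmarks are typically consumed programmatically. As a small illustration (ours, not from [^2]), loading one CodeXGLUE task might look like the following; the dataset id and field names are assumptions based on the Hugging Face Hub mirror, not something fixed by the paper itself.

```python
from datasets import load_dataset

# Illustrative only: the CodeXGLUE defect-detection task as mirrored on the
# Hugging Face Hub. Dataset id and field names are assumed here.
dataset = load_dataset("code_x_glue_cc_defect_detection", split="train")

example = dataset[0]
print(example["func"][:200])  # source code of a C function
print(example["target"])      # truthy if the function is labelled vulnerable
```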
As stated by [^2], benchmark datasets provide a platform for the development and validation of new methods that can be applied to various program understanding and generation problems. They also enable researchers to compare the performance of different models and algorithms on a standardized set of tasks. Benchmark datasets have been used in various fields, including computer vision, natural language processing, and speech recognition, to name a few. The availability of such data and baselines can help researchers to develop new methods and improve the state-of-the-art in the field. - -### CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation - -CodeXGLUE is a benchmark dataset for machine learning research in program understanding and generation [^2]. It includes a collection of 10 tasks across 14 datasets and a platform for model evaluation and comparison. CodeXGLUE also features three baseline systems, including the BERT-style, GPT-style, and Encoder-Decoder models, to make it easy for researchers to use the platform. The availability of such data and baselines can help the development and validation of new methods that can be applied to various program understanding and generation problems. Benchmark datasets have a significant impact on accelerating research in programming language tasks, and CodeXGLUE is the first diversified benchmark dataset that can be applied to various code intelligence problems [^2]. - -### Impact of Benchmark Datasets on Accelerating Research - -Benchmark datasets have been shown to have a significant impact on accelerating research in programming language tasks [^2]. The availability of benchmark datasets such as CodeXGLUE can help the development and validation of new methods that can be applied to various program understanding and generation problems. The success of benchmark datasets such as ImageNet for computer vision and GLUE for natural language understanding has demonstrated the importance of diversified benchmark datasets in the growth of applied AI research [^2]. - -## Neuro-Symbolic Program Synthesis - -Neuro-Symbolic Program Synthesis is a novel technique proposed to overcome the limitations of neural architectures for program induction [^3]. This technique can automatically construct computer programs in a domain-specific language that are consistent with a set of input-output examples provided at test time. The approach is based on two novel neural modules. The first module, called the cross-correlation I/O network, produces a continuous representation of the set of input-output examples. The second module, the Recursive-Reverse-Recursive Neural Network (R3NN), synthesizes a program by incrementally expanding partial programs. The R3NN model is not only able to construct programs from new input-output examples but also able to construct new programs for tasks that it had never observed before during training. This technique is motivated by the need for model interpretability and scalability to multiple tasks [^3]. - -### Background: This subsection provides a brief history of neural architectures for program induction and their limitations. - -Recent years have seen the proposal of a number of neural architectures for the problem of Program Induction. These architectures are able to learn mappings that generalize to new test inputs. However, they have several limitations. Firstly, they are computationally expensive and hard to train. Secondly, a model has to be trained for each task (program) separately. 
Thirdly, it is hard to interpret or verify the correctness of the learnt mapping (as it is defined by a neural network) [^3]. These limitations have motivated the development of novel techniques such as Neuro-Symbolic Program Synthesis, which aims to overcome these problems and automatically construct computer programs in a domain-specific language that are consistent with a set of input-output examples provided at test time. - -### Neuro-Symbolic Program Synthesis - -Neuro-Symbolic Program Synthesis is a novel technique proposed by Parisotto et al. to overcome the limitations of neural architectures for program induction [^3]. The existing neural architectures for program induction are computationally expensive, hard to train, and require a separate model for each task. Moreover, it is difficult to interpret or verify the correctness of the learned mapping. Neuro-Symbolic Program Synthesis can automatically construct computer programs in a domain-specific language that are consistent with a set of input-output examples provided at test time. The proposed method is based on two novel neural modules: the cross-correlation I/O network and the Recursive-Reverse-Recursive Neural Network (R3NN). The cross-correlation I/O network produces a continuous representation of the set of input-output examples, while the R3NN synthesizes a program by incrementally expanding partial programs. The effectiveness of the approach is demonstrated by applying it to the domain of regular expression-based string transformations. The R3NN model is not only able to construct programs from new input-output examples but also able to construct new programs for tasks that it had never observed before during training [^3]. - -### Automatically Constructing Computer Programs - -Neuro-Symbolic Program Synthesis is a novel technique proposed to overcome the limitations of neural architectures for program induction [^3]. The proposed technique can automatically construct computer programs in a domain-specific language that are consistent with a set of input-output examples provided at test time. The approach is based on two novel neural modules. The first module, called the cross correlation I/O network, produces a continuous representation of the set of input-output examples. The second module, the Recursive-Reverse-Recursive Neural Network (R3NN), synthesizes a program by incrementally expanding partial programs. The effectiveness of the approach has been demonstrated by applying it to the domain of regular expression based string transformations. The R3NN model is not only able to construct programs from new input-output examples but also able to construct new programs for tasks that it had never observed before during training [^3]. - -## Test Case Generation for Program Repair - -This section investigates the feasibility and effectiveness of test case generation in alleviating the overfitting issue in test suite based program repair techniques. Test-suites are typically inadequate for completely specifying the expected behavior of the program under repair, which leads to overfitting patches that pass the test suite but may be incorrect. To address this issue, two approaches for using test case generation to improve test suite based repair are proposed in [^4]. The first approach works with generate-and-validate techniques (MinImpact), and the second one for synthesis-based techniques (UnsatGuided). 
The proposed approaches are evaluated on 224 bugs of the Defects4J repository, and the results indicate that test case generation can change the resulting patch, but is not effective at turning incorrect patches into correct ones. The study identifies the problems related to the ineffectiveness and anticipates that the findings will lead to future research to build test-case generation techniques that are tailored to automatic repair systems. - -### Background: This subsection provides a brief history of program repair techniques and their limitations. - -Program repair techniques aim to automatically fix software bugs. Among the many different kinds of program repair techniques, one widely studied family of techniques is called test suite based repair. Test-suites are in essence input-output specifications and are therefore typically inadequate for completely specifying the expected behavior of the program under repair. Consequently, the patches generated by test suite based program repair techniques pass the test suite, yet may be incorrect. Patches that are overly specific to the used test suite and fail to generalize to other test cases are called overfitting patches [^4]. The limitations of test suite based repair techniques have led to the investigation of the feasibility and effectiveness of test case generation in alleviating the overfitting issue. - -### Feasibility and Effectiveness of Test Case Generation - -Test suite based repair techniques suffer from the overfitting issue, where the generated patches may be overly specific to the used test suite and fail to generalize to other test cases. In order to alleviate this issue, test case generation has been proposed as a potential solution [^4]. This subsection investigates the feasibility and effectiveness of test case generation in improving test suite based repair techniques. The authors propose two approaches for using test case generation and perform an extensive evaluation on 224 bugs of the Defects4J repository. The results indicate that test case generation can change the resulting patch, but is not effective at turning incorrect patches into correct ones. The authors identify the problems related to the ineffectiveness and anticipate that their findings will lead to future research to build test-case generation techniques that are tailored to automatic repair systems [^4]. - -### Proposed Approaches for Using Test Case Generation - -Test suite based repair techniques generate patches that pass the test suite but may be incorrect, leading to overfitting patches. In order to alleviate this issue, test case generation can be used to improve test suite based repair. Two approaches have been proposed for this purpose [^4]: - -1. **MinImpact:** This approach is suitable for generate-and-validate techniques that can enumerate patches. It uses an evolutionary approach to derive test suites that maximize code coverage and generates assertions that encode the current behavior of the program. The aim is to use additional automatically generated tests to guide the patch generation process towards generating patches that are less likely to be overfitting. - -2. **UnsatGuided:** This approach is suitable for synthesis-based techniques. It uses a constraint solver to synthesize patches that satisfy the given test suite. The aim is to use additional automatically generated tests to guide the synthesis process towards generating patches that are less likely to be overfitting. 
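Both approaches rest on a common idea: automatically generated tests encode the current behaviour of the program, and a candidate patch that disturbs little of that pinned-down behaviour is less likely to be overfitting. The rough sketch below illustrates that shared filtering step in the spirit of MinImpact; the function and parameter names are ours, not from [^4].

```python
def rank_candidate_patches(candidate_patches, original_tests,
                           generated_tests, run_test):
    """Keep patches that pass the original (bug-exposing) test suite, then
    rank them by how many automatically generated tests they break.
    Illustrative sketch only; run_test(patch, test) returns True on pass."""
    ranked = []
    for patch in candidate_patches:
        # A plausible patch must first make the original test suite pass.
        if not all(run_test(patch, test) for test in original_tests):
            continue
        # Generated tests assert the program's current behaviour; the more of
        # them a patch breaks, the more it deviates from that behaviour.
        broken = sum(1 for test in generated_tests if not run_test(patch, test))
        ranked.append((broken, patch))
    ranked.sort(key=lambda pair: pair[0])  # prefer minimal behavioural impact
    return [patch for _, patch in ranked]
```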
- -These approaches have been evaluated on 224 bugs of the Defects4J repository, and the results indicate that test case generation can change the resulting patch, but is not effective at turning incorrect patches into correct ones. The ineffectiveness is attributed to the limitations of the generate-and-validate and synthesis-based techniques used in the study. Future research is anticipated to build test-case generation techniques that are tailored to automatic repair systems. [^4] - -## Combinatorial Testing for Deep Learning Systems - -Deep learning (DL) systems have been widely applied in various applications due to their high accuracy, but their robustness has recently received great concerns [^5]. Testing techniques could help to evaluate the robustness of a DL system and detect vulnerabilities at an early stage. However, the main challenge of testing such systems is that their runtime state space is too large [^5]. Combinatorial testing (CT) is an effective testing technique for traditional software to reduce the testing space while obtaining relatively high defect detection abilities. In an exploratory study of CT on DL systems, a set of coverage criteria for DL systems and a CT coverage guided test generation technique were proposed [^5]. The evaluation demonstrated that CT provides a promising avenue for testing DL systems. The proposed combinatorial testing coverage criteria are useful for adversarial example detection and local-robustness analysis [^5]. - -### Background: This subsection provides a brief history of deep learning systems and their challenges in testing. - -Deep learning (DL) has achieved remarkable progress over the past decade and been widely applied to many safety-critical applications. However, the robustness of DL systems recently receives great concerns, such as adversarial examples against computer vision systems, which could potentially result in severe consequences [^5]. The main challenge of testing such systems is that its runtime state space is too large: if we view each neuron as a runtime state for DL, then a DL system often contains massive states, rendering testing each state almost impossible [^5]. - -### Combinatorial Testing - -Combinatorial testing (CT) is a well-established and successful technique in traditional software testing that aims to reduce the testing space while obtaining relatively high defect detection abilities [^5]. CT focuses on testing the interactions of inputs rather than exhaustively searching all the combinations of input space. This technique has been successfully applied to testing different configurable software systems, as most faults are caused by interactions involving only a few parameters [^5]. However, CT is limited in handling various constraints and is not directly applicable to deep learning (DL) systems due to their massive runtime state space [^5]. - -In this paper, an exploratory study of CT on DL systems is performed. The concept of CT is adapted, and a set of coverage criteria for DL systems is proposed, as well as a CT coverage-guided test generation technique [^5]. The evaluation results demonstrate that CT provides a promising avenue for testing DL systems [^5]. - -### Coverage Criteria for Deep Learning Systems - -Combinatorial testing (CT) has been proposed as a promising avenue for testing deep learning (DL) systems. To reduce the testing space while obtaining relatively high defect detection abilities, a set of coverage criteria for DL systems has been proposed [^5]. 
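Before turning to how these criteria are used, it may help to see the classical combinatorial idea they adapt. The sketch below is our illustration of plain pairwise testing for configurable software, not the paper's DL coverage criteria: it greedily builds a small suite that covers every pair of parameter values.

```python
from itertools import combinations, product

def uncovered_pairs(params, suite):
    """All cross-parameter (index, value) pairs not yet covered by `suite`."""
    values = [(i, v) for i, vals in enumerate(params) for v in vals]
    pairs = {(a, b) for a, b in combinations(values, 2) if a[0] != b[0]}
    for test in suite:
        for i, j in combinations(range(len(test)), 2):
            pairs.discard(((i, test[i]), (j, test[j])))
    return pairs

def greedy_pairwise(params):
    """Greedily pick, each round, the full assignment covering the most
    still-uncovered value pairs (a simple covering-array construction)."""
    suite = []
    remaining = uncovered_pairs(params, suite)
    while remaining:
        best = max(product(*params), key=lambda t: sum(
            ((i, t[i]), (j, t[j])) in remaining
            for i, j in combinations(range(len(t)), 2)))
        suite.append(list(best))
        remaining = uncovered_pairs(params, suite)
    return suite

# 3 two-valued parameters: exhaustive testing needs 2**3 = 8 tests,
# but all value pairs are covered by a much smaller suite.
print(greedy_pairwise([["on", "off"], ["fast", "slow"], ["x86", "arm"]]))
```

For the three two-valued parameters above, the greedy loop returns four or five tests instead of the eight exhaustive combinations; this kind of space reduction is what the DL coverage criteria aim to transfer to neuron-level runtime states.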
These criteria are useful for adversarial example detection and local-robustness analysis. The proposed CT coverage criteria have been evaluated on small-scale neural networks with only dense layers, with no more than 400 neurons. The usefulness and scalability of the proposed criteria have also been demonstrated on practical-sized DL systems such as ResNet-50 with nearly 200 layers and 100,000 neurons [^5].
-
-## Conclusion
-
-In this systematic review, we have explored the field of programming testing arxiv and its importance in solving the problem of software errors. We have covered five papers that discuss new approaches for generating test cases, benchmark datasets for machine learning research, novel techniques for program synthesis, feasibility and effectiveness of test case generation for program repair, and combinatorial testing for deep learning systems.
-
-Metamorphic testing has been proposed as a new approach for generating next test cases that can reveal software errors left undetected in successful test cases [^1]. CodeXGLUE has been introduced as a benchmark dataset for machine learning research in program understanding and generation, which can help accelerate research in programming language tasks [^2]. Neuro-symbolic program synthesis has been proposed as a novel technique to overcome the limitations of neural architectures for program induction [^3]. Test case generation for program repair has been investigated for its feasibility and effectiveness in alleviating the overfitting issue in test suite based program repair techniques [^4]. Combinatorial testing has been explored as a promising avenue for testing deep learning systems, and a set of coverage criteria for deep learning systems has been proposed [^5].
-
-Future research directions in programming testing arxiv include exploring the potential of metamorphic testing in uncovering software errors in the production phase, developing new benchmark datasets for machine learning research, improving the effectiveness of test case generation for program repair, and further investigating the feasibility and effectiveness of combinatorial testing for deep learning systems. Overall, the papers covered in this systematic review provide valuable insights and directions for future research in programming testing arxiv.
-
-## References
-[^1]: [Chen, Tsong Y., Shing C. Cheung, and Shiu Ming Yiu. "Metamorphic testing: a new approach for generating next test cases." arXiv preprint arXiv:2002.12543 (2020).](https://arxiv.org/abs/2002.12543)
-
-[^2]: [Lu, Shuai, et al. "Codexglue: A machine learning benchmark dataset for code understanding and generation." arXiv preprint arXiv:2102.04664 (2021).](https://arxiv.org/abs/2102.04664)
-
-[^3]: [Parisotto, Emilio, et al. "Neuro-symbolic program synthesis." arXiv preprint arXiv:1611.01855 (2016).](https://arxiv.org/abs/1611.01855)
-
-[^4]: [Yu, Zhongxing, et al. "Test case generation for program repair: A study of feasibility and effectiveness." arXiv preprint arXiv:1703.00198 (2017).](https://arxiv.org/abs/1703.00198)
-
-[^5]: [Ma, Lei, et al. "Combinatorial testing for deep learning systems."
arXiv preprint arXiv:1806.07723 (2018).](https://arxiv.org/abs/1806.07723) \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/musicldm/pipeline_musicldm.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/musicldm/pipeline_musicldm.py deleted file mode 100644 index 4ee07f4e056a7e4f07e95c67249bc1e271a1d682..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/musicldm/pipeline_musicldm.py +++ /dev/null @@ -1,650 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import Any, Callable, Dict, List, Optional, Union - -import numpy as np -import torch -from transformers import ( - ClapFeatureExtractor, - ClapModel, - ClapTextModelWithProjection, - RobertaTokenizer, - RobertaTokenizerFast, - SpeechT5HifiGan, -) - -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( - is_accelerate_available, - is_accelerate_version, - is_librosa_available, - logging, - replace_example_docstring, -) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline - - -if is_librosa_available(): - import librosa - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> from diffusers import MusicLDMPipeline - >>> import torch - >>> import scipy - - >>> repo_id = "cvssp/audioldm-s-full-v2" - >>> pipe = MusicLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16) - >>> pipe = pipe.to("cuda") - - >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs" - >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0] - - >>> # save the audio sample as a .wav file - >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) - ``` -""" - - -class MusicLDMPipeline(DiffusionPipeline): - r""" - Pipeline for text-to-audio generation using MusicLDM. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. - text_encoder ([`~transformers.ClapModel`]): - Frozen text-audio embedding model (`ClapTextModel`), specifically the - [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. - tokenizer ([`PreTrainedTokenizer`]): - A [`~transformers.RobertaTokenizer`] to tokenize text. - feature_extractor ([`~transformers.ClapFeatureExtractor`]): - Feature extractor to compute mel-spectrograms from audio waveforms. - unet ([`UNet2DConditionModel`]): - A `UNet2DConditionModel` to denoise the encoded audio latents. 
- scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - vocoder ([`~transformers.SpeechT5HifiGan`]): - Vocoder of class `SpeechT5HifiGan`. - """ - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: Union[ClapTextModelWithProjection, ClapModel], - tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], - feature_extractor: Optional[ClapFeatureExtractor], - unet: UNet2DConditionModel, - scheduler: KarrasDiffusionSchedulers, - vocoder: SpeechT5HifiGan, - ): - super().__init__() - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - feature_extractor=feature_extractor, - unet=unet, - scheduler=scheduler, - vocoder=vocoder, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing - def enable_vae_slicing(self): - r""" - Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to - compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. - """ - self.vae.enable_slicing() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing - def disable_vae_slicing(self): - r""" - Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to - computing decoding in one step. - """ - self.vae.disable_slicing() - - def _encode_prompt( - self, - prompt, - device, - num_waveforms_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device (`torch.device`): - torch device - num_waveforms_per_prompt (`int`): - number of waveforms that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the audio generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. 
- """ - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - if prompt_embeds is None: - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - attention_mask = text_inputs.attention_mask - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLAP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - prompt_embeds = self.text_encoder.get_text_features( - text_input_ids.to(device), - attention_mask=attention_mask.to(device), - ) - - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device) - - ( - bs_embed, - seq_len, - ) = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt) - prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - max_length = prompt_embeds.shape[1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - uncond_input_ids = uncond_input.input_ids.to(device) - attention_mask = uncond_input.attention_mask.to(device) - - negative_prompt_embeds = self.text_encoder.get_text_features( - uncond_input_ids, - attention_mask=attention_mask, - ) - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len) - - # For classifier free guidance, we need to do two forward passes. 
- # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - return prompt_embeds - - # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform - def mel_spectrogram_to_waveform(self, mel_spectrogram): - if mel_spectrogram.dim() == 4: - mel_spectrogram = mel_spectrogram.squeeze(1) - - waveform = self.vocoder(mel_spectrogram) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - waveform = waveform.cpu().float() - return waveform - - # Copied from diffusers.pipelines.audioldm2.pipeline_audioldm2.AudioLDM2Pipeline.score_waveforms - def score_waveforms(self, text, audio, num_waveforms_per_prompt, device, dtype): - if not is_librosa_available(): - logger.info( - "Automatic scoring of the generated audio waveforms against the input prompt text requires the " - "`librosa` package to resample the generated waveforms. Returning the audios in the order they were " - "generated. To enable automatic scoring, install `librosa` with: `pip install librosa`." - ) - return audio - inputs = self.tokenizer(text, return_tensors="pt", padding=True) - resampled_audio = librosa.resample( - audio.numpy(), orig_sr=self.vocoder.config.sampling_rate, target_sr=self.feature_extractor.sampling_rate - ) - inputs["input_features"] = self.feature_extractor( - list(resampled_audio), return_tensors="pt", sampling_rate=self.feature_extractor.sampling_rate - ).input_features.type(dtype) - inputs = inputs.to(device) - - # compute the audio-text similarity score using the CLAP model - logits_per_text = self.text_encoder(**inputs).logits_per_text - # sort by the highest matching generations per prompt - indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt] - audio = torch.index_select(audio, 0, indices.reshape(-1).cpu()) - return audio - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.check_inputs - def check_inputs( - self, - prompt, - audio_length_in_s, - vocoder_upsample_factor, - callback_steps, - negative_prompt=None, - prompt_embeds=None, - negative_prompt_embeds=None, - ): - min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor - if audio_length_in_s < min_audio_length_in_s: - raise ValueError( - f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " - f"is {audio_length_in_s}." 
- ) - - if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: - raise ValueError( - f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " - f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " - f"{self.vae_scale_factor}." - ) - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.prepare_latents - def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): - shape = ( - batch_size, - num_channels_latents, - height // self.vae_scale_factor, - self.vocoder.config.model_in_dim // self.vae_scale_factor, - ) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared - to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` - method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with - `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
- """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - model_sequence = [ - self.text_encoder.text_model, - self.text_encoder.text_projection, - self.unet, - self.vae, - self.vocoder, - self.text_encoder, - ] - - hook = None - for cpu_offloaded_model in model_sequence: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - # We'll offload the last model manually. - self.final_offload_hook = hook - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Union[str, List[str]] = None, - audio_length_in_s: Optional[float] = None, - num_inference_steps: int = 200, - guidance_scale: float = 2.0, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_waveforms_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: Optional[int] = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - output_type: Optional[str] = "np", - ): - r""" - The call function to the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`. - audio_length_in_s (`int`, *optional*, defaults to 10.24): - The length of the generated audio sample in seconds. - num_inference_steps (`int`, *optional*, defaults to 200): - The number of denoising steps. More denoising steps usually lead to a higher quality audio at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 2.0): - A higher guidance scale value encourages the model to generate audio that is closely linked to the text - `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide what to not include in audio generation. If not defined, you need to - pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). - num_waveforms_per_prompt (`int`, *optional*, defaults to 1): - The number of waveforms to generate per prompt. If `num_waveforms_per_prompt > 1`, the text encoding - model is a joint text-audio model ([`~transformers.ClapModel`]), and the tokenizer is a - `[~transformers.ClapProcessor]`, then automatic scoring will be performed between the generated outputs - and the input text. This scoring ranks the generated waveforms based on their cosine similarity to text - input in the joint text-audio embedding space. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies - to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not - provided, text embeddings are generated from the `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If - not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. - callback (`Callable`, *optional*): - A function that calls every `callback_steps` steps during inference. The function is called with the - following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function is called. If not specified, the callback is called at - every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in - [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). - output_type (`str`, *optional*, defaults to `"np"`): - The output format of the generated audio. Choose between `"np"` to return a NumPy `np.ndarray` or - `"pt"` to return a PyTorch `torch.Tensor` object. Set to `"latent"` to return the latent diffusion - model (LDM) output. - - Examples: - - Returns: - [`~pipelines.AudioPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is - returned where the first element is a list with the generated audio. - """ - # 0. Convert audio input length from seconds to spectrogram height - vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate - - if audio_length_in_s is None: - audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor - - height = int(audio_length_in_s / vocoder_upsample_factor) - - original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) - if height % self.vae_scale_factor != 0: - height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor - logger.info( - f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " - f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " - f"denoising process." - ) - - # 1. Check inputs. Raise error if not correct - self.check_inputs( - prompt, - audio_length_in_s, - vocoder_upsample_factor, - callback_steps, - negative_prompt, - prompt_embeds, - negative_prompt_embeds, - ) - - # 2. 
Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - prompt_embeds = self._encode_prompt( - prompt, - device, - num_waveforms_per_prompt, - do_classifier_free_guidance, - negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - ) - - # 4. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # 5. Prepare latent variables - num_channels_latents = self.unet.config.in_channels - latents = self.prepare_latents( - batch_size * num_waveforms_per_prompt, - num_channels_latents, - height, - prompt_embeds.dtype, - device, - generator, - latents, - ) - - # 6. Prepare extra step kwargs - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 7. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet( - latent_model_input, - t, - encoder_hidden_states=None, - class_labels=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - self.maybe_free_model_hooks() - - # 8. Post-processing - if not output_type == "latent": - latents = 1 / self.vae.config.scaling_factor * latents - mel_spectrogram = self.vae.decode(latents).sample - else: - return AudioPipelineOutput(audios=latents) - - audio = self.mel_spectrogram_to_waveform(mel_spectrogram) - - audio = audio[:, :original_waveform_length] - - # 9. 
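The guidance arithmetic inside the denoising loop above can be exercised in isolation; a toy example with random tensors standing in for real UNet predictions:

import torch

guidance_scale = 2.0
# One batched UNet call covers both branches: the input is the
# [unconditional, text-conditional] concatenation of the latents.
noise_pred = torch.randn(2, 8, 64, 16)  # toy stand-in for a UNet output
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# guidance_scale == 1.0 reduces to the purely text-conditional prediction.
assert guided.shape == noise_pred_uncond.shape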
Automatic scoring - if num_waveforms_per_prompt > 1 and prompt is not None: - audio = self.score_waveforms( - text=prompt, - audio=audio, - num_waveforms_per_prompt=num_waveforms_per_prompt, - device=device, - dtype=prompt_embeds.dtype, - ) - - if output_type == "np": - audio = audio.numpy() - - if not return_dict: - return (audio,) - - return AudioPipelineOutput(audios=audio) diff --git a/spaces/paschalc/ImageRecognitionDemo/app.py b/spaces/paschalc/ImageRecognitionDemo/app.py deleted file mode 100644 index 352210a2852fdab14e586f07d038d997d95efaa8..0000000000000000000000000000000000000000 --- a/spaces/paschalc/ImageRecognitionDemo/app.py +++ /dev/null @@ -1,16 +0,0 @@ -import gradio as gr -from transformers import AutoFeatureExtractor, AutoModelForImageClassification, pipeline - -image_model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224") -extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224") - -image_pipe = pipeline("image-classification", model=image_model, feature_extractor=extractor) - -# classify the image and return the results -def classify_image(inp): - results = image_pipe(inp) - return {result["label"]: result["score"] for result in results} - -gr.Interface(fn=classify_image, - inputs=gr.Image(type="pil"), - outputs=gr.Label(num_top_classes=5)).launch() \ No newline at end of file diff --git a/spaces/perezcatriel/data_world_jobs/ML/salary.py b/spaces/perezcatriel/data_world_jobs/ML/salary.py deleted file mode 100644 index 3b130df4366cb59382f131ad49db0fd9dbd05a6a..0000000000000000000000000000000000000000 --- a/spaces/perezcatriel/data_world_jobs/ML/salary.py +++ /dev/null @@ -1,61 +0,0 @@ -import pandas as pd -import streamlit as st -from sklearn.preprocessing import LabelEncoder -from sklearn.ensemble import RandomForestRegressor - -# Read the data and select the required columns -df = pd.read_csv('./ds_salaries.csv') -df = df[['company_location', 'salary_in_usd']] - - -# Encode the company locations -le = LabelEncoder() -df['company_location'] = le.fit_transform(df['company_location']) - -# Decode the company locations -decoded_locations = le.inverse_transform(df['company_location'].unique()) - -# Split the data into inputs and outputs -X = df.iloc[:, :-1].values -y = df.iloc[:, -1].values - -# Train the model -model = RandomForestRegressor(n_estimators=100, random_state=42) -model.fit(X, y) - -# Get the company locations and their predicted salaries -locations = df['company_location'].unique() -predicted_salaries = model.predict(locations.reshape(-1, 1)) -results_df = pd.DataFrame({'company_location': locations, 'predicted_salary': predicted_salaries}) - -# Decode the company locations -results_df['company_location'] = le.inverse_transform(results_df['company_location']) - -# Sort the results by predicted salary -results_df = results_df.sort_values('predicted_salary', ascending=False).reset_index(drop=True) - - - -# Show the title and the top 5 best-paid countries -st.markdown("""
-Top 5 de países mejor pagados
- """, unsafe_allow_html=True) - -# Description -st.markdown("""
-Este código utiliza un algoritmo de RandomForest para seleccionar solo algunas empresas de cada país de forma aleatoria y retornar el promedio. Utilizamos este algoritmo para obtener un promedio menos sesgado por outliers
-""", unsafe_allow_html=True) - -for i in range(5): - location = results_df.loc[i, 'company_location'] - salary = results_df.loc[i, 'predicted_salary'] - st.markdown(f'### **{location}**: ${salary:,.2f}', unsafe_allow_html=True) - -# Mostrar el menú desplegable para seleccionar un país -st.markdown('---') -st.title('Seleccionar un país') -selected_location = st.selectbox('Ubicación de la empresa', decoded_locations) - -# Mostrar el salario predicho para el país seleccionado -predicted_salary = results_df.loc[results_df['company_location'] == selected_location, 'predicted_salary'].iloc[0] -st.markdown(f'### **{selected_location}**: ${predicted_salary:,.2f}', unsafe_allow_html=True) diff --git a/spaces/phamson02/tho_ai/complete_poem.py b/spaces/phamson02/tho_ai/complete_poem.py deleted file mode 100644 index 1fa3f72b72d3b7df8b6d949ff17f81bd63ce4fe7..0000000000000000000000000000000000000000 --- a/spaces/phamson02/tho_ai/complete_poem.py +++ /dev/null @@ -1,72 +0,0 @@ -import gradio as gr -from transformers import AutoTokenizer, AutoModelForCausalLM - -tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base") -# Define your models -models = { - "Lục Bát": AutoModelForCausalLM.from_pretrained( - "Libosa2707/vietnamese-poem-luc-bat-gpt2" - ), - "Bảy Chữ": AutoModelForCausalLM.from_pretrained( - "Libosa2707/vietnamese-poem-bay-chu-gpt2" - ), - "Tám Chữ": AutoModelForCausalLM.from_pretrained( - "Libosa2707/vietnamese-poem-tam-chu-gpt2" - ), - "Năm Chữ": AutoModelForCausalLM.from_pretrained( - "Libosa2707/vietnamese-poem-nam-chu-gpt2" - ), -} - - -def complete_poem(text, style): - # Preprocess the input text - text = text.strip() - text = text.lower() - - # Choose the model based on the selected style - model = models[style] - - # Tokenize the input line - input_ids = tokenizer.encode(text, return_tensors="pt")[:, :-1] - - # Generate text - output = model.generate(input_ids, max_length=100, do_sample=True, temperature=0.7) - - # Decode the output - generated_text = tokenizer.decode( - output[:, input_ids.shape[-1] :][0], skip_special_tokens=True - ) - - text = text + " " + generated_text - - # Post-process the output - text = text.replace("", "\n") - pretty_text = "" - for idx, line in enumerate(text.split("\n")): - line = line.strip() - if not line: - continue - line = line[0].upper() + line[1:] - pretty_text += line + "\n" - - return pretty_text - - -complete_poem_interface = gr.Interface( - title="Viết tiếp áng thơ hay...", - fn=complete_poem, - inputs=[ - gr.components.Textbox( - lines=1, - placeholder="Tôi đâu có biết làm thơ", - label="Những áng thơ đầu tiên", - ), - gr.components.Dropdown( - choices=["Lục Bát", "Bảy Chữ", "Tám Chữ", "Năm Chữ"], - label="Kiểu thơ", - value="Lục Bát", - ), - ], - outputs="text", -) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/utils/misc.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/utils/misc.py deleted file mode 100644 index bd191c4e14f389d6d0f799dfef9c5c0221a8c568..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/utils/misc.py +++ /dev/null @@ -1,735 +0,0 @@ -import contextlib -import errno -import getpass -import hashlib -import io -import logging -import os -import posixpath -import shutil -import stat -import sys -import sysconfig -import urllib.parse -from io import StringIO -from itertools import filterfalse, tee, zip_longest -from types import TracebackType -from typing import ( - Any, - 
BinaryIO, - Callable, - ContextManager, - Dict, - Generator, - Iterable, - Iterator, - List, - Optional, - TextIO, - Tuple, - Type, - TypeVar, - Union, - cast, -) - -from pip._vendor.pyproject_hooks import BuildBackendHookCaller -from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed - -from pip import __version__ -from pip._internal.exceptions import CommandError, ExternallyManagedEnvironment -from pip._internal.locations import get_major_minor_version -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.virtualenv import running_under_virtualenv - -__all__ = [ - "rmtree", - "display_path", - "backup_dir", - "ask", - "splitext", - "format_size", - "is_installable_dir", - "normalize_path", - "renames", - "get_prog", - "captured_stdout", - "ensure_dir", - "remove_auth_from_url", - "check_externally_managed", - "ConfiguredBuildBackendHookCaller", -] - -logger = logging.getLogger(__name__) - -T = TypeVar("T") -ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType] -VersionInfo = Tuple[int, int, int] -NetlocTuple = Tuple[str, Tuple[Optional[str], Optional[str]]] - - -def get_pip_version() -> str: - pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..") - pip_pkg_dir = os.path.abspath(pip_pkg_dir) - - return "pip {} from {} (python {})".format( - __version__, - pip_pkg_dir, - get_major_minor_version(), - ) - - -def normalize_version_info(py_version_info: Tuple[int, ...]) -> Tuple[int, int, int]: - """ - Convert a tuple of ints representing a Python version to one of length - three. - - :param py_version_info: a tuple of ints representing a Python version, - or None to specify no version. The tuple can have any length. - - :return: a tuple of length three if `py_version_info` is non-None. - Otherwise, return `py_version_info` unchanged (i.e. None). - """ - if len(py_version_info) < 3: - py_version_info += (3 - len(py_version_info)) * (0,) - elif len(py_version_info) > 3: - py_version_info = py_version_info[:3] - - return cast("VersionInfo", py_version_info) - - -def ensure_dir(path: str) -> None: - """os.path.makedirs without EEXIST.""" - try: - os.makedirs(path) - except OSError as e: - # Windows can raise spurious ENOTEMPTY errors. See #6426. - if e.errno != errno.EEXIST and e.errno != errno.ENOTEMPTY: - raise - - -def get_prog() -> str: - try: - prog = os.path.basename(sys.argv[0]) - if prog in ("__main__.py", "-c"): - return f"{sys.executable} -m pip" - else: - return prog - except (AttributeError, TypeError, IndexError): - pass - return "pip" - - -# Retry every half second for up to 3 seconds -# Tenacity raises RetryError by default, explicitly raise the original exception -@retry(reraise=True, stop=stop_after_delay(3), wait=wait_fixed(0.5)) -def rmtree(dir: str, ignore_errors: bool = False) -> None: - if sys.version_info >= (3, 12): - shutil.rmtree(dir, ignore_errors=ignore_errors, onexc=rmtree_errorhandler) - else: - shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=rmtree_errorhandler) - - -def rmtree_errorhandler( - func: Callable[..., Any], path: str, exc_info: Union[ExcInfo, BaseException] -) -> None: - """On Windows, the files in .svn are read-only, so when rmtree() tries to - remove them, an exception is thrown. 
We catch that here, remove the - read-only attribute, and hopefully continue without problems.""" - try: - has_attr_readonly = not (os.stat(path).st_mode & stat.S_IWRITE) - except OSError: - # it's equivalent to os.path.exists - return - - if has_attr_readonly: - # convert to read/write - os.chmod(path, stat.S_IWRITE) - # use the original function to repeat the operation - func(path) - return - else: - raise - - -def display_path(path: str) -> str: - """Gives the display value for a given path, making it relative to cwd - if possible.""" - path = os.path.normcase(os.path.abspath(path)) - if path.startswith(os.getcwd() + os.path.sep): - path = "." + path[len(os.getcwd()) :] - return path - - -def backup_dir(dir: str, ext: str = ".bak") -> str: - """Figure out the name of a directory to back up the given dir to - (adding .bak, .bak2, etc)""" - n = 1 - extension = ext - while os.path.exists(dir + extension): - n += 1 - extension = ext + str(n) - return dir + extension - - -def ask_path_exists(message: str, options: Iterable[str]) -> str: - for action in os.environ.get("PIP_EXISTS_ACTION", "").split(): - if action in options: - return action - return ask(message, options) - - -def _check_no_input(message: str) -> None: - """Raise an error if no input is allowed.""" - if os.environ.get("PIP_NO_INPUT"): - raise Exception( - f"No input was expected ($PIP_NO_INPUT set); question: {message}" - ) - - -def ask(message: str, options: Iterable[str]) -> str: - """Ask the message interactively, with the given possible responses""" - while 1: - _check_no_input(message) - response = input(message) - response = response.strip().lower() - if response not in options: - print( - "Your response ({!r}) was not one of the expected responses: " - "{}".format(response, ", ".join(options)) - ) - else: - return response - - -def ask_input(message: str) -> str: - """Ask for input interactively.""" - _check_no_input(message) - return input(message) - - -def ask_password(message: str) -> str: - """Ask for a password interactively.""" - _check_no_input(message) - return getpass.getpass(message) - - -def strtobool(val: str) -> int: - """Convert a string representation of truth to true (1) or false (0). - - True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values - are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if - 'val' is anything else. - """ - val = val.lower() - if val in ("y", "yes", "t", "true", "on", "1"): - return 1 - elif val in ("n", "no", "f", "false", "off", "0"): - return 0 - else: - raise ValueError(f"invalid truth value {val!r}") - - -def format_size(bytes: float) -> str: - if bytes > 1000 * 1000: - return "{:.1f} MB".format(bytes / 1000.0 / 1000) - elif bytes > 10 * 1000: - return "{} kB".format(int(bytes / 1000)) - elif bytes > 1000: - return "{:.1f} kB".format(bytes / 1000.0) - else: - return "{} bytes".format(int(bytes)) - - -def tabulate(rows: Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]]: - """Return a list of formatted rows and a list of column sizes. - - For example:: - - >>> tabulate([['foobar', 2000], [0xdeadbeef]]) - (['foobar 2000', '3735928559'], [10, 4]) - """ - rows = [tuple(map(str, row)) for row in rows] - sizes = [max(map(len, col)) for col in zip_longest(*rows, fillvalue="")] - table = [" ".join(map(str.ljust, row, sizes)).rstrip() for row in rows] - return table, sizes - - -def is_installable_dir(path: str) -> bool: - """Is path is a directory containing pyproject.toml or setup.py? - - If pyproject.toml exists, this is a PEP 517 project. 
Otherwise we look for - a legacy setuptools layout by identifying setup.py. We don't check for the - setup.cfg because using it without setup.py is only available for PEP 517 - projects, which are already covered by the pyproject.toml check. - """ - if not os.path.isdir(path): - return False - if os.path.isfile(os.path.join(path, "pyproject.toml")): - return True - if os.path.isfile(os.path.join(path, "setup.py")): - return True - return False - - -def read_chunks( - file: BinaryIO, size: int = io.DEFAULT_BUFFER_SIZE -) -> Generator[bytes, None, None]: - """Yield pieces of data from a file-like object until EOF.""" - while True: - chunk = file.read(size) - if not chunk: - break - yield chunk - - -def normalize_path(path: str, resolve_symlinks: bool = True) -> str: - """ - Convert a path to its canonical, case-normalized, absolute version. - - """ - path = os.path.expanduser(path) - if resolve_symlinks: - path = os.path.realpath(path) - else: - path = os.path.abspath(path) - return os.path.normcase(path) - - -def splitext(path: str) -> Tuple[str, str]: - """Like os.path.splitext, but take off .tar too""" - base, ext = posixpath.splitext(path) - if base.lower().endswith(".tar"): - ext = base[-4:] + ext - base = base[:-4] - return base, ext - - -def renames(old: str, new: str) -> None: - """Like os.renames(), but handles renaming across devices.""" - # Implementation borrowed from os.renames(). - head, tail = os.path.split(new) - if head and tail and not os.path.exists(head): - os.makedirs(head) - - shutil.move(old, new) - - head, tail = os.path.split(old) - if head and tail: - try: - os.removedirs(head) - except OSError: - pass - - -def is_local(path: str) -> bool: - """ - Return True if path is within sys.prefix, if we're running in a virtualenv. - - If we're not in a virtualenv, all paths are considered "local." - - Caution: this function assumes the head of path has been normalized - with normalize_path. - """ - if not running_under_virtualenv(): - return True - return path.startswith(normalize_path(sys.prefix)) - - -def write_output(msg: Any, *args: Any) -> None: - logger.info(msg, *args) - - -class StreamWrapper(StringIO): - orig_stream: TextIO - - @classmethod - def from_stream(cls, orig_stream: TextIO) -> "StreamWrapper": - ret = cls() - ret.orig_stream = orig_stream - return ret - - # compileall.compile_dir() needs stdout.encoding to print to stdout - # type ignore is because TextIOBase.encoding is writeable - @property - def encoding(self) -> str: # type: ignore - return self.orig_stream.encoding - - -@contextlib.contextmanager -def captured_output(stream_name: str) -> Generator[StreamWrapper, None, None]: - """Return a context manager used by captured_stdout/stdin/stderr - that temporarily replaces the sys stream *stream_name* with a StringIO. - - Taken from Lib/support/__init__.py in the CPython repo. - """ - orig_stdout = getattr(sys, stream_name) - setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout)) - try: - yield getattr(sys, stream_name) - finally: - setattr(sys, stream_name, orig_stdout) - - -def captured_stdout() -> ContextManager[StreamWrapper]: - """Capture the output of sys.stdout: - - with captured_stdout() as stdout: - print('hello') - self.assertEqual(stdout.getvalue(), 'hello\n') - - Taken from Lib/support/__init__.py in the CPython repo. - """ - return captured_output("stdout") - - -def captured_stderr() -> ContextManager[StreamWrapper]: - """ - See captured_stdout(). 
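A quick usage sketch for these capture helpers (importing from pip's internal module, which is not a supported public API, so treat it as illustrative only):

from pip._internal.utils.misc import captured_stdout

with captured_stdout() as stdout:
    print("hello")
assert stdout.getvalue() == "hello\n"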
- """ - return captured_output("stderr") - - -# Simulates an enum -def enum(*sequential: Any, **named: Any) -> Type[Any]: - enums = dict(zip(sequential, range(len(sequential))), **named) - reverse = {value: key for key, value in enums.items()} - enums["reverse_mapping"] = reverse - return type("Enum", (), enums) - - -def build_netloc(host: str, port: Optional[int]) -> str: - """ - Build a netloc from a host-port pair - """ - if port is None: - return host - if ":" in host: - # Only wrap host with square brackets when it is IPv6 - host = f"[{host}]" - return f"{host}:{port}" - - -def build_url_from_netloc(netloc: str, scheme: str = "https") -> str: - """ - Build a full URL from a netloc. - """ - if netloc.count(":") >= 2 and "@" not in netloc and "[" not in netloc: - # It must be a bare IPv6 address, so wrap it with brackets. - netloc = f"[{netloc}]" - return f"{scheme}://{netloc}" - - -def parse_netloc(netloc: str) -> Tuple[Optional[str], Optional[int]]: - """ - Return the host-port pair from a netloc. - """ - url = build_url_from_netloc(netloc) - parsed = urllib.parse.urlparse(url) - return parsed.hostname, parsed.port - - -def split_auth_from_netloc(netloc: str) -> NetlocTuple: - """ - Parse out and remove the auth information from a netloc. - - Returns: (netloc, (username, password)). - """ - if "@" not in netloc: - return netloc, (None, None) - - # Split from the right because that's how urllib.parse.urlsplit() - # behaves if more than one @ is present (which can be checked using - # the password attribute of urlsplit()'s return value). - auth, netloc = netloc.rsplit("@", 1) - pw: Optional[str] = None - if ":" in auth: - # Split from the left because that's how urllib.parse.urlsplit() - # behaves if more than one : is present (which again can be checked - # using the password attribute of the return value) - user, pw = auth.split(":", 1) - else: - user, pw = auth, None - - user = urllib.parse.unquote(user) - if pw is not None: - pw = urllib.parse.unquote(pw) - - return netloc, (user, pw) - - -def redact_netloc(netloc: str) -> str: - """ - Replace the sensitive data in a netloc with "****", if it exists. - - For example: - - "user:pass@example.com" returns "user:****@example.com" - - "accesstoken@example.com" returns "****@example.com" - """ - netloc, (user, password) = split_auth_from_netloc(netloc) - if user is None: - return netloc - if password is None: - user = "****" - password = "" - else: - user = urllib.parse.quote(user) - password = ":****" - return "{user}{password}@{netloc}".format( - user=user, password=password, netloc=netloc - ) - - -def _transform_url( - url: str, transform_netloc: Callable[[str], Tuple[Any, ...]] -) -> Tuple[str, NetlocTuple]: - """Transform and replace netloc in a url. - - transform_netloc is a function taking the netloc and returning a - tuple. The first element of this tuple is the new netloc. The - entire tuple is returned. - - Returns a tuple containing the transformed url as item 0 and the - original tuple returned by transform_netloc as item 1. 
- """ - purl = urllib.parse.urlsplit(url) - netloc_tuple = transform_netloc(purl.netloc) - # stripped url - url_pieces = (purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment) - surl = urllib.parse.urlunsplit(url_pieces) - return surl, cast("NetlocTuple", netloc_tuple) - - -def _get_netloc(netloc: str) -> NetlocTuple: - return split_auth_from_netloc(netloc) - - -def _redact_netloc(netloc: str) -> Tuple[str]: - return (redact_netloc(netloc),) - - -def split_auth_netloc_from_url( - url: str, -) -> Tuple[str, str, Tuple[Optional[str], Optional[str]]]: - """ - Parse a url into separate netloc, auth, and url with no auth. - - Returns: (url_without_auth, netloc, (username, password)) - """ - url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc) - return url_without_auth, netloc, auth - - -def remove_auth_from_url(url: str) -> str: - """Return a copy of url with 'username:password@' removed.""" - # username/pass params are passed to subversion through flags - # and are not recognized in the url. - return _transform_url(url, _get_netloc)[0] - - -def redact_auth_from_url(url: str) -> str: - """Replace the password in a given url with ****.""" - return _transform_url(url, _redact_netloc)[0] - - -class HiddenText: - def __init__(self, secret: str, redacted: str) -> None: - self.secret = secret - self.redacted = redacted - - def __repr__(self) -> str: - return "".format(str(self)) - - def __str__(self) -> str: - return self.redacted - - # This is useful for testing. - def __eq__(self, other: Any) -> bool: - if type(self) != type(other): - return False - - # The string being used for redaction doesn't also have to match, - # just the raw, original string. - return self.secret == other.secret - - -def hide_value(value: str) -> HiddenText: - return HiddenText(value, redacted="****") - - -def hide_url(url: str) -> HiddenText: - redacted = redact_auth_from_url(url) - return HiddenText(url, redacted=redacted) - - -def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None: - """Protection of pip.exe from modification on Windows - - On Windows, any operation modifying pip should be run as: - python -m pip ... - """ - pip_names = [ - "pip", - f"pip{sys.version_info.major}", - f"pip{sys.version_info.major}.{sys.version_info.minor}", - ] - - # See https://github.com/pypa/pip/issues/1299 for more discussion - should_show_use_python_msg = ( - modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names - ) - - if should_show_use_python_msg: - new_command = [sys.executable, "-m", "pip"] + sys.argv[1:] - raise CommandError( - "To modify pip, please run the following command:\n{}".format( - " ".join(new_command) - ) - ) - - -def check_externally_managed() -> None: - """Check whether the current environment is externally managed. - - If the ``EXTERNALLY-MANAGED`` config file is found, the current environment - is considered externally managed, and an ExternallyManagedEnvironment is - raised. 
- """ - if running_under_virtualenv(): - return - marker = os.path.join(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED") - if not os.path.isfile(marker): - return - raise ExternallyManagedEnvironment.from_config(marker) - - -def is_console_interactive() -> bool: - """Is this console interactive?""" - return sys.stdin is not None and sys.stdin.isatty() - - -def hash_file(path: str, blocksize: int = 1 << 20) -> Tuple[Any, int]: - """Return (hash, length) for path using hashlib.sha256()""" - - h = hashlib.sha256() - length = 0 - with open(path, "rb") as f: - for block in read_chunks(f, size=blocksize): - length += len(block) - h.update(block) - return h, length - - -def pairwise(iterable: Iterable[Any]) -> Iterator[Tuple[Any, Any]]: - """ - Return paired elements. - - For example: - s -> (s0, s1), (s2, s3), (s4, s5), ... - """ - iterable = iter(iterable) - return zip_longest(iterable, iterable) - - -def partition( - pred: Callable[[T], bool], - iterable: Iterable[T], -) -> Tuple[Iterable[T], Iterable[T]]: - """ - Use a predicate to partition entries into false entries and true entries, - like - - partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 - """ - t1, t2 = tee(iterable) - return filterfalse(pred, t1), filter(pred, t2) - - -class ConfiguredBuildBackendHookCaller(BuildBackendHookCaller): - def __init__( - self, - config_holder: Any, - source_dir: str, - build_backend: str, - backend_path: Optional[str] = None, - runner: Optional[Callable[..., None]] = None, - python_executable: Optional[str] = None, - ): - super().__init__( - source_dir, build_backend, backend_path, runner, python_executable - ) - self.config_holder = config_holder - - def build_wheel( - self, - wheel_directory: str, - config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, - metadata_directory: Optional[str] = None, - ) -> str: - cs = self.config_holder.config_settings - return super().build_wheel( - wheel_directory, config_settings=cs, metadata_directory=metadata_directory - ) - - def build_sdist( - self, - sdist_directory: str, - config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, - ) -> str: - cs = self.config_holder.config_settings - return super().build_sdist(sdist_directory, config_settings=cs) - - def build_editable( - self, - wheel_directory: str, - config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, - metadata_directory: Optional[str] = None, - ) -> str: - cs = self.config_holder.config_settings - return super().build_editable( - wheel_directory, config_settings=cs, metadata_directory=metadata_directory - ) - - def get_requires_for_build_wheel( - self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None - ) -> List[str]: - cs = self.config_holder.config_settings - return super().get_requires_for_build_wheel(config_settings=cs) - - def get_requires_for_build_sdist( - self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None - ) -> List[str]: - cs = self.config_holder.config_settings - return super().get_requires_for_build_sdist(config_settings=cs) - - def get_requires_for_build_editable( - self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None - ) -> List[str]: - cs = self.config_holder.config_settings - return super().get_requires_for_build_editable(config_settings=cs) - - def prepare_metadata_for_build_wheel( - self, - metadata_directory: str, - config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, - _allow_fallback: bool = True, - ) -> str: - cs = self.config_holder.config_settings - return 
super().prepare_metadata_for_build_wheel( - metadata_directory=metadata_directory, - config_settings=cs, - _allow_fallback=_allow_fallback, - ) - - def prepare_metadata_for_build_editable( - self, - metadata_directory: str, - config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, - _allow_fallback: bool = True, - ) -> str: - cs = self.config_holder.config_settings - return super().prepare_metadata_for_build_editable( - metadata_directory=metadata_directory, - config_settings=cs, - _allow_fallback=_allow_fallback, - ) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/build_clib.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/build_clib.py deleted file mode 100644 index b3f679b67da7c997478bd9ee8546682106b8be62..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/build_clib.py +++ /dev/null @@ -1,207 +0,0 @@ -"""distutils.command.build_clib - -Implements the Distutils 'build_clib' command, to build a C/C++ library -that is included in the module distribution and needed by an extension -module.""" - - -# XXX this module has *lots* of code ripped-off quite transparently from -# build_ext.py -- not surprisingly really, as the work required to build -# a static library from a collection of C source files is not really all -# that different from what's required to build a shared object file from -# a collection of C source files. Nevertheless, I haven't done the -# necessary refactoring to account for the overlap in code between the -# two modules, mainly because a number of subtle details changed in the -# cut 'n paste. Sigh. - -import os -from ..core import Command -from ..errors import DistutilsSetupError -from ..sysconfig import customize_compiler -from distutils._log import log - - -def show_compilers(): - from ..ccompiler import show_compilers - - show_compilers() - - -class build_clib(Command): - description = "build C/C++ libraries used by Python extensions" - - user_options = [ - ('build-clib=', 'b', "directory to build C/C++ libraries to"), - ('build-temp=', 't', "directory to put temporary build by-products"), - ('debug', 'g', "compile with debugging information"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('compiler=', 'c', "specify the compiler type"), - ] - - boolean_options = ['debug', 'force'] - - help_options = [ - ('help-compiler', None, "list available compilers", show_compilers), - ] - - def initialize_options(self): - self.build_clib = None - self.build_temp = None - - # List of libraries to build - self.libraries = None - - # Compilation options for all libraries - self.include_dirs = None - self.define = None - self.undef = None - self.debug = None - self.force = 0 - self.compiler = None - - def finalize_options(self): - # This might be confusing: both build-clib and build-temp default - # to build-temp as defined by the "build" command. This is because - # I think that C libraries are really just temporary build - # by-products, at least from the point of view of building Python - # extensions -- but I want to keep my options open. 
- self.set_undefined_options( - 'build', - ('build_temp', 'build_clib'), - ('build_temp', 'build_temp'), - ('compiler', 'compiler'), - ('debug', 'debug'), - ('force', 'force'), - ) - - self.libraries = self.distribution.libraries - if self.libraries: - self.check_library_list(self.libraries) - - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - # XXX same as for build_ext -- what about 'self.define' and - # 'self.undef' ? - - def run(self): - if not self.libraries: - return - - # Yech -- this is cut 'n pasted from build_ext.py! - from ..ccompiler import new_compiler - - self.compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=self.force - ) - customize_compiler(self.compiler) - - if self.include_dirs is not None: - self.compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for name, value in self.define: - self.compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - self.compiler.undefine_macro(macro) - - self.build_libraries(self.libraries) - - def check_library_list(self, libraries): - """Ensure that the list of libraries is valid. - - `libraries` is presumably provided as a command option 'libraries'. - This method checks that it is a list of 2-tuples, where the tuples - are (library_name, build_info_dict). - - Raise DistutilsSetupError if the structure is invalid anywhere; - just returns otherwise. - """ - if not isinstance(libraries, list): - raise DistutilsSetupError("'libraries' option must be a list of tuples") - - for lib in libraries: - if not isinstance(lib, tuple) or len(lib) != 2: - raise DistutilsSetupError("each element of 'libraries' must be a 2-tuple") - - name, build_info = lib - - if not isinstance(name, str): - raise DistutilsSetupError( - "first element of each tuple in 'libraries' " - "must be a string (the library name)" - ) - - if '/' in name or (os.sep != '/' and os.sep in name): - raise DistutilsSetupError( - "bad library name '%s': " - "may not contain directory separators" % lib[0] - ) - - if not isinstance(build_info, dict): - raise DistutilsSetupError( - "second element of each tuple in 'libraries' " - "must be a dictionary (build info)" - ) - - def get_library_names(self): - # Assume the library list is valid -- 'check_library_list()' is - # called from 'finalize_options()', so it should be!
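For reference, a sketch of the structure check_library_list() expects; the library name, sources, and macros below are made up for illustration (and note that distutils itself is deprecated as of Python 3.12):

from distutils.errors import DistutilsSetupError

# Each entry is (library_name, build_info_dict); 'sources' is required,
# while 'macros' and 'include_dirs' are optional.
libraries = [
    ("foo", {"sources": ["foo/foo.c"],
             "macros": [("DEBUG", "1")],
             "include_dirs": ["include"]}),
]

for name, build_info in libraries:
    sources = build_info.get("sources")
    if sources is None or not isinstance(sources, (list, tuple)):
        raise DistutilsSetupError(f"library '{name}' needs a list of sources")
print("libraries option looks well-formed")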
- if not self.libraries: - return None - - lib_names = [] - for lib_name, build_info in self.libraries: - lib_names.append(lib_name) - return lib_names - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib_name, build_info in self.libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name - ) - - filenames.extend(sources) - return filenames - - def build_libraries(self, libraries): - for lib_name, build_info in libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name - ) - sources = list(sources) - - log.info("building '%s' library", lib_name) - - # First, compile the source code to object files in the library - # directory. (This should probably change to putting object - # files in a temporary build directory.) - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - objects = self.compiler.compile( - sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - ) - - # Now "link" the object files together into a static library. - # (On Unix at least, this isn't really linking -- it just - # builds an archive. Whatever.) - self.compiler.create_static_lib( - objects, lib_name, output_dir=self.build_clib, debug=self.debug - ) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/monkey.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/monkey.py deleted file mode 100644 index 50653fc7ee41cd529c8413bd9b797ca801eb2dfa..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/monkey.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -Monkey patching of distutils. -""" - -import sys -import distutils.filelist -import platform -import types -import functools -from importlib import import_module -import inspect - -import setuptools - -__all__ = [] -""" -Everything is private. Contact the project team -if you think you need this functionality. -""" - - -def _get_mro(cls): - """ - Returns the bases classes for cls sorted by the MRO. - - Works around an issue on Jython where inspect.getmro will not return all - base classes if multiple classes share the same name. Instead, this - function will return a tuple containing the class itself, and the contents - of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024. - """ - if platform.python_implementation() == "Jython": - return (cls,) + cls.__bases__ - return inspect.getmro(cls) - - -def get_unpatched(item): - lookup = ( - get_unpatched_class if isinstance(item, type) else - get_unpatched_function if isinstance(item, types.FunctionType) else - lambda item: None - ) - return lookup(item) - - -def get_unpatched_class(cls): - """Protect against re-patching the distutils if reloaded - - Also ensures that no other distutils extension monkeypatched the distutils - first. 
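A toy illustration of the MRO walk get_unpatched_class() performs, with stand-in classes whose __module__ attributes are faked to mimic distutils and setuptools:

import inspect

class Base:
    pass

Base.__module__ = "distutils.dist"      # pretend this is the original class

class Patched(Base):
    pass

Patched.__module__ = "setuptools.dist"  # pretend this is the monkeypatched subclass

# First class in the MRO that does not come from the patching package:
external = next(
    c for c in inspect.getmro(Patched)
    if not c.__module__.startswith("setuptools")
)
assert external is Base  # the pre-patch base is recovered from the MRO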
- """ - external_bases = ( - cls - for cls in _get_mro(cls) - if not cls.__module__.startswith('setuptools') - ) - base = next(external_bases) - if not base.__module__.startswith('distutils'): - msg = "distutils has already been patched by %r" % cls - raise AssertionError(msg) - return base - - -def patch_all(): - # we can't patch distutils.cmd, alas - distutils.core.Command = setuptools.Command - - has_issue_12885 = sys.version_info <= (3, 5, 3) - - if has_issue_12885: - # fix findall bug in distutils (http://bugs.python.org/issue12885) - distutils.filelist.findall = setuptools.findall - - needs_warehouse = ( - (3, 4) < sys.version_info < (3, 4, 6) - or - (3, 5) < sys.version_info <= (3, 5, 3) - ) - - if needs_warehouse: - warehouse = 'https://upload.pypi.org/legacy/' - distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse - - _patch_distribution_metadata() - - # Install Distribution throughout the distutils - for module in distutils.dist, distutils.core, distutils.cmd: - module.Distribution = setuptools.dist.Distribution - - # Install the patched Extension - distutils.core.Extension = setuptools.extension.Extension - distutils.extension.Extension = setuptools.extension.Extension - if 'distutils.command.build_ext' in sys.modules: - sys.modules['distutils.command.build_ext'].Extension = ( - setuptools.extension.Extension - ) - - patch_for_msvc_specialized_compiler() - - -def _patch_distribution_metadata(): - """Patch write_pkg_file and read_pkg_file for higher metadata standards""" - for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'): - new_val = getattr(setuptools.dist, attr) - setattr(distutils.dist.DistributionMetadata, attr, new_val) - - -def patch_func(replacement, target_mod, func_name): - """ - Patch func_name in target_mod with replacement - - Important - original must be resolved by name to avoid - patching an already patched function. - """ - original = getattr(target_mod, func_name) - - # set the 'unpatched' attribute on the replacement to - # point to the original. - vars(replacement).setdefault('unpatched', original) - - # replace the function in the original module - setattr(target_mod, func_name, replacement) - - -def get_unpatched_function(candidate): - return getattr(candidate, 'unpatched') - - -def patch_for_msvc_specialized_compiler(): - """ - Patch functions in distutils to use standalone Microsoft Visual C++ - compilers. - """ - # import late to avoid circular imports on Python < 3.5 - msvc = import_module('setuptools.msvc') - - if platform.system() != 'Windows': - # Compilers only available on Microsoft Windows - return - - def patch_params(mod_name, func_name): - """ - Prepare the parameters for patch_func to patch indicated function. 
- """ - repl_prefix = 'msvc14_' - repl_name = repl_prefix + func_name.lstrip('_') - repl = getattr(msvc, repl_name) - mod = import_module(mod_name) - if not hasattr(mod, func_name): - raise ImportError(func_name) - return repl, mod, func_name - - # Python 3.5+ - msvc14 = functools.partial(patch_params, 'distutils._msvccompiler') - - try: - # Patch distutils._msvccompiler._get_vc_env - patch_func(*msvc14('_get_vc_env')) - except ImportError: - pass diff --git a/spaces/pragnakalp/Emotion_Detection/app.py b/spaces/pragnakalp/Emotion_Detection/app.py deleted file mode 100644 index 8b4dcc348678259a75ec72d94af2d5a98cdb41a4..0000000000000000000000000000000000000000 --- a/spaces/pragnakalp/Emotion_Detection/app.py +++ /dev/null @@ -1,178 +0,0 @@ -import os -import gc -import csv -import socket -import json -import huggingface_hub -import requests - -import re as r -import gradio as gr -import pandas as pd - -from huggingface_hub import Repository -from urllib.request import urlopen -from transformers import AutoTokenizer, AutoModelWithLMHead - -## connection with HF datasets -HF_TOKEN = os.environ.get("HF_TOKEN") -# DATASET_NAME = "emotion_detection_dataset" -# DATASET_REPO_URL = f"https://huggingface.co/datasets/pragnakalp/{DATASET_NAME}" -DATASET_REPO_URL = "https://huggingface.co/datasets/pragnakalp/emotion_detection_dataset" -DATA_FILENAME = "emotion_detection_logs.csv" -DATA_FILE = os.path.join("emotion_detection_logs", DATA_FILENAME) -DATASET_REPO_ID = "pragnakalp/emotion_detection_dataset" -print("is none?", HF_TOKEN is None) -try: - hf_hub_download( - repo_id=DATASET_REPO_ID, - filename=DATA_FILENAME, - cache_dir=DATA_DIRNAME, - force_filename=DATA_FILENAME - ) - -except: - print("file not found") - -repo = Repository( - local_dir="emotion_detection_logs", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -) - -SENTENCES_VALUE = """Raj loves Simran.\nLast year I lost my Dog.\nI bought a new phone!\nShe is scared of cockroaches.\nWow! I was not expecting that.\nShe got mad at him.""" -## load model -cwd = os.getcwd() -model_path = os.path.join(cwd) -tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion") -model_base = AutoModelWithLMHead.from_pretrained(model_path) - -def getIP(): - ip_address = '' - try: - d = str(urlopen('http://checkip.dyndns.com/') - .read()) - - return r.compile(r'Address: (\d+\.\d+\.\d+\.\d+)').search(d).group(1) - except Exception as e: - print("Error while getting IP address -->",e) - return ip_address - -def get_location(ip_addr): - location = {} - try: - ip=ip_addr - - req_data={ - "ip":ip, - "token":"pkml123" - } - url = "https://demos.pragnakalp.com/get-ip-location" - - # req_data=json.dumps(req_data) - # print("req_data",req_data) - headers = {'Content-Type': 'application/json'} - - response = requests.request("POST", url, headers=headers, data=json.dumps(req_data)) - response = response.json() - print("response======>>",response) - return response - except Exception as e: - print("Error while getting location -->",e) - return location - - -""" -generate emotions of the sentences -""" -def get_emotion(text): - - # input_ids = tokenizer.encode(text + '
', return_tensors='pt') - input_ids = tokenizer.encode(text, return_tensors='pt') - output = model_base.generate(input_ids=input_ids, - max_length=2) - - dec = [tokenizer.decode(ids) for ids in output] - label = dec[0] - gc.collect() - return label - -def generate_emotion(article): - table = {'Input':[], 'Detected Emotion':[]} - if article.strip(): - sen_list = article - sen_list = sen_list.split('\n') - while("" in sen_list): - sen_list.remove("") - sen_list_temp = sen_list[0:] - print(sen_list_temp) - results_dict = [] - results = [] - - for sen in sen_list_temp: - if(sen.strip()): - cur_result = get_emotion(sen) - - results.append(cur_result) - results_dict.append( - { - 'sentence': sen, - 'emotion': cur_result - } - ) - - table = {'Input':sen_list_temp, 'Detected Emotion':results} - gc.collect() - save_data_and_sendmail(article,results_dict,sen_list, results) - return pd.DataFrame(table) - else: - raise gr.Error("Please enter text in inputbox!!!!") - -""" -Save generated details -""" -def save_data_and_sendmail(article,results_dict,sen_list,results): - try: - - ip_address= getIP() - print(ip_address) - location = get_location(ip_address) - print(location) - - add_csv = [article,results_dict,ip_address,location] - with open(DATA_FILE, "a") as f: - writer = csv.writer(f) - # write the data - writer.writerow(add_csv) - commit_url = repo.push_to_hub() - print("commit data :",commit_url) - - url = 'https://pragnakalpdev33.pythonanywhere.com/HF_space_emotion_detection_demo' - # url = 'https://pragnakalpdev35.pythonanywhere.com/HF_space_emotion_detection' - - myobj = {"sentences":sen_list,"gen_results":results,"ip_addr":ip_address,'loc':location} - response = requests.post(url, json = myobj) - print("response=-----=",response.status_code) - - except Exception as e: - return "Error while sending mail" + str(e) - - return "Successfully save data" - -""" -UI design for demo using gradio app -""" -inputs = gr.Textbox(value=SENTENCES_VALUE,lines=3, label="Sentences",elem_id="inp_div") -outputs = [gr.Dataframe(row_count = (3, "dynamic"), col_count=(2, "fixed"), label="Here is the Result", headers=["Input","Detected Emotion"],wrap=True)] - -demo = gr.Interface( - generate_emotion, - inputs, - outputs, - title="Emotion Detection", - css=".gradio-container {background-color: lightgray} #inp_div {background-color: #FB3D5;}", - article="""
-Provide us your feedback on this demo and feel free - to contact us at letstalk@pragnakalp.com if you want to have your own Emotion Detection system. - We will be happy to serve you for your requirement. And don't forget to check out more interesting - NLP services we are offering. -
-Developed by: Pragnakalp Techlabs
""" -) -demo.launch() \ No newline at end of file diff --git a/spaces/prerna9811/Chord/portaudio/pablio/test_w_saw8.c b/spaces/prerna9811/Chord/portaudio/pablio/test_w_saw8.c deleted file mode 100644 index 70686c19b200957e72051c177e1c599acda0bc53..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/pablio/test_w_saw8.c +++ /dev/null @@ -1,112 +0,0 @@ -/* - * $Id$ - * test_w_saw8.c - * Generate stereo 8 bit sawtooth waveforms. - * - * Author: Phil Burk, http://www.softsynth.com - * - * This program uses PABLIO, the Portable Audio Blocking I/O Library. - * PABLIO is built on top of PortAudio, the Portable Audio Library. - * - * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include -#include -#include "pablio.h" -#include - -#define SAMPLE_RATE (22050) -#define NUM_SECONDS (6) -#define SAMPLES_PER_FRAME (2) - - -#define FRAMES_PER_BLOCK (100) - -unsigned char samples[FRAMES_PER_BLOCK][SAMPLES_PER_FRAME]; -unsigned char phases[SAMPLES_PER_FRAME]; - -/*******************************************************************/ -int main(void); -int main(void) -{ - int i,j; - PaError err; - PABLIO_Stream *aOutStream; - - printf("Generate unsigned 8 bit sawtooth waves using PABLIO.\n"); - fflush(stdout); - - /* Open simplified blocking I/O layer on top of PortAudio. */ - err = OpenAudioStream( &aOutStream, SAMPLE_RATE, paUInt8, - (PABLIO_WRITE | PABLIO_STEREO) ); - if( err != paNoError ) goto error; - - /* Initialize oscillator phases to "ground" level for paUInt8. */ - phases[0] = 128; - phases[1] = 128; - - for( i=0; i<(NUM_SECONDS * SAMPLE_RATE); i += FRAMES_PER_BLOCK ) - { - /* Generate sawtooth waveforms in a block for efficiency. 
*/ - for( j=0; j - -#if (defined(_WIN32) && (defined(_MSC_VER) && (_MSC_VER >= 1200))) /* MSC version 6 and above */ -#pragma comment( lib, "setupapi.lib" ) -#endif - -/* Debugging/tracing support */ - -#define PA_LOGE_ -#define PA_LOGL_ - -#ifdef __GNUC__ -#include -#define _WIN32_WINNT 0x0501 -#define WINVER 0x0501 -#endif - -#include /* strlen() */ -#include -#include /* iswspace() */ - -#include "pa_util.h" -#include "pa_allocation.h" -#include "pa_hostapi.h" -#include "pa_stream.h" -#include "pa_cpuload.h" -#include "pa_process.h" -#include "portaudio.h" -#include "pa_debugprint.h" -#include "pa_memorybarrier.h" -#include "pa_ringbuffer.h" -#include "pa_trace.h" -#include "pa_win_waveformat.h" - -#include "pa_win_wdmks.h" - -#ifndef DRV_QUERYDEVICEINTERFACE -#define DRV_QUERYDEVICEINTERFACE (DRV_RESERVED + 12) -#endif -#ifndef DRV_QUERYDEVICEINTERFACESIZE -#define DRV_QUERYDEVICEINTERFACESIZE (DRV_RESERVED + 13) -#endif - -#include -#ifndef __GNUC__ /* Fix for ticket #257: MinGW-w64: Inclusion of triggers multiple redefinition errors. */ -#include -#endif -#include - -#include - -#ifdef _MSC_VER -#define snprintf _snprintf -#define vsnprintf _vsnprintf -#endif - -/* The PA_HP_TRACE macro is used in RT parts, so it can be switched off without affecting -the rest of the debug tracing */ -#if 1 -#define PA_HP_TRACE(x) PaUtil_AddHighSpeedLogMessage x ; -#else -#define PA_HP_TRACE(x) -#endif - -/* A define that selects whether the resulting pin names are chosen from pin category -instead of the available pin names, who sometimes can be quite cheesy, like "Volume control". -Default is to use the pin category. -*/ -#ifndef PA_WDMKS_USE_CATEGORY_FOR_PIN_NAMES -#define PA_WDMKS_USE_CATEGORY_FOR_PIN_NAMES 1 -#endif - -#ifdef __GNUC__ -#undef PA_LOGE_ -#define PA_LOGE_ PA_DEBUG(("%s {\n",__FUNCTION__)) -#undef PA_LOGL_ -#define PA_LOGL_ PA_DEBUG(("} %s\n",__FUNCTION__)) -/* These defines are set in order to allow the WIndows DirectX -* headers to compile with a GCC compiler such as MinGW -* NOTE: The headers may generate a few warning in GCC, but -* they should compile */ -#define _INC_MMSYSTEM -#define _INC_MMREG -#define _NTRTL_ /* Turn off default definition of DEFINE_GUIDEX */ -#define DEFINE_GUID_THUNK(name,guid) DEFINE_GUID(name,guid) -#define DEFINE_GUIDEX(n) DEFINE_GUID_THUNK( n, STATIC_##n ) -#if !defined( DEFINE_WAVEFORMATEX_GUID ) -#define DEFINE_WAVEFORMATEX_GUID(x) (USHORT)(x), 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 -#endif -#define WAVE_FORMAT_ADPCM 0x0002 -#define WAVE_FORMAT_IEEE_FLOAT 0x0003 -#define WAVE_FORMAT_ALAW 0x0006 -#define WAVE_FORMAT_MULAW 0x0007 -#define WAVE_FORMAT_MPEG 0x0050 -#define WAVE_FORMAT_DRM 0x0009 -#define DYNAMIC_GUID_THUNK(l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}} -#define DYNAMIC_GUID(data) DYNAMIC_GUID_THUNK(data) -#endif - -/* use CreateThread for CYGWIN/Windows Mobile, _beginthreadex for all others */ -#if !defined(__CYGWIN__) && !defined(_WIN32_WCE) -#define CREATE_THREAD_FUNCTION (HANDLE)_beginthreadex -#define PA_THREAD_FUNC static unsigned WINAPI -#else -#define CREATE_THREAD_FUNCTION CreateThread -#define PA_THREAD_FUNC static DWORD WINAPI -#endif - -#ifdef _MSC_VER -#define NOMMIDS -#define DYNAMIC_GUID(data) {data} -#define _NTRTL_ /* Turn off default definition of DEFINE_GUIDEX */ -#undef DEFINE_GUID -#if defined(__clang__) || (defined(_MSVC_TRADITIONAL) && !_MSVC_TRADITIONAL) /* clang-cl and new msvc preprocessor: avoid too many arguments error */ - #define DEFINE_GUID(n, ...) 
EXTERN_C const GUID n = {__VA_ARGS__} - #define DEFINE_GUID_THUNK(n, ...) DEFINE_GUID(n, __VA_ARGS__) - #define DEFINE_GUIDEX(n) DEFINE_GUID_THUNK(n, STATIC_##n) -#else - #define DEFINE_GUID(n, data) EXTERN_C const GUID n = {data} - #define DEFINE_GUID_THUNK(n, data) DEFINE_GUID(n, data) - #define DEFINE_GUIDEX(n) DEFINE_GUID_THUNK(n, STATIC_##n) -#endif /* __clang__, !_MSVC_TRADITIONAL */ -#endif - -#include - -#ifndef EXTERN_C -#define EXTERN_C extern -#endif - -#if defined(__GNUC__) - -/* For MinGW we reference mingw-include files supplied with WASAPI */ -#define WINBOOL BOOL - -#include "../wasapi/mingw-include/ks.h" -#include "../wasapi/mingw-include/ksmedia.h" - -#else - -#include -#include - -/* Note that Windows SDK V6.0A or later is needed for WaveRT specific structs to be present in - ksmedia.h. Also make sure that the SDK include path is before other include paths (that may contain - an "old" ksmedia.h), so the proper ksmedia.h is used */ -#include - -#endif - -#include -#include - -/* These next definitions allow the use of the KSUSER DLL */ -typedef /*KSDDKAPI*/ DWORD WINAPI KSCREATEPIN(HANDLE, PKSPIN_CONNECT, ACCESS_MASK, PHANDLE); -extern HMODULE DllKsUser; -extern KSCREATEPIN* FunctionKsCreatePin; - -/* These definitions allows the use of AVRT.DLL on Vista and later OSs */ -typedef enum _PA_AVRT_PRIORITY -{ - PA_AVRT_PRIORITY_LOW = -1, - PA_AVRT_PRIORITY_NORMAL, - PA_AVRT_PRIORITY_HIGH, - PA_AVRT_PRIORITY_CRITICAL -} PA_AVRT_PRIORITY, *PPA_AVRT_PRIORITY; - -typedef struct -{ - HINSTANCE hInstance; - - HANDLE (WINAPI *AvSetMmThreadCharacteristics) (LPCSTR, LPDWORD); - BOOL (WINAPI *AvRevertMmThreadCharacteristics) (HANDLE); - BOOL (WINAPI *AvSetMmThreadPriority) (HANDLE, PA_AVRT_PRIORITY); -} PaWinWDMKSAvRtEntryPoints; - -static PaWinWDMKSAvRtEntryPoints paWinWDMKSAvRtEntryPoints = {0}; - -/* An unspecified channel count (-1) is not treated correctly, so we replace it with -* an arbitrarily large number */ -#define MAXIMUM_NUMBER_OF_CHANNELS 256 - -/* Forward definition to break circular type reference between pin and filter */ -struct __PaWinWdmFilter; -typedef struct __PaWinWdmFilter PaWinWdmFilter; - -struct __PaWinWdmPin; -typedef struct __PaWinWdmPin PaWinWdmPin; - -struct __PaWinWdmStream; -typedef struct __PaWinWdmStream PaWinWdmStream; - -/* Function prototype for getting audio position */ -typedef PaError (*FunctionGetPinAudioPosition)(PaWinWdmPin*, unsigned long*); - -/* Function prototype for memory barrier */ -typedef void (*FunctionMemoryBarrier)(void); - -struct __PaProcessThreadInfo; -typedef struct __PaProcessThreadInfo PaProcessThreadInfo; - -typedef PaError (*FunctionPinHandler)(PaProcessThreadInfo* pInfo, unsigned eventIndex); - -typedef enum __PaStreamStartEnum -{ - StreamStart_kOk, - StreamStart_kFailed, - StreamStart_kCnt -} PaStreamStartEnum; - -/* Multiplexed input structure. -* Very often several physical inputs are multiplexed through a MUX node (represented in the topology filter) */ -typedef struct __PaWinWdmMuxedInput -{ - wchar_t friendlyName[MAX_PATH]; - ULONG muxPinId; - ULONG muxNodeId; - ULONG endpointPinId; -} PaWinWdmMuxedInput; - -/* The Pin structure -* A pin is an input or output node, e.g. 
for audio flow */ -struct __PaWinWdmPin -{ - HANDLE handle; - PaWinWdmMuxedInput** inputs; - unsigned inputCount; - wchar_t friendlyName[MAX_PATH]; - - PaWinWdmFilter* parentFilter; - PaWDMKSSubType pinKsSubType; - unsigned long pinId; - unsigned long endpointPinId; /* For output pins */ - KSPIN_CONNECT* pinConnect; - unsigned long pinConnectSize; - KSDATAFORMAT_WAVEFORMATEX* ksDataFormatWfx; - KSPIN_COMMUNICATION communication; - KSDATARANGE* dataRanges; - KSMULTIPLE_ITEM* dataRangesItem; - KSPIN_DATAFLOW dataFlow; - KSPIN_CINSTANCES instances; - unsigned long frameSize; - int maxChannels; - unsigned long formats; - int defaultSampleRate; - ULONG *positionRegister; /* WaveRT */ - ULONG hwLatency; /* WaveRT */ - FunctionMemoryBarrier fnMemBarrier; /* WaveRT */ - FunctionGetPinAudioPosition fnAudioPosition; /* WaveRT */ - FunctionPinHandler fnEventHandler; - FunctionPinHandler fnSubmitHandler; -}; - -/* The Filter structure -* A filter has a number of pins and a "friendly name" */ -struct __PaWinWdmFilter -{ - HANDLE handle; - PaWinWDMKSDeviceInfo devInfo; /* This will hold information that is exposed in PaDeviceInfo */ - - DWORD deviceNode; - int pinCount; - PaWinWdmPin** pins; - PaWinWdmFilter* topologyFilter; - wchar_t friendlyName[MAX_PATH]; - int validPinCount; - int usageCount; - KSMULTIPLE_ITEM* connections; - KSMULTIPLE_ITEM* nodes; - int filterRefCount; -}; - - -typedef struct __PaWinWdmDeviceInfo -{ - PaDeviceInfo inheritedDeviceInfo; - char compositeName[MAX_PATH]; /* Composite name consists of pin name + device name in utf8 */ - PaWinWdmFilter* filter; - unsigned long pin; - int muxPosition; /* Used only for input devices */ - int endpointPinId; -} -PaWinWdmDeviceInfo; - -/* PaWinWdmHostApiRepresentation - host api datastructure specific to this implementation */ -typedef struct __PaWinWdmHostApiRepresentation -{ - PaUtilHostApiRepresentation inheritedHostApiRep; - PaUtilStreamInterface callbackStreamInterface; - PaUtilStreamInterface blockingStreamInterface; - - PaUtilAllocationGroup* allocations; - int deviceCount; -} -PaWinWdmHostApiRepresentation; - -typedef struct __DATAPACKET -{ - KSSTREAM_HEADER Header; - OVERLAPPED Signal; -} DATAPACKET; - -typedef struct __PaIOPacket -{ - DATAPACKET* packet; - unsigned startByte; - unsigned lengthBytes; -} PaIOPacket; - -typedef struct __PaWinWdmIOInfo -{ - PaWinWdmPin* pPin; - char* hostBuffer; - unsigned hostBufferSize; - unsigned framesPerBuffer; - unsigned bytesPerFrame; - unsigned bytesPerSample; - unsigned noOfPackets; /* Only used in WaveCyclic */ - HANDLE *events; /* noOfPackets handles (WaveCyclic) 1 (WaveRT) */ - DATAPACKET *packets; /* noOfPackets packets (WaveCyclic) 2 (WaveRT) */ - /* WaveRT polled mode */ - unsigned lastPosition; - unsigned pollCntr; -} PaWinWdmIOInfo; - -/* PaWinWdmStream - a stream data structure specifically for this implementation */ -struct __PaWinWdmStream -{ - PaUtilStreamRepresentation streamRepresentation; - PaWDMKSSpecificStreamInfo hostApiStreamInfo; /* This holds info that is exposed through PaStreamInfo */ - PaUtilCpuLoadMeasurer cpuLoadMeasurer; - PaUtilBufferProcessor bufferProcessor; - -#if PA_TRACE_REALTIME_EVENTS - LogHandle hLog; -#endif - - PaUtilAllocationGroup* allocGroup; - PaWinWdmIOInfo capture; - PaWinWdmIOInfo render; - int streamStarted; - int streamActive; - int streamStop; - int streamAbort; - int oldProcessPriority; - HANDLE streamThread; - HANDLE eventAbort; - HANDLE eventStreamStart[StreamStart_kCnt]; /* 0 = OK, 1 = Failed */ - PaError threadResult; - PaStreamFlags 
streamFlags; - - /* Capture ring buffer */ - PaUtilRingBuffer ringBuffer; - char* ringBufferData; - - /* These values handle the case where the user wants to use fewer - * channels than the device has */ - int userInputChannels; - int deviceInputChannels; - int userOutputChannels; - int deviceOutputChannels; -}; - -/* Gather all processing variables in a struct */ -struct __PaProcessThreadInfo -{ - PaWinWdmStream *stream; - PaStreamCallbackTimeInfo ti; - PaStreamCallbackFlags underover; - int cbResult; - volatile int pending; - volatile int priming; - volatile int pinsStarted; - unsigned long timeout; - unsigned captureHead; - unsigned captureTail; - unsigned renderHead; - unsigned renderTail; - PaIOPacket capturePackets[4]; - PaIOPacket renderPackets[4]; -}; - -/* Used for transferring device infos during scanning / rescanning */ -typedef struct __PaWinWDMScanDeviceInfosResults -{ - PaDeviceInfo **deviceInfos; - PaDeviceIndex defaultInputDevice; - PaDeviceIndex defaultOutputDevice; -} PaWinWDMScanDeviceInfosResults; - -static const unsigned cPacketsArrayMask = 3; - -HMODULE DllKsUser = NULL; -KSCREATEPIN* FunctionKsCreatePin = NULL; - -/* prototypes for functions declared in this file */ - -#ifdef __cplusplus -extern "C" -{ -#endif /* __cplusplus */ - - PaError PaWinWdm_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex index ); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -/* Low level I/O functions */ -static PaError WdmSyncIoctl(HANDLE handle, - unsigned long ioctlNumber, - void* inBuffer, - unsigned long inBufferCount, - void* outBuffer, - unsigned long outBufferCount, - unsigned long* bytesReturned); - -static PaError WdmGetPropertySimple(HANDLE handle, - const GUID* const guidPropertySet, - unsigned long property, - void* value, - unsigned long valueCount); - -static PaError WdmSetPropertySimple(HANDLE handle, - const GUID* const guidPropertySet, - unsigned long property, - void* value, - unsigned long valueCount, - void* instance, - unsigned long instanceCount); - -static PaError WdmGetPinPropertySimple(HANDLE handle, - unsigned long pinId, - const GUID* const guidPropertySet, - unsigned long property, - void* value, - unsigned long valueCount, - unsigned long* byteCount); - -static PaError WdmGetPinPropertyMulti(HANDLE handle, - unsigned long pinId, - const GUID* const guidPropertySet, - unsigned long property, - KSMULTIPLE_ITEM** ksMultipleItem); - -static PaError WdmGetPropertyMulti(HANDLE handle, - const GUID* const guidPropertySet, - unsigned long property, - KSMULTIPLE_ITEM** ksMultipleItem); - -static PaError WdmSetMuxNodeProperty(HANDLE handle, - ULONG nodeId, - ULONG pinId); - - -/** Pin management functions */ -static PaWinWdmPin* PinNew(PaWinWdmFilter* parentFilter, unsigned long pinId, PaError* error); -static void PinFree(PaWinWdmPin* pin); -static void PinClose(PaWinWdmPin* pin); -static PaError PinInstantiate(PaWinWdmPin* pin); -/*static PaError PinGetState(PaWinWdmPin* pin, KSSTATE* state); NOT USED */ -static PaError PinSetState(PaWinWdmPin* pin, KSSTATE state); -static PaError PinSetFormat(PaWinWdmPin* pin, const WAVEFORMATEX* format); -static PaError PinIsFormatSupported(PaWinWdmPin* pin, const WAVEFORMATEX* format); -/* WaveRT support */ -static PaError PinQueryNotificationSupport(PaWinWdmPin* pPin, BOOL* pbResult); -static PaError PinGetBuffer(PaWinWdmPin* pPin, void** pBuffer, DWORD* pRequestedBufSize, BOOL* pbCallMemBarrier); -static PaError PinRegisterPositionRegister(PaWinWdmPin* pPin); -static PaError 
PinRegisterNotificationHandle(PaWinWdmPin* pPin, HANDLE handle); -static PaError PinUnregisterNotificationHandle(PaWinWdmPin* pPin, HANDLE handle); -static PaError PinGetHwLatency(PaWinWdmPin* pPin, ULONG* pFifoSize, ULONG* pChipsetDelay, ULONG* pCodecDelay); -static PaError PinGetAudioPositionMemoryMapped(PaWinWdmPin* pPin, ULONG* pPosition); -static PaError PinGetAudioPositionViaIOCTLRead(PaWinWdmPin* pPin, ULONG* pPosition); -static PaError PinGetAudioPositionViaIOCTLWrite(PaWinWdmPin* pPin, ULONG* pPosition); - -/* Filter management functions */ -static PaWinWdmFilter* FilterNew(PaWDMKSType type, DWORD devNode, const wchar_t* filterName, const wchar_t* friendlyName, PaError* error); -static PaError FilterInitializePins(PaWinWdmFilter* filter); -static void FilterFree(PaWinWdmFilter* filter); -static void FilterAddRef(PaWinWdmFilter* filter); -static PaWinWdmPin* FilterCreatePin( - PaWinWdmFilter* filter, - int pinId, - const WAVEFORMATEX* wfex, - PaError* error); -static PaError FilterUse(PaWinWdmFilter* filter); -static void FilterRelease(PaWinWdmFilter* filter); - -/* Hot plug functions */ -static BOOL IsDeviceTheSame(const PaWinWdmDeviceInfo* pDev1, - const PaWinWdmDeviceInfo* pDev2); - -/* Interface functions */ -static void Terminate( struct PaUtilHostApiRepresentation *hostApi ); -static PaError IsFormatSupported( -struct PaUtilHostApiRepresentation *hostApi, - const PaStreamParameters *inputParameters, - const PaStreamParameters *outputParameters, - double sampleRate ); - -static PaError ScanDeviceInfos( struct PaUtilHostApiRepresentation *hostApi, PaHostApiIndex index, void **newDeviceInfos, int *newDeviceCount ); -static PaError CommitDeviceInfos( struct PaUtilHostApiRepresentation *hostApi, PaHostApiIndex index, void *deviceInfos, int deviceCount ); -static PaError DisposeDeviceInfos( struct PaUtilHostApiRepresentation *hostApi, void *deviceInfos, int deviceCount ); - -static PaError OpenStream( -struct PaUtilHostApiRepresentation *hostApi, - PaStream** s, - const PaStreamParameters *inputParameters, - const PaStreamParameters *outputParameters, - double sampleRate, - unsigned long framesPerBuffer, - PaStreamFlags streamFlags, - PaStreamCallback *streamCallback, - void *userData ); -static PaError CloseStream( PaStream* stream ); -static PaError StartStream( PaStream *stream ); -static PaError StopStream( PaStream *stream ); -static PaError AbortStream( PaStream *stream ); -static PaError IsStreamStopped( PaStream *s ); -static PaError IsStreamActive( PaStream *stream ); -static PaTime GetStreamTime( PaStream *stream ); -static double GetStreamCpuLoad( PaStream* stream ); -static PaError ReadStream( - PaStream* stream, - void *buffer, - unsigned long frames ); -static PaError WriteStream( - PaStream* stream, - const void *buffer, - unsigned long frames ); -static signed long GetStreamReadAvailable( PaStream* stream ); -static signed long GetStreamWriteAvailable( PaStream* stream ); - -/* Utility functions */ -static unsigned long GetWfexSize(const WAVEFORMATEX* wfex); -static PaWinWdmFilter** BuildFilterList(int* filterCount, int* noOfPaDevices, PaError* result); -static BOOL PinWrite(HANDLE h, DATAPACKET* p); -static BOOL PinRead(HANDLE h, DATAPACKET* p); -static void DuplicateFirstChannelInt16(void* buffer, int channels, int samples); -static void DuplicateFirstChannelInt24(void* buffer, int channels, int samples); -PA_THREAD_FUNC ProcessingThread(void*); - -/* Pin handler functions */ -static PaError PaPinCaptureEventHandler_WaveCyclic(PaProcessThreadInfo* pInfo, unsigned 
eventIndex); -static PaError PaPinCaptureSubmitHandler_WaveCyclic(PaProcessThreadInfo* pInfo, unsigned eventIndex); - -static PaError PaPinRenderEventHandler_WaveCyclic(PaProcessThreadInfo* pInfo, unsigned eventIndex); -static PaError PaPinRenderSubmitHandler_WaveCyclic(PaProcessThreadInfo* pInfo, unsigned eventIndex); - -static PaError PaPinCaptureEventHandler_WaveRTEvent(PaProcessThreadInfo* pInfo, unsigned eventIndex); -static PaError PaPinCaptureEventHandler_WaveRTPolled(PaProcessThreadInfo* pInfo, unsigned eventIndex); -static PaError PaPinCaptureSubmitHandler_WaveRTEvent(PaProcessThreadInfo* pInfo, unsigned eventIndex); -static PaError PaPinCaptureSubmitHandler_WaveRTPolled(PaProcessThreadInfo* pInfo, unsigned eventIndex); - -static PaError PaPinRenderEventHandler_WaveRTEvent(PaProcessThreadInfo* pInfo, unsigned eventIndex); -static PaError PaPinRenderEventHandler_WaveRTPolled(PaProcessThreadInfo* pInfo, unsigned eventIndex); -static PaError PaPinRenderSubmitHandler_WaveRTEvent(PaProcessThreadInfo* pInfo, unsigned eventIndex); -static PaError PaPinRenderSubmitHandler_WaveRTPolled(PaProcessThreadInfo* pInfo, unsigned eventIndex); - -/* Function bodies */ - -#if defined(_DEBUG) && defined(PA_ENABLE_DEBUG_OUTPUT) -#define PA_WDMKS_SET_TREF -static PaTime tRef = 0; - -static void PaWinWdmDebugPrintf(const char* fmt, ...) -{ - va_list list; - char buffer[1024]; - PaTime t = PaUtil_GetTime() - tRef; - va_start(list, fmt); - _vsnprintf(buffer, 1023, fmt, list); - va_end(list); - PaUtil_DebugPrint("%6.3lf: %s", t, buffer); -} - -#ifdef PA_DEBUG -#undef PA_DEBUG -#define PA_DEBUG(x) PaWinWdmDebugPrintf x ; -#endif -#endif - -static BOOL IsDeviceTheSame(const PaWinWdmDeviceInfo* pDev1, - const PaWinWdmDeviceInfo* pDev2) -{ - if (pDev1 == NULL || pDev2 == NULL) - return FALSE; - - if (pDev1 == pDev2) - return TRUE; - - if (strcmp(pDev1->compositeName, pDev2->compositeName) == 0) - return TRUE; - - return FALSE; -} - -static BOOL IsEarlierThanVista() -{ -/* -NOTE: GetVersionEx() is deprecated as of Windows 8.1 and cannot be used to reliably detect -versions of Windows higher than Windows 8 (due to manifest requirements for reporting higher versions). -Microsoft recommends switching to VerifyVersionInfo (available on Win 2k and later), however GetVersionEx -is faster, so for now we just disable the deprecation warning. -See: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724451(v=vs.85).aspx -See: http://www.codeproject.com/Articles/678606/Part-Overcoming-Windows-s-deprecation-of-GetVe -*/ -#pragma warning (disable : 4996) /* use of GetVersionEx */ - - OSVERSIONINFO osvi; - osvi.dwOSVersionInfoSize = sizeof(osvi); - if (GetVersionEx(&osvi) && osvi.dwMajorVersion<6) - { - return TRUE; - } - return FALSE; - -#pragma warning (default : 4996) -} - - - -static void MemoryBarrierDummy(void) -{ - /* Do nothing */ -} - -static void MemoryBarrierRead(void) -{ - PaUtil_ReadMemoryBarrier(); -} - -static void MemoryBarrierWrite(void) -{ - PaUtil_WriteMemoryBarrier(); -} - -static unsigned long GetWfexSize(const WAVEFORMATEX* wfex) -{ - if( wfex->wFormatTag == WAVE_FORMAT_PCM ) - { - return sizeof( WAVEFORMATEX ); - } - else - { - return (sizeof( WAVEFORMATEX ) + wfex->cbSize); - } -} - -static void PaWinWDM_SetLastErrorInfo(long errCode, const char* fmt, ...)
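-/* Editor's note, not part of the original source: this helper formats a
-   printf-style message into a local buffer and records it against the WDM-KS
-   host API via PaUtil_SetLastHostErrorInfo(paWDMKS, ...), so applications can
-   retrieve the text later through Pa_GetLastHostErrorInfo(). */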
-{ - va_list list; - char buffer[1024]; - va_start(list, fmt); - _vsnprintf(buffer, 1023, fmt, list); - va_end(list); - PaUtil_SetLastHostErrorInfo(paWDMKS, errCode, buffer); -} - -/* -Low level pin/filter access functions -*/ -static PaError WdmSyncIoctl( - HANDLE handle, - unsigned long ioctlNumber, - void* inBuffer, - unsigned long inBufferCount, - void* outBuffer, - unsigned long outBufferCount, - unsigned long* bytesReturned) -{ - PaError result = paNoError; - unsigned long dummyBytesReturned = 0; - BOOL bRes; - - if( !bytesReturned ) - { - /* Use a dummy as the caller hasn't supplied one */ - bytesReturned = &dummyBytesReturned; - } - - bRes = DeviceIoControl(handle, ioctlNumber, inBuffer, inBufferCount, outBuffer, outBufferCount, bytesReturned, NULL); - if (!bRes) - { - unsigned long error = GetLastError(); - if ( !(((error == ERROR_INSUFFICIENT_BUFFER ) || ( error == ERROR_MORE_DATA )) && - ( ioctlNumber == IOCTL_KS_PROPERTY ) && - ( outBufferCount == 0 ) ) ) - { - KSPROPERTY* ksProperty = (KSPROPERTY*)inBuffer; - - PaWinWDM_SetLastErrorInfo(result, "WdmSyncIoctl: DeviceIoControl GLE = 0x%08X (prop_set = {%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X}, prop_id = %u)", - error, - ksProperty->Set.Data1, ksProperty->Set.Data2, ksProperty->Set.Data3, - ksProperty->Set.Data4[0], ksProperty->Set.Data4[1], - ksProperty->Set.Data4[2], ksProperty->Set.Data4[3], - ksProperty->Set.Data4[4], ksProperty->Set.Data4[5], - ksProperty->Set.Data4[6], ksProperty->Set.Data4[7], - ksProperty->Id - ); - result = paUnanticipatedHostError; - } - } - return result; -} - -static PaError WdmGetPropertySimple(HANDLE handle, - const GUID* const guidPropertySet, - unsigned long property, - void* value, - unsigned long valueCount) -{ - PaError result; - KSPROPERTY ksProperty; - - ksProperty.Set = *guidPropertySet; - ksProperty.Id = property; - ksProperty.Flags = KSPROPERTY_TYPE_GET; - - result = WdmSyncIoctl( - handle, - IOCTL_KS_PROPERTY, - &ksProperty, - sizeof(KSPROPERTY), - value, - valueCount, - NULL); - - return result; -} - -static PaError WdmSetPropertySimple( - HANDLE handle, - const GUID* const guidPropertySet, - unsigned long property, - void* value, - unsigned long valueCount, - void* instance, - unsigned long instanceCount) -{ - PaError result; - KSPROPERTY* ksProperty; - unsigned long propertyCount = 0; - - propertyCount = sizeof(KSPROPERTY) + instanceCount; - ksProperty = (KSPROPERTY*)_alloca( propertyCount ); - if( !ksProperty ) - { - return paInsufficientMemory; - } - - ksProperty->Set = *guidPropertySet; - ksProperty->Id = property; - ksProperty->Flags = KSPROPERTY_TYPE_SET; - - if( instance ) - { - memcpy((void*)((char*)ksProperty + sizeof(KSPROPERTY)), instance, instanceCount); - } - - result = WdmSyncIoctl( - handle, - IOCTL_KS_PROPERTY, - ksProperty, - propertyCount, - value, - valueCount, - NULL); - - return result; -} - -static PaError WdmGetPinPropertySimple( - HANDLE handle, - unsigned long pinId, - const GUID* const guidPropertySet, - unsigned long property, - void* value, - unsigned long valueCount, - unsigned long *byteCount) -{ - PaError result; - - KSP_PIN ksPProp; - ksPProp.Property.Set = *guidPropertySet; - ksPProp.Property.Id = property; - ksPProp.Property.Flags = KSPROPERTY_TYPE_GET; - ksPProp.PinId = pinId; - ksPProp.Reserved = 0; - - result = WdmSyncIoctl( - handle, - IOCTL_KS_PROPERTY, - &ksPProp, - sizeof(KSP_PIN), - value, - valueCount, - byteCount); - - return result; -} - -static PaError WdmGetPinPropertyMulti( - HANDLE handle, - unsigned long pinId, - const GUID* 
const guidPropertySet, - unsigned long property, - KSMULTIPLE_ITEM** ksMultipleItem) -{ - PaError result; - unsigned long multipleItemSize = 0; - KSP_PIN ksPProp; - - ksPProp.Property.Set = *guidPropertySet; - ksPProp.Property.Id = property; - ksPProp.Property.Flags = KSPROPERTY_TYPE_GET; - ksPProp.PinId = pinId; - ksPProp.Reserved = 0; - - result = WdmSyncIoctl( - handle, - IOCTL_KS_PROPERTY, - &ksPProp.Property, - sizeof(KSP_PIN), - NULL, - 0, - &multipleItemSize); - if( result != paNoError ) - { - return result; - } - - *ksMultipleItem = (KSMULTIPLE_ITEM*)PaUtil_AllocateMemory( multipleItemSize ); - if( !*ksMultipleItem ) - { - return paInsufficientMemory; - } - - result = WdmSyncIoctl( - handle, - IOCTL_KS_PROPERTY, - &ksPProp, - sizeof(KSP_PIN), - (void*)*ksMultipleItem, - multipleItemSize, - NULL); - - if( result != paNoError ) - { - PaUtil_FreeMemory( ksMultipleItem ); - } - - return result; -} - -static PaError WdmGetPropertyMulti(HANDLE handle, - const GUID* const guidPropertySet, - unsigned long property, - KSMULTIPLE_ITEM** ksMultipleItem) -{ - PaError result; - unsigned long multipleItemSize = 0; - KSPROPERTY ksProp; - - ksProp.Set = *guidPropertySet; - ksProp.Id = property; - ksProp.Flags = KSPROPERTY_TYPE_GET; - - result = WdmSyncIoctl( - handle, - IOCTL_KS_PROPERTY, - &ksProp, - sizeof(KSPROPERTY), - NULL, - 0, - &multipleItemSize); - if( result != paNoError ) - { - return result; - } - - *ksMultipleItem = (KSMULTIPLE_ITEM*)PaUtil_AllocateMemory( multipleItemSize ); - if( !*ksMultipleItem ) - { - return paInsufficientMemory; - } - - result = WdmSyncIoctl( - handle, - IOCTL_KS_PROPERTY, - &ksProp, - sizeof(KSPROPERTY), - (void*)*ksMultipleItem, - multipleItemSize, - NULL); - - if( result != paNoError ) - { - PaUtil_FreeMemory( ksMultipleItem ); - } - - return result; -} - -static PaError WdmSetMuxNodeProperty(HANDLE handle, - ULONG nodeId, - ULONG pinId) -{ - PaError result = paNoError; - KSNODEPROPERTY prop; - prop.Property.Set = KSPROPSETID_Audio; - prop.Property.Id = KSPROPERTY_AUDIO_MUX_SOURCE; - prop.Property.Flags = KSPROPERTY_TYPE_SET | KSPROPERTY_TYPE_TOPOLOGY; - prop.NodeId = nodeId; - prop.Reserved = 0; - - result = WdmSyncIoctl(handle, IOCTL_KS_PROPERTY, &prop, sizeof(KSNODEPROPERTY), &pinId, sizeof(ULONG), NULL); - - return result; -} - -/* Used when traversing topology for outputs */ -static const KSTOPOLOGY_CONNECTION* GetConnectionTo(const KSTOPOLOGY_CONNECTION* pFrom, PaWinWdmFilter* filter, int muxIdx) -{ - unsigned i; - const KSTOPOLOGY_CONNECTION* retval = NULL; - const KSTOPOLOGY_CONNECTION* connections = (const KSTOPOLOGY_CONNECTION*)(filter->connections + 1); - (void)muxIdx; - PA_DEBUG(("GetConnectionTo: Checking %u connections... (pFrom = %p)", filter->connections->Count, pFrom)); - for (i = 0; i < filter->connections->Count; ++i) - { - const KSTOPOLOGY_CONNECTION* pConn = connections + i; - if (pConn == pFrom) - continue; - - if (pConn->FromNode == pFrom->ToNode) - { - retval = pConn; - break; - } - } - PA_DEBUG(("GetConnectionTo: Returning %p\n", retval)); - return retval; -} - -/* Used when traversing topology for inputs */ -static const KSTOPOLOGY_CONNECTION* GetConnectionFrom(const KSTOPOLOGY_CONNECTION* pTo, PaWinWdmFilter* filter, int muxIdx) -{ - unsigned i; - const KSTOPOLOGY_CONNECTION* retval = NULL; - const KSTOPOLOGY_CONNECTION* connections = (const KSTOPOLOGY_CONNECTION*)(filter->connections + 1); - int muxCntr = 0; - PA_DEBUG(("GetConnectionFrom: Checking %u connections... 
(pTo = %p)\n", filter->connections->Count, pTo)); - for (i = 0; i < filter->connections->Count; ++i) - { - const KSTOPOLOGY_CONNECTION* pConn = connections + i; - if (pConn == pTo) - continue; - - if (pConn->ToNode == pTo->FromNode) - { - if (muxIdx >= 0) - { - if (muxCntr < muxIdx) - { - ++muxCntr; - continue; - } - } - retval = pConn; - break; - } - } - PA_DEBUG(("GetConnectionFrom: Returning %p\n", retval)); - return retval; -} - -static ULONG GetNumberOfConnectionsTo(const KSTOPOLOGY_CONNECTION* pTo, PaWinWdmFilter* filter) -{ - ULONG retval = 0; - unsigned i; - const KSTOPOLOGY_CONNECTION* connections = (const KSTOPOLOGY_CONNECTION*)(filter->connections + 1); - PA_DEBUG(("GetNumberOfConnectionsTo: Checking %u connections...\n", filter->connections->Count)); - for (i = 0; i < filter->connections->Count; ++i) - { - const KSTOPOLOGY_CONNECTION* pConn = connections + i; - if (pConn->ToNode == pTo->FromNode && - (pTo->FromNode != KSFILTER_NODE || pConn->ToNodePin == pTo->FromNodePin)) - { - ++retval; - } - } - PA_DEBUG(("GetNumberOfConnectionsTo: Returning %d\n", retval)); - return retval; -} - -typedef const KSTOPOLOGY_CONNECTION *(*TFnGetConnection)(const KSTOPOLOGY_CONNECTION*, PaWinWdmFilter*, int); - -static const KSTOPOLOGY_CONNECTION* FindStartConnectionFrom(ULONG startPin, PaWinWdmFilter* filter) -{ - unsigned i; - const KSTOPOLOGY_CONNECTION* connections = (const KSTOPOLOGY_CONNECTION*)(filter->connections + 1); - PA_DEBUG(("FindStartConnectionFrom: Startpin %u, Checking %u connections...\n", startPin, filter->connections->Count)); - for (i = 0; i < filter->connections->Count; ++i) - { - const KSTOPOLOGY_CONNECTION* pConn = connections + i; - if (pConn->ToNode == KSFILTER_NODE && pConn->ToNodePin == startPin) - { - PA_DEBUG(("FindStartConnectionFrom: returning %p\n", pConn)); - return pConn; - } - } - - /* Some devices may report topologies that leave pins unconnected. This may be by design or driver installation - issues. Pass the error condition back to caller. */ - PA_DEBUG(("FindStartConnectionFrom: returning NULL\n")); - return 0; -} - -static const KSTOPOLOGY_CONNECTION* FindStartConnectionTo(ULONG startPin, PaWinWdmFilter* filter) -{ - unsigned i; - const KSTOPOLOGY_CONNECTION* connections = (const KSTOPOLOGY_CONNECTION*)(filter->connections + 1); - PA_DEBUG(("FindStartConnectionTo: Startpin %u, Checking %u connections...\n", startPin, filter->connections->Count)); - for (i = 0; i < filter->connections->Count; ++i) - { - const KSTOPOLOGY_CONNECTION* pConn = connections + i; - if (pConn->FromNode == KSFILTER_NODE && pConn->FromNodePin == startPin) - { - PA_DEBUG(("FindStartConnectionTo: returning %p\n", pConn)); - return pConn; - } - } - - /* Unconnected pin. Inform caller. */ - PA_DEBUG(("FindStartConnectionTo: returning NULL\n")); - return 0; -} - -static ULONG GetConnectedPin(ULONG startPin, BOOL forward, PaWinWdmFilter* filter, int muxPosition, ULONG *muxInputPinId, ULONG *muxNodeId) -{ - int limit=1000; - const KSTOPOLOGY_CONNECTION *conn = NULL; - TFnGetConnection fnGetConnection = forward ? GetConnectionTo : GetConnectionFrom ; - PA_LOGE_; - while (1) - { - limit--; - if (limit == 0) { - PA_DEBUG(("GetConnectedPin: LOOP LIMIT REACHED\n")); - break; - } - - if (conn == NULL) - { - conn = forward ? FindStartConnectionTo(startPin, filter) : FindStartConnectionFrom(startPin, filter); - } - else - { - conn = fnGetConnection(conn, filter, -1); - } - - /* Handling case of erroneous connection list */ - if (conn == NULL) - { - break; - } - - if (forward ? 
conn->ToNode == KSFILTER_NODE : conn->FromNode == KSFILTER_NODE) - { - return forward ? conn->ToNodePin : conn->FromNodePin; - } - else - { - PA_DEBUG(("GetConnectedPin: count=%d, forward=%d, muxPosition=%d\n", filter->nodes->Count, forward, muxPosition)); - if (filter->nodes->Count > 0 && !forward && muxPosition >= 0) - { - const GUID* nodes = (const GUID*)(filter->nodes + 1); - if (IsEqualGUID(&nodes[conn->FromNode], &KSNODETYPE_MUX)) - { - ULONG nConn = GetNumberOfConnectionsTo(conn, filter); - conn = fnGetConnection(conn, filter, muxPosition); - if (conn == NULL) - { - break; - } - if (muxInputPinId != 0) - { - *muxInputPinId = conn->ToNodePin; - } - if (muxNodeId != 0) - { - *muxNodeId = conn->ToNode; - } - } - } - } - } - PA_LOGL_; - return KSFILTER_NODE; -} - -static void DumpConnectionsAndNodes(PaWinWdmFilter* filter) -{ - unsigned i; - const KSTOPOLOGY_CONNECTION* connections = (const KSTOPOLOGY_CONNECTION*)(filter->connections + 1); - const GUID* nodes = (const GUID*)(filter->nodes + 1); - - PA_LOGE_; - PA_DEBUG(("DumpConnectionsAndNodes: connections=%d, nodes=%d\n", filter->connections->Count, filter->nodes->Count)); - - for (i=0; i < filter->connections->Count; ++i) - { - const KSTOPOLOGY_CONNECTION* pConn = connections + i; - PA_DEBUG((" Connection: %u - FromNode=%u,FromPin=%u -> ToNode=%u,ToPin=%u\n", - i, - pConn->FromNode, pConn->FromNodePin, - pConn->ToNode, pConn->ToNodePin - )); - } - - for (i=0; i < filter->nodes->Count; ++i) - { - const GUID* pConn = nodes + i; - PA_DEBUG((" Node: %d - {%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X}\n", - i, - pConn->Data1, pConn->Data2, pConn->Data3, - pConn->Data4[0], pConn->Data4[1], - pConn->Data4[2], pConn->Data4[3], - pConn->Data4[4], pConn->Data4[5], - pConn->Data4[6], pConn->Data4[7] - )); - } - PA_LOGL_; - -} - -typedef struct __PaUsbTerminalGUIDToName -{ - USHORT usbGUID; - wchar_t name[64]; -} PaUsbTerminalGUIDToName; - -static const PaUsbTerminalGUIDToName kNames[] = -{ - /* Types copied from: http://msdn.microsoft.com/en-us/library/ff537742(v=vs.85).aspx */ - /* Input terminal types */ - { 0x0201, L"Microphone" }, - { 0x0202, L"Desktop Microphone" }, - { 0x0203, L"Personal Microphone" }, - { 0x0204, L"Omni Directional Microphone" }, - { 0x0205, L"Microphone Array" }, - { 0x0206, L"Processing Microphone Array" }, - /* Output terminal types */ - { 0x0301, L"Speakers" }, - { 0x0302, L"Headphones" }, - { 0x0303, L"Head Mounted Display Audio" }, - { 0x0304, L"Desktop Speaker" }, - { 0x0305, L"Room Speaker" }, - { 0x0306, L"Communication Speaker" }, - { 0x0307, L"LFE Speakers" }, - /* External terminal types */ - { 0x0601, L"Analog" }, - { 0x0602, L"Digital" }, - { 0x0603, L"Line" }, - { 0x0604, L"Audio" }, - { 0x0605, L"SPDIF" }, -}; - -static const unsigned kNamesCnt = sizeof(kNames)/sizeof(PaUsbTerminalGUIDToName); - -static int PaUsbTerminalGUIDToNameCmp(const void* lhs, const void* rhs) -{ - const PaUsbTerminalGUIDToName* pL = (const PaUsbTerminalGUIDToName*)lhs; - const PaUsbTerminalGUIDToName* pR = (const PaUsbTerminalGUIDToName*)rhs; - return ((int)(pL->usbGUID) - (int)(pR->usbGUID)); -} - -static PaError GetNameFromCategory(const GUID* pGUID, BOOL input, wchar_t* name, unsigned length) -{ - PaError result = paUnanticipatedHostError; - USHORT usbTerminalGUID = (USHORT)(pGUID->Data1 - 0xDFF219E0); - - PA_LOGE_; - if (input && usbTerminalGUID >= 0x301 && usbTerminalGUID < 0x400) - { - /* Output terminal name for an input !? Set it to Line! 
*/ - usbTerminalGUID = 0x603; - } - if (!input && usbTerminalGUID >= 0x201 && usbTerminalGUID < 0x300) - { - /* Input terminal name for an output !? Set it to Line! */ - usbTerminalGUID = 0x603; - } - if (usbTerminalGUID >= 0x201 && usbTerminalGUID < 0x713) - { - PaUsbTerminalGUIDToName s = { usbTerminalGUID }; - const PaUsbTerminalGUIDToName* ptr = bsearch( - &s, - kNames, - kNamesCnt, - sizeof(PaUsbTerminalGUIDToName), - PaUsbTerminalGUIDToNameCmp - ); - if (ptr != 0) - { - PA_DEBUG(("GetNameFromCategory: USB GUID %04X -> '%S'\n", usbTerminalGUID, ptr->name)); - - if (name != NULL && length > 0) - { - int n = _snwprintf(name, length, L"%s", ptr->name); - if (usbTerminalGUID >= 0x601 && usbTerminalGUID < 0x700) - { - _snwprintf(name + n, length - n, L" %s", (input ? L"In":L"Out")); - } - } - result = paNoError; - } - } - else - { - PaWinWDM_SetLastErrorInfo(result, "GetNameFromCategory: usbTerminalGUID = %04X ", usbTerminalGUID); - } - PA_LOGL_; - return result; -} - -static BOOL IsFrequencyWithinRange(const KSDATARANGE_AUDIO* range, int frequency) -{ - if (frequency < (int)range->MinimumSampleFrequency) - return FALSE; - if (frequency > (int)range->MaximumSampleFrequency) - return FALSE; - return TRUE; -} - -static BOOL IsBitsWithinRange(const KSDATARANGE_AUDIO* range, int noOfBits) -{ - if (noOfBits < (int)range->MinimumBitsPerSample) - return FALSE; - if (noOfBits > (int)range->MaximumBitsPerSample) - return FALSE; - return TRUE; -} - -/* Note: Somewhat different order compared to WMME implementation, as we want to focus on fidelity first */ -static const int defaultSampleRateSearchOrder[] = -{ 44100, 48000, 88200, 96000, 192000, 32000, 24000, 22050, 16000, 12000, 11025, 9600, 8000 }; -static const int defaultSampleRateSearchOrderCount = sizeof(defaultSampleRateSearchOrder)/sizeof(defaultSampleRateSearchOrder[0]); - -static int DefaultSampleFrequencyIndex(const KSDATARANGE_AUDIO* range) -{ - int i; - - for(i=0; i < defaultSampleRateSearchOrderCount; ++i) - { - int currentFrequency = defaultSampleRateSearchOrder[i]; - - if (IsFrequencyWithinRange(range, currentFrequency)) - { - return i; - } - } - - return -1; -} - -/* -Create a new pin object belonging to a filter -The pin object holds all the configuration information about the pin -before it is opened, and then the handle of the pin after is opened -*/ -static PaWinWdmPin* PinNew(PaWinWdmFilter* parentFilter, unsigned long pinId, PaError* error) -{ - PaWinWdmPin* pin; - PaError result; - unsigned long i; - KSMULTIPLE_ITEM* item = NULL; - KSIDENTIFIER* identifier; - KSDATARANGE* dataRange; - const ULONG streamingId = (parentFilter->devInfo.streamingType == Type_kWaveRT) ? 
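-        /* Editor's annotation, not in the original source: WaveRT pins stream
-           through a looped (cyclic) buffer interface, while WaveCyclic pins
-           use the standard packet-based streaming interface. */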
KSINTERFACE_STANDARD_LOOPED_STREAMING : KSINTERFACE_STANDARD_STREAMING; - int defaultSampleRateIndex = defaultSampleRateSearchOrderCount; - - PA_LOGE_; - PA_DEBUG(("PinNew: Creating pin %d:\n",pinId)); - - /* Allocate the new PIN object */ - pin = (PaWinWdmPin*)PaUtil_AllocateMemory( sizeof(PaWinWdmPin) ); - if( !pin ) - { - result = paInsufficientMemory; - goto error; - } - - /* Zero the pin object */ - /* memset( (void*)pin, 0, sizeof(PaWinWdmPin) ); */ - - pin->parentFilter = parentFilter; - pin->pinId = pinId; - - /* Allocate a connect structure */ - pin->pinConnectSize = sizeof(KSPIN_CONNECT) + sizeof(KSDATAFORMAT_WAVEFORMATEX); - pin->pinConnect = (KSPIN_CONNECT*)PaUtil_AllocateMemory( pin->pinConnectSize ); - if( !pin->pinConnect ) - { - result = paInsufficientMemory; - goto error; - } - - /* Configure the connect structure with default values */ - pin->pinConnect->Interface.Set = KSINTERFACESETID_Standard; - pin->pinConnect->Interface.Id = streamingId; - pin->pinConnect->Interface.Flags = 0; - pin->pinConnect->Medium.Set = KSMEDIUMSETID_Standard; - pin->pinConnect->Medium.Id = KSMEDIUM_TYPE_ANYINSTANCE; - pin->pinConnect->Medium.Flags = 0; - pin->pinConnect->PinId = pinId; - pin->pinConnect->PinToHandle = NULL; - pin->pinConnect->Priority.PriorityClass = KSPRIORITY_NORMAL; - pin->pinConnect->Priority.PrioritySubClass = 1; - pin->ksDataFormatWfx = (KSDATAFORMAT_WAVEFORMATEX*)(pin->pinConnect + 1); - pin->ksDataFormatWfx->DataFormat.FormatSize = sizeof(KSDATAFORMAT_WAVEFORMATEX); - pin->ksDataFormatWfx->DataFormat.Flags = 0; - pin->ksDataFormatWfx->DataFormat.Reserved = 0; - pin->ksDataFormatWfx->DataFormat.MajorFormat = KSDATAFORMAT_TYPE_AUDIO; - pin->ksDataFormatWfx->DataFormat.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; - pin->ksDataFormatWfx->DataFormat.Specifier = KSDATAFORMAT_SPECIFIER_WAVEFORMATEX; - - pin->frameSize = 0; /* Unknown until we instantiate pin */ - - /* Get the COMMUNICATION property */ - result = WdmGetPinPropertySimple( - parentFilter->handle, - pinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_COMMUNICATION, - &pin->communication, - sizeof(KSPIN_COMMUNICATION), - NULL); - if( result != paNoError ) - goto error; - - if( /*(pin->communication != KSPIN_COMMUNICATION_SOURCE) &&*/ - (pin->communication != KSPIN_COMMUNICATION_SINK) && - (pin->communication != KSPIN_COMMUNICATION_BOTH) ) - { - PA_DEBUG(("PinNew: Not source/sink\n")); - result = paInvalidDevice; - goto error; - } - - /* Get dataflow information */ - result = WdmGetPinPropertySimple( - parentFilter->handle, - pinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_DATAFLOW, - &pin->dataFlow, - sizeof(KSPIN_DATAFLOW), - NULL); - - if( result != paNoError ) - goto error; - - /* Get the INTERFACE property list */ - result = WdmGetPinPropertyMulti( - parentFilter->handle, - pinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_INTERFACES, - &item); - - if( result != paNoError ) - goto error; - - identifier = (KSIDENTIFIER*)(item+1); - - /* Check that at least one interface is STANDARD_STREAMING */ - result = paUnanticipatedHostError; - for( i = 0; i < item->Count; i++ ) - { - if( IsEqualGUID(&identifier[i].Set, &KSINTERFACESETID_Standard) && ( identifier[i].Id == streamingId ) ) - { - result = paNoError; - break; - } - } - - if( result != paNoError ) - { - PA_DEBUG(("PinNew: No %s streaming\n", streamingId==KSINTERFACE_STANDARD_LOOPED_STREAMING?"looped":"standard")); - goto error; - } - - /* Don't need interfaces any more */ - PaUtil_FreeMemory( item ); - item = NULL; - - /* Get the MEDIUM properties list */ - result = 
WdmGetPinPropertyMulti( - parentFilter->handle, - pinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_MEDIUMS, - &item); - - if( result != paNoError ) - goto error; - - identifier = (KSIDENTIFIER*)(item+1); /* Not actually necessary... */ - - /* Check that at least one medium is STANDARD_DEVIO */ - result = paUnanticipatedHostError; - for( i = 0; i < item->Count; i++ ) - { - if( IsEqualGUID(&identifier[i].Set, &KSMEDIUMSETID_Standard) && ( identifier[i].Id == KSMEDIUM_STANDARD_DEVIO ) ) - { - result = paNoError; - break; - } - } - - if( result != paNoError ) - { - PA_DEBUG(("No standard devio\n")); - goto error; - } - /* Don't need mediums any more */ - PaUtil_FreeMemory( item ); - item = NULL; - - /* Get DATARANGES */ - result = WdmGetPinPropertyMulti( - parentFilter->handle, - pinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_DATARANGES, - &pin->dataRangesItem); - - if( result != paNoError ) - goto error; - - pin->dataRanges = (KSDATARANGE*)(pin->dataRangesItem +1); - - /* Check that at least one datarange supports audio */ - result = paUnanticipatedHostError; - dataRange = pin->dataRanges; - pin->maxChannels = 0; - pin->defaultSampleRate = 0; - pin->formats = 0; - PA_DEBUG(("PinNew: Checking %u no of dataranges...\n", pin->dataRangesItem->Count)); - for( i = 0; i < pin->dataRangesItem->Count; i++) - { - PA_DEBUG(("PinNew: DR major format %x\n",*(unsigned long*)(&(dataRange->MajorFormat)))); - /* Check that subformat is WAVEFORMATEX, PCM or WILDCARD */ - if( IS_VALID_WAVEFORMATEX_GUID(&dataRange->SubFormat) || - IsEqualGUID(&dataRange->SubFormat, &KSDATAFORMAT_SUBTYPE_PCM) || - IsEqualGUID(&dataRange->SubFormat, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) || - IsEqualGUID(&dataRange->SubFormat, &KSDATAFORMAT_SUBTYPE_WILDCARD) || - IsEqualGUID(&dataRange->MajorFormat, &KSDATAFORMAT_TYPE_AUDIO) ) - { - int defaultIndex; - result = paNoError; - /* Record the maximum possible channels with this pin */ - if( ((KSDATARANGE_AUDIO*)dataRange)->MaximumChannels == (ULONG) -1 ) - { - pin->maxChannels = MAXIMUM_NUMBER_OF_CHANNELS; - } - else if( (int) ((KSDATARANGE_AUDIO*)dataRange)->MaximumChannels > pin->maxChannels ) - { - pin->maxChannels = (int) ((KSDATARANGE_AUDIO*)dataRange)->MaximumChannels; - } - PA_DEBUG(("PinNew: MaxChannel: %d\n",pin->maxChannels)); - - /* Record the formats (bit depths) that are supported */ - if( IsBitsWithinRange((KSDATARANGE_AUDIO*)dataRange, 8) ) - { - pin->formats |= paInt8; - PA_DEBUG(("PinNew: Format PCM 8 bit supported\n")); - } - if( IsBitsWithinRange((KSDATARANGE_AUDIO*)dataRange, 16) ) - { - pin->formats |= paInt16; - PA_DEBUG(("PinNew: Format PCM 16 bit supported\n")); - } - if( IsBitsWithinRange((KSDATARANGE_AUDIO*)dataRange, 24) ) - { - pin->formats |= paInt24; - PA_DEBUG(("PinNew: Format PCM 24 bit supported\n")); - } - if( IsBitsWithinRange((KSDATARANGE_AUDIO*)dataRange, 32) ) - { - if (IsEqualGUID(&dataRange->SubFormat, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)) - { - pin->formats |= paFloat32; - PA_DEBUG(("PinNew: Format IEEE float 32 bit supported\n")); - } - else - { - pin->formats |= paInt32; - PA_DEBUG(("PinNew: Format PCM 32 bit supported\n")); - } - } - - defaultIndex = DefaultSampleFrequencyIndex((KSDATARANGE_AUDIO*)dataRange); - if (defaultIndex >= 0 && defaultIndex < defaultSampleRateIndex) - { - defaultSampleRateIndex = defaultIndex; - } - } - dataRange = (KSDATARANGE*)( ((char*)dataRange) + dataRange->FormatSize); - } - - if( result != paNoError ) - goto error; - - /* If none of the frequencies searched for are present, there's something seriously wrong */ - if 
(defaultSampleRateIndex == defaultSampleRateSearchOrderCount) - { - PA_DEBUG(("PinNew: No default sample rate found, skipping pin!\n")); - PaWinWDM_SetLastErrorInfo(paUnanticipatedHostError, "PinNew: No default sample rate found"); - result = paUnanticipatedHostError; - goto error; - } - - /* Set the default sample rate */ - pin->defaultSampleRate = defaultSampleRateSearchOrder[defaultSampleRateIndex]; - PA_DEBUG(("PinNew: Default sample rate = %d Hz\n", pin->defaultSampleRate)); - - /* Get instance information */ - result = WdmGetPinPropertySimple( - parentFilter->handle, - pinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_CINSTANCES, - &pin->instances, - sizeof(KSPIN_CINSTANCES), - NULL); - - if( result != paNoError ) - goto error; - - /* If WaveRT, check if pin supports notification mode */ - if (parentFilter->devInfo.streamingType == Type_kWaveRT) - { - BOOL bSupportsNotification = FALSE; - if (PinQueryNotificationSupport(pin, &bSupportsNotification) == paNoError) - { - pin->pinKsSubType = bSupportsNotification ? SubType_kNotification : SubType_kPolled; - } - } - - /* Query pin name (which means we need to traverse to non IRP pin, via physical connection to topology filter pin, through - its nodes to the endpoint pin, and get that ones name... phew...) */ - PA_DEBUG(("PinNew: Finding topology pin...\n")); - - { - ULONG topoPinId = GetConnectedPin(pinId, (pin->dataFlow == KSPIN_DATAFLOW_IN), parentFilter, -1, NULL, NULL); - const wchar_t kInputName[] = L"Input"; - const wchar_t kOutputName[] = L"Output"; - - if (topoPinId != KSFILTER_NODE) - { - /* Get physical connection for topo pin */ - unsigned long cbBytes = 0; - PA_DEBUG(("PinNew: Getting physical connection...\n")); - result = WdmGetPinPropertySimple(parentFilter->handle, - topoPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_PHYSICALCONNECTION, - 0, - 0, - &cbBytes - ); - - if (result != paNoError) - { - /* No physical connection -> there is no topology filter! So we get the name of the pin! */ - PA_DEBUG(("PinNew: No physical connection! Getting the pin name\n")); - result = WdmGetPinPropertySimple(parentFilter->handle, - topoPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_NAME, - pin->friendlyName, - MAX_PATH, - NULL); - if (result != paNoError) - { - GUID category = {0}; - - /* Get pin category information */ - result = WdmGetPinPropertySimple(parentFilter->handle, - topoPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_CATEGORY, - &category, - sizeof(GUID), - NULL); - - if (result == paNoError) - { - result = GetNameFromCategory(&category, (pin->dataFlow == KSPIN_DATAFLOW_OUT), pin->friendlyName, MAX_PATH); - } - } - - /* Make sure pin gets a name here... */ - if (wcslen(pin->friendlyName) == 0) - { - wcscpy(pin->friendlyName, (pin->dataFlow == KSPIN_DATAFLOW_IN) ? kOutputName : kInputName); -#ifdef UNICODE - PA_DEBUG(("PinNew: Setting pin friendly name to '%s'\n", pin->friendlyName)); -#else - PA_DEBUG(("PinNew: Setting pin friendly name to '%S'\n", pin->friendlyName)); -#endif - } - - /* This is then == the endpoint pin */ - pin->endpointPinId = (pin->dataFlow == KSPIN_DATAFLOW_IN) ? 
pinId : topoPinId; - } - else - { - KSPIN_PHYSICALCONNECTION* pc = (KSPIN_PHYSICALCONNECTION*)PaUtil_AllocateMemory(cbBytes + 2); - ULONG pcPin; - wchar_t symbLinkName[MAX_PATH]; - PA_DEBUG(("PinNew: Physical connection found!\n")); - if (pc == NULL) - { - result = paInsufficientMemory; - goto error; - } - result = WdmGetPinPropertySimple(parentFilter->handle, - topoPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_PHYSICALCONNECTION, - pc, - cbBytes, - NULL - ); - - pcPin = pc->Pin; - wcsncpy(symbLinkName, pc->SymbolicLinkName, MAX_PATH); - PaUtil_FreeMemory( pc ); - - if (result != paNoError) - { - /* Shouldn't happen, but fail if it does */ - PA_DEBUG(("PinNew: failed to retrieve physical connection!\n")); - goto error; - } - - if (symbLinkName[1] == TEXT('?')) - { - symbLinkName[1] = TEXT('\\'); - } - - if (pin->parentFilter->topologyFilter == NULL) - { - PA_DEBUG(("PinNew: Creating topology filter '%S'\n", symbLinkName)); - - pin->parentFilter->topologyFilter = FilterNew(Type_kNotUsed, 0, symbLinkName, L"", &result); - if (pin->parentFilter->topologyFilter == NULL) - { - PA_DEBUG(("PinNew: Failed creating topology filter\n")); - result = paUnanticipatedHostError; - PaWinWDM_SetLastErrorInfo(result, "Failed to create topology filter '%S'", symbLinkName); - goto error; - } - - /* Copy info so we have it in device info */ - wcsncpy(pin->parentFilter->devInfo.topologyPath, symbLinkName, MAX_PATH); - } - else - { - /* Must be the same */ - assert(wcscmp(symbLinkName, pin->parentFilter->topologyFilter->devInfo.filterPath) == 0); - } - - PA_DEBUG(("PinNew: Opening topology filter...")); - - result = FilterUse(pin->parentFilter->topologyFilter); - if (result == paNoError) - { - unsigned long endpointPinId; - - if (pin->dataFlow == KSPIN_DATAFLOW_IN) - { - /* The "endpointPinId" is what WASAPI looks at for pin names */ - GUID category = {0}; - - PA_DEBUG(("PinNew: Checking for output endpoint pin id...\n")); - - endpointPinId = GetConnectedPin(pcPin, TRUE, pin->parentFilter->topologyFilter, -1, NULL, NULL); - - if (endpointPinId == KSFILTER_NODE) - { - result = paUnanticipatedHostError; - PaWinWDM_SetLastErrorInfo(result, "Failed to get endpoint pin ID on topology filter!"); - goto error; - } - - PA_DEBUG(("PinNew: Found endpoint pin id %u\n", endpointPinId)); - - /* Get pin category information */ - result = WdmGetPinPropertySimple(pin->parentFilter->topologyFilter->handle, - endpointPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_CATEGORY, - &category, - sizeof(GUID), - NULL); - - if (result == paNoError) - { -#if !PA_WDMKS_USE_CATEGORY_FOR_PIN_NAMES - wchar_t pinName[MAX_PATH]; - - PA_DEBUG(("PinNew: Getting pin name property...")); - - /* Ok, try pin name also, and favor that if available */ - result = WdmGetPinPropertySimple(pin->parentFilter->topologyFilter->handle, - endpointPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_NAME, - pinName, - MAX_PATH, - NULL); - - if (result == paNoError && wcslen(pinName)>0) - { - wcsncpy(pin->friendlyName, pinName, MAX_PATH); - } - else -#endif - { - result = GetNameFromCategory(&category, (pin->dataFlow == KSPIN_DATAFLOW_OUT), pin->friendlyName, MAX_PATH); - } - } - - /* Make sure we get a name for the pin */ - if (wcslen(pin->friendlyName) == 0) - { - wcscpy(pin->friendlyName, kOutputName); - } -#ifdef UNICODE - PA_DEBUG(("PinNew: Pin name '%s'\n", pin->friendlyName)); -#else - PA_DEBUG(("PinNew: Pin name '%S'\n", pin->friendlyName)); -#endif - - /* Set endpoint pin ID (this is the topology INPUT pin, since portmixer will always traverse the - filter in audio 
streaming direction, see http://msdn.microsoft.com/en-us/library/windows/hardware/ff536331(v=vs.85).aspx - for more information) - */ - pin->endpointPinId = pcPin; - } - else - { - unsigned muxCount = 0; - int muxPos = 0; - /* Max 64 multiplexer inputs... sanity check :) */ - for (i = 0; i < 64; ++i) - { - ULONG muxNodeIdTest = (unsigned)-1; - PA_DEBUG(("PinNew: Checking for input endpoint pin id (%d)...\n", i)); - - endpointPinId = GetConnectedPin(pcPin, - FALSE, - pin->parentFilter->topologyFilter, - (int)i, - NULL, - &muxNodeIdTest); - - if (endpointPinId == KSFILTER_NODE) - { - /* We're done */ - PA_DEBUG(("PinNew: Done with inputs.\n", endpointPinId)); - break; - } - else - { - /* The "endpointPinId" is what WASAPI looks at for pin names */ - GUID category = {0}; - - PA_DEBUG(("PinNew: Found endpoint pin id %u\n", endpointPinId)); - - /* Get pin category information */ - result = WdmGetPinPropertySimple(pin->parentFilter->topologyFilter->handle, - endpointPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_CATEGORY, - &category, - sizeof(GUID), - NULL); - - if (result == paNoError) - { - if (muxNodeIdTest == (unsigned)-1) - { - /* Ok, try pin name, and favor that if available */ - result = WdmGetPinPropertySimple(pin->parentFilter->topologyFilter->handle, - endpointPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_NAME, - pin->friendlyName, - MAX_PATH, - NULL); - - if (result != paNoError) - { - result = GetNameFromCategory(&category, TRUE, pin->friendlyName, MAX_PATH); - } - break; - } - else - { - result = GetNameFromCategory(&category, TRUE, NULL, 0); - - if (result == paNoError) - { - ++muxCount; - } - } - } - else - { - PA_DEBUG(("PinNew: Failed to get pin category")); - } - } - } - - if (muxCount == 0) - { - pin->endpointPinId = endpointPinId; - /* Make sure we get a name for the pin */ - if (wcslen(pin->friendlyName) == 0) - { - wcscpy(pin->friendlyName, kInputName); - } -#ifdef UNICODE - PA_DEBUG(("PinNew: Input friendly name '%s'\n", pin->friendlyName)); -#else - PA_DEBUG(("PinNew: Input friendly name '%S'\n", pin->friendlyName)); -#endif - } - else // muxCount > 0 - { - PA_DEBUG(("PinNew: Setting up %u inputs\n", muxCount)); - - /* Now we redo the operation once known how many multiplexer positions there are */ - pin->inputs = (PaWinWdmMuxedInput**)PaUtil_AllocateMemory(muxCount * sizeof(PaWinWdmMuxedInput*)); - if (pin->inputs == NULL) - { - FilterRelease(pin->parentFilter->topologyFilter); - result = paInsufficientMemory; - goto error; - } - pin->inputCount = muxCount; - - for (i = 0; i < muxCount; ++muxPos) - { - PA_DEBUG(("PinNew: Setting up input %u...\n", i)); - - if (pin->inputs[i] == NULL) - { - pin->inputs[i] = (PaWinWdmMuxedInput*)PaUtil_AllocateMemory(sizeof(PaWinWdmMuxedInput)); - if (pin->inputs[i] == NULL) - { - FilterRelease(pin->parentFilter->topologyFilter); - result = paInsufficientMemory; - goto error; - } - } - - endpointPinId = GetConnectedPin(pcPin, - FALSE, - pin->parentFilter->topologyFilter, - muxPos, - &pin->inputs[i]->muxPinId, - &pin->inputs[i]->muxNodeId); - - if (endpointPinId != KSFILTER_NODE) - { - /* The "endpointPinId" is what WASAPI looks at for pin names */ - GUID category = {0}; - - /* Set input endpoint ID */ - pin->inputs[i]->endpointPinId = endpointPinId; - - /* Get pin category information */ - result = WdmGetPinPropertySimple(pin->parentFilter->topologyFilter->handle, - endpointPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_CATEGORY, - &category, - sizeof(GUID), - NULL); - - if (result == paNoError) - { - /* Try pin name first, and if that is not 
defined, use category instead */ - result = WdmGetPinPropertySimple(pin->parentFilter->topologyFilter->handle, - endpointPinId, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_NAME, - pin->inputs[i]->friendlyName, - MAX_PATH, - NULL); - - if (result != paNoError) - { - result = GetNameFromCategory(&category, TRUE, pin->inputs[i]->friendlyName, MAX_PATH); - if (result != paNoError) - { - /* Only specify name, let name hash in ScanDeviceInfos fix postfix enumerators */ - wcscpy(pin->inputs[i]->friendlyName, kInputName); - } - } -#ifdef UNICODE - PA_DEBUG(("PinNew: Input (%u) friendly name '%s'\n", i, pin->inputs[i]->friendlyName)); -#else - PA_DEBUG(("PinNew: Input (%u) friendly name '%S'\n", i, pin->inputs[i]->friendlyName)); -#endif - ++i; - } - } - else - { - /* Unconnected pin */ - goto error; - } - } - } - } - } - } - } - else - { - PA_DEBUG(("PinNew: No topology pin id found. Bad...\n")); - /* No TOPO pin id ??? This is bad. Ok, so we just say it is an input or output... */ - wcscpy(pin->friendlyName, (pin->dataFlow == KSPIN_DATAFLOW_IN) ? kOutputName : kInputName); - } - } - - /* Release topology filter if it has been used */ - if (pin->parentFilter->topologyFilter && pin->parentFilter->topologyFilter->handle != NULL) - { - PA_DEBUG(("PinNew: Releasing topology filter...\n")); - FilterRelease(pin->parentFilter->topologyFilter); - } - - /* Success */ - *error = paNoError; - PA_DEBUG(("Pin created successfully\n")); - PA_LOGL_; - return pin; - -error: - PA_DEBUG(("PinNew: Error %d\n", result)); - /* - Error cleanup - */ - - if (pin->parentFilter->topologyFilter && pin->parentFilter->topologyFilter->handle != NULL) - { - FilterRelease(pin->parentFilter->topologyFilter); - } - - PaUtil_FreeMemory( item ); - PinFree(pin); - - *error = result; - PA_LOGL_; - return NULL; -} - -/* -Safely free all resources associated with the pin -*/ -static void PinFree(PaWinWdmPin* pin) -{ - unsigned i; - PA_LOGE_; - if( pin ) - { - PinClose(pin); - if( pin->pinConnect ) - { - PaUtil_FreeMemory( pin->pinConnect ); - } - if( pin->dataRangesItem ) - { - PaUtil_FreeMemory( pin->dataRangesItem ); - } - if( pin->inputs ) - { - for (i = 0; i < pin->inputCount; ++i) - { - PaUtil_FreeMemory( pin->inputs[i] ); - } - PaUtil_FreeMemory( pin->inputs ); - } - PaUtil_FreeMemory( pin ); - } - PA_LOGL_; -} - -/* -If the pin handle is open, close it -*/ -static void PinClose(PaWinWdmPin* pin) -{ - PA_LOGE_; - if( pin == NULL ) - { - PA_DEBUG(("Closing NULL pin!")); - PA_LOGL_; - return; - } - if( pin->handle != NULL ) - { - PinSetState( pin, KSSTATE_PAUSE ); - PinSetState( pin, KSSTATE_STOP ); - CloseHandle( pin->handle ); - pin->handle = NULL; - FilterRelease(pin->parentFilter); - } - PA_LOGL_; -} - -/* -Set the state of this (instantiated) pin -*/ -static PaError PinSetState(PaWinWdmPin* pin, KSSTATE state) -{ - PaError result = paNoError; - KSPROPERTY prop; - - PA_LOGE_; - prop.Set = KSPROPSETID_Connection; - prop.Id = KSPROPERTY_CONNECTION_STATE; - prop.Flags = KSPROPERTY_TYPE_SET; - - if( pin == NULL ) - return paInternalError; - if( pin->handle == NULL ) - return paInternalError; - - result = WdmSyncIoctl(pin->handle, IOCTL_KS_PROPERTY, &prop, sizeof(KSPROPERTY), &state, sizeof(KSSTATE), NULL); - - PA_LOGL_; - return result; -} - -static PaError PinInstantiate(PaWinWdmPin* pin) -{ - PaError result; - unsigned long createResult; - KSALLOCATOR_FRAMING ksaf; - KSALLOCATOR_FRAMING_EX ksafex; - - PA_LOGE_; - - if( pin == NULL ) - return paInternalError; - if(!pin->pinConnect) - return paInternalError; - - 
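-    /* Editor's note, not part of the original source: FilterUse() keeps the
-       parent filter's handle open (see the usageCount member of
-       __PaWinWdmFilter) for as long as the pin handle created below exists,
-       which is why every failure path here, as well as PinClose(), pairs it
-       with a matching FilterRelease(). */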
FilterUse(pin->parentFilter); - - createResult = FunctionKsCreatePin( - pin->parentFilter->handle, - pin->pinConnect, - GENERIC_WRITE | GENERIC_READ, - &pin->handle - ); - - PA_DEBUG(("Pin create result = 0x%08x\n",createResult)); - if( createResult != ERROR_SUCCESS ) - { - FilterRelease(pin->parentFilter); - pin->handle = NULL; - switch (createResult) - { - case ERROR_INVALID_PARAMETER: - /* First case when pin actually doesn't support the format */ - return paSampleFormatNotSupported; - case ERROR_BAD_COMMAND: - /* Case when pin is occupied (by another application) */ - return paDeviceUnavailable; - default: - /* All other cases */ - return paInvalidDevice; - } - } - - if (pin->parentFilter->devInfo.streamingType == Type_kWaveCyclic) - { - /* Framing size query only valid for WaveCyclic devices */ - result = WdmGetPropertySimple( - pin->handle, - &KSPROPSETID_Connection, - KSPROPERTY_CONNECTION_ALLOCATORFRAMING, - &ksaf, - sizeof(ksaf)); - - if( result != paNoError ) - { - result = WdmGetPropertySimple( - pin->handle, - &KSPROPSETID_Connection, - KSPROPERTY_CONNECTION_ALLOCATORFRAMING_EX, - &ksafex, - sizeof(ksafex)); - if( result == paNoError ) - { - pin->frameSize = ksafex.FramingItem[0].FramingRange.Range.MinFrameSize; - } - } - else - { - pin->frameSize = ksaf.FrameSize; - } - } - - PA_LOGL_; - - return paNoError; -} - -static PaError PinSetFormat(PaWinWdmPin* pin, const WAVEFORMATEX* format) -{ - unsigned long size; - void* newConnect; - - PA_LOGE_; - - if( pin == NULL ) - return paInternalError; - if( format == NULL ) - return paInternalError; - - size = GetWfexSize(format) + sizeof(KSPIN_CONNECT) + sizeof(KSDATAFORMAT_WAVEFORMATEX) - sizeof(WAVEFORMATEX); - - if( pin->pinConnectSize != size ) - { - newConnect = PaUtil_AllocateMemory( size ); - if( newConnect == NULL ) - return paInsufficientMemory; - memcpy( newConnect, (void*)pin->pinConnect, min(pin->pinConnectSize,size) ); - PaUtil_FreeMemory( pin->pinConnect ); - pin->pinConnect = (KSPIN_CONNECT*)newConnect; - pin->pinConnectSize = size; - pin->ksDataFormatWfx = (KSDATAFORMAT_WAVEFORMATEX*)((KSPIN_CONNECT*)newConnect + 1); - pin->ksDataFormatWfx->DataFormat.FormatSize = size - sizeof(KSPIN_CONNECT); - } - - memcpy( (void*)&(pin->ksDataFormatWfx->WaveFormatEx), format, GetWfexSize(format) ); - pin->ksDataFormatWfx->DataFormat.SampleSize = (unsigned short)(format->nChannels * (format->wBitsPerSample / 8)); - - PA_LOGL_; - - return paNoError; -} - -static PaError PinIsFormatSupported(PaWinWdmPin* pin, const WAVEFORMATEX* format) -{ - KSDATARANGE_AUDIO* dataRange; - unsigned long count; - GUID guid = DYNAMIC_GUID( DEFINE_WAVEFORMATEX_GUID(format->wFormatTag) ); - PaError result = paInvalidDevice; - const WAVEFORMATEXTENSIBLE* pFormatExt = (format->wFormatTag == WAVE_FORMAT_EXTENSIBLE) ? (const WAVEFORMATEXTENSIBLE*)format : 0; - - PA_LOGE_; - - if( pFormatExt != 0 ) - { - guid = pFormatExt->SubFormat; - } - dataRange = (KSDATARANGE_AUDIO*)pin->dataRanges; - for(count = 0; - count < pin->dataRangesItem->Count; - count++, - dataRange = (KSDATARANGE_AUDIO*)( ((char*)dataRange) + dataRange->DataRange.FormatSize)) /* Need to update dataRange here, due to 'continue' !! */ - { - /* Check major format */ - if (!(IsEqualGUID(&(dataRange->DataRange.MajorFormat), &KSDATAFORMAT_TYPE_AUDIO) || - IsEqualGUID(&(dataRange->DataRange.MajorFormat), &KSDATAFORMAT_TYPE_WILDCARD))) - { - continue; - } - - /* This is an audio or wildcard datarange... */ - if (!
(IsEqualGUID(&(dataRange->DataRange.SubFormat), &KSDATAFORMAT_SUBTYPE_WILDCARD) || - IsEqualGUID(&(dataRange->DataRange.SubFormat), &KSDATAFORMAT_SUBTYPE_PCM) || - IsEqualGUID(&(dataRange->DataRange.SubFormat), &guid) )) - { - continue; - } - - /* Check specifier... */ - if (! (IsEqualGUID(&(dataRange->DataRange.Specifier), &KSDATAFORMAT_SPECIFIER_WILDCARD) || - IsEqualGUID(&(dataRange->DataRange.Specifier), &KSDATAFORMAT_SPECIFIER_WAVEFORMATEX)) ) - { - continue; - } - - PA_DEBUG(("Pin:%x, DataRange:%d\n",(void*)pin,count)); - PA_DEBUG(("\tFormatSize:%d, SampleSize:%d\n",dataRange->DataRange.FormatSize,dataRange->DataRange.SampleSize)); - PA_DEBUG(("\tMaxChannels:%d\n",dataRange->MaximumChannels)); - PA_DEBUG(("\tBits:%d-%d\n",dataRange->MinimumBitsPerSample,dataRange->MaximumBitsPerSample)); - PA_DEBUG(("\tSampleRate:%d-%d\n",dataRange->MinimumSampleFrequency,dataRange->MaximumSampleFrequency)); - - if( dataRange->MaximumChannels != (ULONG)-1 && - dataRange->MaximumChannels < format->nChannels ) - { - result = paInvalidChannelCount; - continue; - } - - if (pFormatExt != 0) - { - if (!IsBitsWithinRange(dataRange, pFormatExt->Samples.wValidBitsPerSample)) - { - result = paSampleFormatNotSupported; - continue; - } - } - else - { - if (!IsBitsWithinRange(dataRange, format->wBitsPerSample)) - { - result = paSampleFormatNotSupported; - continue; - } - } - - if (!IsFrequencyWithinRange(dataRange, format->nSamplesPerSec)) - { - result = paInvalidSampleRate; - continue; - } - - /* Success! */ - result = paNoError; - break; - } - - PA_LOGL_; - return result; -} - -static PaError PinQueryNotificationSupport(PaWinWdmPin* pPin, BOOL* pbResult) -{ - PaError result = paNoError; - KSPROPERTY propIn; - - PA_LOGE_; - - propIn.Set = KSPROPSETID_RtAudio; - propIn.Id = 8; /* = KSPROPERTY_RTAUDIO_QUERY_NOTIFICATION_SUPPORT */ - propIn.Flags = KSPROPERTY_TYPE_GET; - - result = WdmSyncIoctl(pPin->handle, IOCTL_KS_PROPERTY, - &propIn, - sizeof(KSPROPERTY), - pbResult, - sizeof(BOOL), - NULL); - - if (result != paNoError) - { - PA_DEBUG(("Failed PinQueryNotificationSupport\n")); - } - - PA_LOGL_; - return result; -} - -static PaError PinGetBufferWithNotification(PaWinWdmPin* pPin, void** pBuffer, DWORD* pRequestedBufSize, BOOL* pbCallMemBarrier) -{ - PaError result = paNoError; - KSRTAUDIO_BUFFER_PROPERTY_WITH_NOTIFICATION propIn; - KSRTAUDIO_BUFFER propOut; - - PA_LOGE_; - - propIn.BaseAddress = 0; - propIn.NotificationCount = 2; - propIn.RequestedBufferSize = *pRequestedBufSize; - propIn.Property.Set = KSPROPSETID_RtAudio; - propIn.Property.Id = KSPROPERTY_RTAUDIO_BUFFER_WITH_NOTIFICATION; - propIn.Property.Flags = KSPROPERTY_TYPE_GET; - - result = WdmSyncIoctl(pPin->handle, IOCTL_KS_PROPERTY, - &propIn, - sizeof(KSRTAUDIO_BUFFER_PROPERTY_WITH_NOTIFICATION), - &propOut, - sizeof(KSRTAUDIO_BUFFER), - NULL); - - if (result == paNoError) - { - *pBuffer = propOut.BufferAddress; - *pRequestedBufSize = propOut.ActualBufferSize; - *pbCallMemBarrier = propOut.CallMemoryBarrier; - } - else - { - PA_DEBUG(("Failed to get buffer with notification\n")); - } - - PA_LOGL_; - return result; -} - -static PaError PinGetBufferWithoutNotification(PaWinWdmPin* pPin, void** pBuffer, DWORD* pRequestedBufSize, BOOL* pbCallMemBarrier) -{ - PaError result = paNoError; - KSRTAUDIO_BUFFER_PROPERTY propIn; - KSRTAUDIO_BUFFER propOut; - - PA_LOGE_; - - propIn.BaseAddress = NULL; - propIn.RequestedBufferSize = *pRequestedBufSize; - propIn.Property.Set = KSPROPSETID_RtAudio; - propIn.Property.Id = KSPROPERTY_RTAUDIO_BUFFER; - 
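-    /* Editor's note, not part of the original source: as in the variant with
-       notification above, this is the kernel-streaming "get with input" idiom:
-       the request is flagged KSPROPERTY_TYPE_GET, yet the input block already
-       carries BaseAddress and RequestedBufferSize, and the driver answers with
-       the cyclic buffer it actually mapped. */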
propIn.Property.Flags = KSPROPERTY_TYPE_GET; - - result = WdmSyncIoctl(pPin->handle, IOCTL_KS_PROPERTY, - &propIn, - sizeof(KSRTAUDIO_BUFFER_PROPERTY), - &propOut, - sizeof(KSRTAUDIO_BUFFER), - NULL); - - if (result == paNoError) - { - *pBuffer = propOut.BufferAddress; - *pRequestedBufSize = propOut.ActualBufferSize; - *pbCallMemBarrier = propOut.CallMemoryBarrier; - } - else - { - PA_DEBUG(("Failed to get buffer without notification\n")); - } - - PA_LOGL_; - return result; -} - -/* greatest common divisor - PGCD in French */ -static unsigned long PaWinWDMGCD( unsigned long a, unsigned long b ) -{ - return (b==0) ? a : PaWinWDMGCD( b, a%b); -} - - -/* This function will handle getting the cyclic buffer from a WaveRT driver. Certain WaveRT drivers needs to have -requested buffer size on multiples of 128 bytes: - -*/ -static PaError PinGetBuffer(PaWinWdmPin* pPin, void** pBuffer, DWORD* pRequestedBufSize, BOOL* pbCallMemBarrier) -{ - PaError result = paNoError; - int limit = 1000; - PA_LOGE_; - - while (1) - { - limit--; - if (limit == 0) { - PA_DEBUG(("PinGetBuffer: LOOP LIMIT REACHED\n")); - break; - } - - if (pPin->pinKsSubType != SubType_kPolled) - { - /* In case of unknown (or notification), we try both modes */ - result = PinGetBufferWithNotification(pPin, pBuffer, pRequestedBufSize, pbCallMemBarrier); - if (result == paNoError) - { - PA_DEBUG(("PinGetBuffer: SubType_kNotification\n")); - pPin->pinKsSubType = SubType_kNotification; - break; - } - } - - result = PinGetBufferWithoutNotification(pPin, pBuffer, pRequestedBufSize, pbCallMemBarrier); - if (result == paNoError) - { - PA_DEBUG(("PinGetBuffer: SubType_kPolled\n")); - pPin->pinKsSubType = SubType_kPolled; - break; - } - - /* Check if requested size is on a 128 byte boundary */ - if (((*pRequestedBufSize) % 128UL) == 0) - { - PA_DEBUG(("Buffer size on 128 byte boundary, still fails :(\n")); - /* Ok, can't do much more */ - break; - } - else - { - /* Compute LCM so we know which sizes are on a 128 byte boundary */ - const unsigned gcd = PaWinWDMGCD(128UL, pPin->ksDataFormatWfx->WaveFormatEx.nBlockAlign); - const unsigned lcm = (128UL * pPin->ksDataFormatWfx->WaveFormatEx.nBlockAlign) / gcd; - DWORD dwOldSize = *pRequestedBufSize; - - /* Align size to (next larger) LCM byte boundary, and then we try again. Note that LCM is not necessarily a - power of 2. 
*/ - *pRequestedBufSize = ((*pRequestedBufSize + lcm - 1) / lcm) * lcm; - - PA_DEBUG(("Adjusting buffer size from %u to %u bytes (128 byte boundary, LCM=%u)\n", dwOldSize, *pRequestedBufSize, lcm)); - } - } - - PA_LOGL_; - - return result; -} - -static PaError PinRegisterPositionRegister(PaWinWdmPin* pPin) -{ - PaError result = paNoError; - KSRTAUDIO_HWREGISTER_PROPERTY propIn; - KSRTAUDIO_HWREGISTER propOut; - - PA_LOGE_; - - propIn.BaseAddress = NULL; - propIn.Property.Set = KSPROPSETID_RtAudio; - propIn.Property.Id = KSPROPERTY_RTAUDIO_POSITIONREGISTER; - propIn.Property.Flags = KSPROPERTY_TYPE_SET; - - result = WdmSyncIoctl(pPin->handle, IOCTL_KS_PROPERTY, - &propIn, - sizeof(KSRTAUDIO_HWREGISTER_PROPERTY), - &propOut, - sizeof(KSRTAUDIO_HWREGISTER), - NULL); - - if (result == paNoError) - { - pPin->positionRegister = (ULONG*)propOut.Register; - } - else - { - PA_DEBUG(("Failed to register position register\n")); - } - - PA_LOGL_; - - return result; -} - -static PaError PinRegisterNotificationHandle(PaWinWdmPin* pPin, HANDLE handle) -{ - PaError result = paNoError; - KSRTAUDIO_NOTIFICATION_EVENT_PROPERTY prop; - - PA_LOGE_; - - prop.NotificationEvent = handle; - prop.Property.Set = KSPROPSETID_RtAudio; - prop.Property.Id = KSPROPERTY_RTAUDIO_REGISTER_NOTIFICATION_EVENT; - prop.Property.Flags = KSPROPERTY_TYPE_SET; - - result = WdmSyncIoctl(pPin->handle, - IOCTL_KS_PROPERTY, - &prop, - sizeof(KSRTAUDIO_NOTIFICATION_EVENT_PROPERTY), - &prop, - sizeof(KSRTAUDIO_NOTIFICATION_EVENT_PROPERTY), - NULL); - - if (result != paNoError) { - PA_DEBUG(("Failed to register notification handle 0x%08X\n", handle)); - } - - PA_LOGL_; - - return result; -} - -static PaError PinUnregisterNotificationHandle(PaWinWdmPin* pPin, HANDLE handle) -{ - PaError result = paNoError; - KSRTAUDIO_NOTIFICATION_EVENT_PROPERTY prop; - - PA_LOGE_; - - if (handle != NULL) - { - prop.NotificationEvent = handle; - prop.Property.Set = KSPROPSETID_RtAudio; - prop.Property.Id = KSPROPERTY_RTAUDIO_UNREGISTER_NOTIFICATION_EVENT; - prop.Property.Flags = KSPROPERTY_TYPE_SET; - - result = WdmSyncIoctl(pPin->handle, - IOCTL_KS_PROPERTY, - &prop, - sizeof(KSRTAUDIO_NOTIFICATION_EVENT_PROPERTY), - &prop, - sizeof(KSRTAUDIO_NOTIFICATION_EVENT_PROPERTY), - NULL); - - if (result != paNoError) { - PA_DEBUG(("Failed to unregister notification handle 0x%08X\n", handle)); - } - } - PA_LOGL_; - - return result; -} - -static PaError PinGetHwLatency(PaWinWdmPin* pPin, ULONG* pFifoSize, ULONG* pChipsetDelay, ULONG* pCodecDelay) -{ - PaError result = paNoError; - KSPROPERTY propIn; - KSRTAUDIO_HWLATENCY propOut; - - PA_LOGE_; - - propIn.Set = KSPROPSETID_RtAudio; - propIn.Id = KSPROPERTY_RTAUDIO_HWLATENCY; - propIn.Flags = KSPROPERTY_TYPE_GET; - - result = WdmSyncIoctl(pPin->handle, IOCTL_KS_PROPERTY, - &propIn, - sizeof(KSPROPERTY), - &propOut, - sizeof(KSRTAUDIO_HWLATENCY), - NULL); - - if (result == paNoError) - { - *pFifoSize = propOut.FifoSize; - *pChipsetDelay = propOut.ChipsetDelay; - *pCodecDelay = propOut.CodecDelay; - } - else - { - PA_DEBUG(("Failed to retrieve hardware FIFO size!\n")); - } - - PA_LOGL_; - - return result; -} - -/* This one is used for WaveRT */ -static PaError PinGetAudioPositionMemoryMapped(PaWinWdmPin* pPin, ULONG* pPosition) -{ - *pPosition = (*pPin->positionRegister); - return paNoError; -} - -/* This one also, but in case the driver hasn't implemented memory mapped access to the position register */ -static PaError PinGetAudioPositionViaIOCTLRead(PaWinWdmPin* pPin, ULONG* pPosition) -{ - PaError result = 
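/* Worked example of the LCM rounding in PinGetBuffer above: with
 * nBlockAlign = 6 (three channels of 16-bit samples), gcd(128, 6) = 2 and
 * lcm = 128 * 6 / 2 = 384, so a requested size of 1000 bytes becomes
 * ((1000 + 383) / 384) * 384 = 1152, a multiple of both the frame size and
 * 128. A sketch restating that step (ExampleAlignToLcm is hypothetical):
 */
static DWORD ExampleAlignToLcm(DWORD size, DWORD blockAlign)
{
    const DWORD gcd = PaWinWDMGCD(128UL, blockAlign);
    const DWORD lcm = (128UL * blockAlign) / gcd;
    return ((size + lcm - 1) / lcm) * lcm;  /* round up to next multiple */
}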
paNoError; - KSPROPERTY propIn; - KSAUDIO_POSITION propOut; - - PA_LOGE_; - - propIn.Set = KSPROPSETID_Audio; - propIn.Id = KSPROPERTY_AUDIO_POSITION; - propIn.Flags = KSPROPERTY_TYPE_GET; - - result = WdmSyncIoctl(pPin->handle, - IOCTL_KS_PROPERTY, - &propIn, sizeof(KSPROPERTY), - &propOut, sizeof(KSAUDIO_POSITION), - NULL); - - if (result == paNoError) - { - *pPosition = (ULONG)(propOut.PlayOffset); - } - else - { - PA_DEBUG(("Failed to get audio play position!\n")); - } - - PA_LOGL_; - - return result; - -} - -/* This one also, but in case the driver hasn't implemented memory mapped access to the position register */ -static PaError PinGetAudioPositionViaIOCTLWrite(PaWinWdmPin* pPin, ULONG* pPosition) -{ - PaError result = paNoError; - KSPROPERTY propIn; - KSAUDIO_POSITION propOut; - - PA_LOGE_; - - propIn.Set = KSPROPSETID_Audio; - propIn.Id = KSPROPERTY_AUDIO_POSITION; - propIn.Flags = KSPROPERTY_TYPE_GET; - - result = WdmSyncIoctl(pPin->handle, - IOCTL_KS_PROPERTY, - &propIn, sizeof(KSPROPERTY), - &propOut, sizeof(KSAUDIO_POSITION), - NULL); - - if (result == paNoError) - { - *pPosition = (ULONG)(propOut.WriteOffset); - } - else - { - PA_DEBUG(("Failed to get audio write position!\n")); - } - - PA_LOGL_; - - return result; - -} - -/***********************************************************************************************/ - -/** -* Create a new filter object. -*/ -static PaWinWdmFilter* FilterNew( PaWDMKSType type, DWORD devNode, const wchar_t* filterName, const wchar_t* friendlyName, PaError* error ) -{ - PaWinWdmFilter* filter = 0; - PaError result; - - /* Allocate the new filter object */ - filter = (PaWinWdmFilter*)PaUtil_AllocateMemory( sizeof(PaWinWdmFilter) ); - if( !filter ) - { - result = paInsufficientMemory; - goto error; - } - - PA_DEBUG(("FilterNew: Creating filter '%S'\n", friendlyName)); - - /* Set type flag */ - filter->devInfo.streamingType = type; - - /* Store device node */ - filter->deviceNode = devNode; - - /* Zero the filter object - done by AllocateMemory */ - /* memset( (void*)filter, 0, sizeof(PaWinWdmFilter) ); */ - - /* Copy the filter name */ - wcsncpy(filter->devInfo.filterPath, filterName, MAX_PATH); - - /* Copy the friendly name */ - wcsncpy(filter->friendlyName, friendlyName, MAX_PATH); - - PA_DEBUG(("FilterNew: Opening filter...\n", friendlyName)); - - /* Open the filter handle */ - result = FilterUse(filter); - if( result != paNoError ) - { - goto error; - } - - /* Get pin count */ - result = WdmGetPinPropertySimple - ( - filter->handle, - 0, - &KSPROPSETID_Pin, - KSPROPERTY_PIN_CTYPES, - &filter->pinCount, - sizeof(filter->pinCount), - NULL); - - if( result != paNoError) - { - goto error; - } - - /* Get connections & nodes for filter */ - result = WdmGetPropertyMulti( - filter->handle, - &KSPROPSETID_Topology, - KSPROPERTY_TOPOLOGY_CONNECTIONS, - &filter->connections); - - if( result != paNoError) - { - goto error; - } - - result = WdmGetPropertyMulti( - filter->handle, - &KSPROPSETID_Topology, - KSPROPERTY_TOPOLOGY_NODES, - &filter->nodes); - - if( result != paNoError) - { - goto error; - } - - /* For debugging purposes */ - DumpConnectionsAndNodes(filter); - - /* Get product GUID (it might not be supported) */ - { - KSCOMPONENTID compId; - if (WdmGetPropertySimple(filter->handle, &KSPROPSETID_General, KSPROPERTY_GENERAL_COMPONENTID, &compId, sizeof(KSCOMPONENTID)) == paNoError) - { - filter->devInfo.deviceProductGuid = compId.Product; - } - } - - /* This section is not executed for topology filters */ - if (type != Type_kNotUsed) - { - /* 
Initialize the pins */ - result = FilterInitializePins(filter); - - if( result != paNoError) - { - goto error; - } - } - - /* Close the filter handle for now - * It will be opened later when needed */ - FilterRelease(filter); - - *error = paNoError; - return filter; - -error: - PA_DEBUG(("FilterNew: Error %d\n", result)); - /* - Error cleanup - */ - FilterFree(filter); - - *error = result; - return NULL; -} - -/** -* Add reference to filter -*/ -static void FilterAddRef( PaWinWdmFilter* filter ) -{ - if (filter != 0) - { - filter->filterRefCount++; - } -} - - -/** -* Initialize the pins of the filter. This is separated from FilterNew because this might fail if there is another -* process using the pin(s). -*/ -PaError FilterInitializePins( PaWinWdmFilter* filter ) -{ - PaError result = paNoError; - int pinId; - - if (filter->devInfo.streamingType == Type_kNotUsed) - return paNoError; - - if (filter->pins != NULL) - return paNoError; - - /* Allocate pointer array to hold the pins */ - filter->pins = (PaWinWdmPin**)PaUtil_AllocateMemory( sizeof(PaWinWdmPin*) * filter->pinCount ); - if( !filter->pins ) - { - result = paInsufficientMemory; - goto error; - } - - /* Create all the pins we can */ - for(pinId = 0; pinId < filter->pinCount; pinId++) - { - /* Create the pin with this Id */ - PaWinWdmPin* newPin; - newPin = PinNew(filter, pinId, &result); - if( result == paInsufficientMemory ) - goto error; - if( newPin != NULL ) - { - filter->pins[pinId] = newPin; - ++filter->validPinCount; - } - else - { - filter->pins[pinId] = 0; - } - } - - if (filter->validPinCount == 0) - { - result = paDeviceUnavailable; - goto error; - } - - return paNoError; - -error: - - if (filter->pins) - { - for (pinId = 0; pinId < filter->pinCount; ++pinId) - { - if (filter->pins[pinId]) - { - PinFree(filter->pins[pinId]); - filter->pins[pinId] = 0; - } - } - PaUtil_FreeMemory( filter->pins ); - filter->pins = 0; - } - - return result; -} - - -/** -* Free a previously created filter -*/ -static void FilterFree(PaWinWdmFilter* filter) -{ - PA_LOGL_; - if( filter ) - { - if (--filter->filterRefCount > 0) - { - /* Ok, a stream has a ref count to this filter */ - return; - } - - if ( filter->topologyFilter ) - { - FilterFree(filter->topologyFilter); - filter->topologyFilter = 0; - } - if ( filter->pins ) - { - int pinId; - for( pinId = 0; pinId < filter->pinCount; pinId++ ) - PinFree(filter->pins[pinId]); - PaUtil_FreeMemory( filter->pins ); - filter->pins = 0; - } - if( filter->connections ) - { - PaUtil_FreeMemory(filter->connections); - filter->connections = 0; - } - if( filter->nodes ) - { - PaUtil_FreeMemory(filter->nodes); - filter->nodes = 0; - } - if( filter->handle ) - CloseHandle( filter->handle ); - PaUtil_FreeMemory( filter ); - } - PA_LOGE_; -} - -/** -* Reopen the filter handle if necessary so it can be used -**/ -static PaError FilterUse(PaWinWdmFilter* filter) -{ - assert( filter ); - - PA_LOGE_; - if( filter->handle == NULL ) - { - /* Open the filter */ - filter->handle = CreateFileW( - filter->devInfo.filterPath, - GENERIC_READ | GENERIC_WRITE, - 0, - NULL, - OPEN_EXISTING, - FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED, - NULL); - - if( filter->handle == NULL ) - { - return paDeviceUnavailable; - } - } - filter->usageCount++; - PA_LOGL_; - return paNoError; -} - -/** -* Release the filter handle if nobody is using it -**/ -static void FilterRelease(PaWinWdmFilter* filter) -{ - assert( filter ); - assert( filter->usageCount > 0 ); - - PA_LOGE_; - /* Check first topology filter, if used */ - if 
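/* Note: PaWinWdmFilter carries two distinct counters. filterRefCount
 * governs object lifetime (FilterAddRef / FilterFree), while usageCount
 * governs the kernel handle (FilterUse / FilterRelease), so the CreateFileW
 * handle is held only while something actually needs it. A minimal sketch
 * of the handle-scoped pattern (ExampleWithFilterHandle is hypothetical):
 */
static PaError ExampleWithFilterHandle(PaWinWdmFilter* filter)
{
    PaError result = FilterUse(filter);  /* reopens the handle if closed */
    if (result != paNoError)
        return result;
    /* issue IOCTLs against filter->handle here */
    FilterRelease(filter);               /* closed again at usageCount == 0 */
    return paNoError;
}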
(filter->topologyFilter != NULL && filter->topologyFilter->handle != NULL) - { - FilterRelease(filter->topologyFilter); - } - - filter->usageCount--; - if( filter->usageCount == 0 ) - { - if( filter->handle != NULL ) - { - CloseHandle( filter->handle ); - filter->handle = NULL; - } - } - PA_LOGL_; -} - -/** -* Create a render or playback pin using the supplied format -**/ -static PaWinWdmPin* FilterCreatePin(PaWinWdmFilter* filter, - int pinId, - const WAVEFORMATEX* wfex, - PaError* error) -{ - PaError result = paNoError; - PaWinWdmPin* pin = NULL; - assert( filter ); - assert( pinId < filter->pinCount ); - pin = filter->pins[pinId]; - assert( pin ); - result = PinSetFormat(pin,wfex); - if( result == paNoError ) - { - result = PinInstantiate(pin); - } - *error = result; - return result == paNoError ? pin : 0; -} - -static const wchar_t kUsbPrefix[] = L"\\\\?\\USB"; - -static BOOL IsUSBDevice(const wchar_t* devicePath) -{ - /* Alex Lessard pointed out that different devices might present the device path with - lower case letters. */ - return (_wcsnicmp(devicePath, kUsbPrefix, sizeof(kUsbPrefix)/sizeof(kUsbPrefix[0]) ) == 0); -} - -/* This should make it more language tolerant, I hope... */ -static const wchar_t kUsbNamePrefix[] = L"USB Audio"; - -static BOOL IsNameUSBAudioDevice(const wchar_t* friendlyName) -{ - return (_wcsnicmp(friendlyName, kUsbNamePrefix, sizeof(kUsbNamePrefix)/sizeof(kUsbNamePrefix[0])) == 0); -} - -typedef enum _tag_EAlias -{ - Alias_kRender = (1<<0), - Alias_kCapture = (1<<1), - Alias_kRealtime = (1<<2), -} EAlias; - -/* Trim whitespace from string */ -static void TrimString(wchar_t* str, size_t length) -{ - wchar_t* s = str; - wchar_t* e = 0; - - /* Find start of string */ - while (iswspace(*s)) ++s; - e=s+min(length,wcslen(s))-1; - - /* Find end of string */ - while(e>s && iswspace(*e)) --e; - ++e; - - length = e - s; - memmove(str, s, length * sizeof(wchar_t)); - str[length] = 0; -} - -/** -* Build the list of available filters -* Use the SetupDi API to enumerate all devices in the KSCATEGORY_AUDIO which -* have a KSCATEGORY_RENDER or KSCATEGORY_CAPTURE alias. For each of these -* devices initialise a PaWinWdmFilter structure by calling our NewFilter() -* function. We enumerate devices twice, once to count how many there are, -* and once to initialize the PaWinWdmFilter structures. -* -* Vista and later: Also check KSCATEGORY_REALTIME for WaveRT devices. 
-*/ -//PaError BuildFilterList( PaWinWdmHostApiRepresentation* wdmHostApi, int* noOfPaDevices ) -PaWinWdmFilter** BuildFilterList( int* pFilterCount, int* pNoOfPaDevices, PaError* pResult ) -{ - PaWinWdmFilter** ppFilters = NULL; - HDEVINFO handle = NULL; - int device; - int invalidDevices; - int slot; - SP_DEVICE_INTERFACE_DATA interfaceData; - SP_DEVICE_INTERFACE_DATA aliasData; - SP_DEVINFO_DATA devInfoData; - int noError; - const int sizeInterface = sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA) + (MAX_PATH * sizeof(WCHAR)); - unsigned char interfaceDetailsArray[sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA) + (MAX_PATH * sizeof(WCHAR))]; - SP_DEVICE_INTERFACE_DETAIL_DATA_W* devInterfaceDetails = (SP_DEVICE_INTERFACE_DETAIL_DATA_W*)interfaceDetailsArray; - const GUID* category = (const GUID*)&KSCATEGORY_AUDIO; - const GUID* alias_render = (const GUID*)&KSCATEGORY_RENDER; - const GUID* alias_capture = (const GUID*)&KSCATEGORY_CAPTURE; - const GUID* category_realtime = (const GUID*)&KSCATEGORY_REALTIME; - DWORD aliasFlags; - PaWDMKSType streamingType; - int filterCount = 0; - int noOfPaDevices = 0; - - PA_LOGE_; - - assert(pFilterCount != NULL); - assert(pNoOfPaDevices != NULL); - assert(pResult != NULL); - - devInterfaceDetails->cbSize = sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA_W); - *pFilterCount = 0; - *pNoOfPaDevices = 0; - - /* Open a handle to search for devices (filters) */ - handle = SetupDiGetClassDevs(category,NULL,NULL,DIGCF_PRESENT | DIGCF_DEVICEINTERFACE); - if( handle == INVALID_HANDLE_VALUE ) - { - *pResult = paUnanticipatedHostError; - return NULL; - } - PA_DEBUG(("Setup called\n")); - - /* First let's count the number of devices so we can allocate a list */ - invalidDevices = 0; - for( device = 0;;device++ ) - { - interfaceData.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA); - interfaceData.Reserved = 0; - aliasData.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA); - aliasData.Reserved = 0; - noError = SetupDiEnumDeviceInterfaces(handle,NULL,category,device,&interfaceData); - PA_DEBUG(("Enum called\n")); - if( !noError ) - break; /* No more devices */ - - /* Check this one has the render or capture alias */ - aliasFlags = 0; - noError = SetupDiGetDeviceInterfaceAlias(handle,&interfaceData,alias_render,&aliasData); - PA_DEBUG(("noError = %d\n",noError)); - if(noError) - { - if(aliasData.Flags && (!(aliasData.Flags & SPINT_REMOVED))) - { - PA_DEBUG(("Device %d has render alias\n",device)); - aliasFlags |= Alias_kRender; /* Has render alias */ - } - else - { - PA_DEBUG(("Device %d has no render alias\n",device)); - } - } - noError = SetupDiGetDeviceInterfaceAlias(handle,&interfaceData,alias_capture,&aliasData); - if(noError) - { - if(aliasData.Flags && (!(aliasData.Flags & SPINT_REMOVED))) - { - PA_DEBUG(("Device %d has capture alias\n",device)); - aliasFlags |= Alias_kCapture; /* Has capture alias */ - } - else - { - PA_DEBUG(("Device %d has no capture alias\n",device)); - } - } - if(!aliasFlags) - invalidDevices++; /* This was not a valid capture or render audio device */ - } - /* Remember how many there are */ - filterCount = device-invalidDevices; - - PA_DEBUG(("Interfaces found: %d\n",device-invalidDevices)); - - /* Now allocate the list of pointers to devices */ - ppFilters = (PaWinWdmFilter**)PaUtil_AllocateMemory( sizeof(PaWinWdmFilter*) * filterCount); - if( ppFilters == 0 ) - { - if(handle != NULL) - SetupDiDestroyDeviceInfoList(handle); - *pResult = paInsufficientMemory; - return NULL; - } - - /* Now create filter objects for each interface found */ - slot = 0; - for( device = 0;;device++ ) 
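/* Note: both enumeration passes repeat one test several times over: resolve
 * an interface alias, then require Flags to be nonzero and SPINT_REMOVED to
 * be clear. A sketch of a helper capturing that pattern (ExampleHasAlias is
 * hypothetical, not part of the original source):
 */
static BOOL ExampleHasAlias(HDEVINFO h, SP_DEVICE_INTERFACE_DATA* itf,
                            const GUID* alias)
{
    SP_DEVICE_INTERFACE_DATA aliasData;
    aliasData.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA);
    aliasData.Reserved = 0;
    if (!SetupDiGetDeviceInterfaceAlias(h, itf, alias, &aliasData))
        return FALSE;
    return aliasData.Flags && !(aliasData.Flags & SPINT_REMOVED);
}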
- { - interfaceData.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA); - interfaceData.Reserved = 0; - aliasData.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA); - aliasData.Reserved = 0; - devInfoData.cbSize = sizeof(SP_DEVINFO_DATA); - devInfoData.Reserved = 0; - streamingType = Type_kWaveCyclic; - - noError = SetupDiEnumDeviceInterfaces(handle,NULL,category,device,&interfaceData); - if( !noError ) - break; /* No more devices */ - - /* Check this one has the render or capture alias */ - aliasFlags = 0; - noError = SetupDiGetDeviceInterfaceAlias(handle,&interfaceData,alias_render,&aliasData); - if(noError) - { - if(aliasData.Flags && (!(aliasData.Flags & SPINT_REMOVED))) - { - PA_DEBUG(("Device %d has render alias\n",device)); - aliasFlags |= Alias_kRender; /* Has render alias */ - } - } - noError = SetupDiGetDeviceInterfaceAlias(handle,&interfaceData,alias_capture,&aliasData); - if(noError) - { - if(aliasData.Flags && (!(aliasData.Flags & SPINT_REMOVED))) - { - PA_DEBUG(("Device %d has capture alias\n",device)); - aliasFlags |= Alias_kCapture; /* Has capture alias */ - } - } - if(!aliasFlags) - { - continue; /* This was not a valid capture or render audio device */ - } - else - { - /* Check if filter is WaveRT, if not it is a WaveCyclic */ - noError = SetupDiGetDeviceInterfaceAlias(handle,&interfaceData,category_realtime,&aliasData); - if (noError) - { - PA_DEBUG(("Device %d has realtime alias\n",device)); - aliasFlags |= Alias_kRealtime; - streamingType = Type_kWaveRT; - } - } - - noError = SetupDiGetDeviceInterfaceDetailW(handle,&interfaceData,devInterfaceDetails,sizeInterface,NULL,&devInfoData); - if( noError ) - { - DWORD type; - WCHAR friendlyName[MAX_PATH] = {0}; - DWORD sizeFriendlyName; - PaWinWdmFilter* newFilter = 0; - - PaError result = paNoError; - /* Try to get the "friendly name" for this interface */ - sizeFriendlyName = sizeof(friendlyName); - - if (IsEarlierThanVista() && IsUSBDevice(devInterfaceDetails->DevicePath)) - { - /* XP and USB audio device needs to look elsewhere, otherwise it'll only be a "USB Audio Device". Not - very literate. 
*/ - if (!SetupDiGetDeviceRegistryPropertyW(handle, - &devInfoData, - SPDRP_LOCATION_INFORMATION, - &type, - (BYTE*)friendlyName, - sizeof(friendlyName), - NULL)) - { - friendlyName[0] = 0; - } - } - - if (friendlyName[0] == 0 || IsNameUSBAudioDevice(friendlyName)) - { - /* Fix contributed by Ben Allison - * Removed KEY_SET_VALUE from flags on following call - * as its causes failure when running without admin rights - * and it was not required */ - HKEY hkey=SetupDiOpenDeviceInterfaceRegKey(handle,&interfaceData,0,KEY_QUERY_VALUE); - if(hkey!=INVALID_HANDLE_VALUE) - { - noError = RegQueryValueExW(hkey,L"FriendlyName",0,&type,(BYTE*)friendlyName,&sizeFriendlyName); - if( noError == ERROR_SUCCESS ) - { - PA_DEBUG(("Interface %d, Name: %s\n",device,friendlyName)); - RegCloseKey(hkey); - } - else - { - friendlyName[0] = 0; - } - } - } - - TrimString(friendlyName, sizeFriendlyName); - - newFilter = FilterNew(streamingType, - devInfoData.DevInst, - devInterfaceDetails->DevicePath, - friendlyName, - &result); - - if( result == paNoError ) - { - int pin; - unsigned filterIOs = 0; - - /* Increment number of "devices" */ - for (pin = 0; pin < newFilter->pinCount; ++pin) - { - PaWinWdmPin* pPin = newFilter->pins[pin]; - if (pPin == NULL) - continue; - - filterIOs += max(1, pPin->inputCount); - } - - noOfPaDevices += filterIOs; - - PA_DEBUG(("Filter (%s) created with %d valid pins (total I/Os: %u)\n", ((newFilter->devInfo.streamingType==Type_kWaveRT)?"WaveRT":"WaveCyclic"), newFilter->validPinCount, filterIOs)); - - assert(slot < filterCount); - - ppFilters[slot] = newFilter; - - slot++; - } - else - { - PA_DEBUG(("Filter NOT created\n")); - /* As there are now less filters than we initially thought - * we must reduce the count by one */ - filterCount--; - } - } - } - - /* Clean up */ - if(handle != NULL) - SetupDiDestroyDeviceInfoList(handle); - - *pFilterCount = filterCount; - *pNoOfPaDevices = noOfPaDevices; - - return ppFilters; -} - -typedef struct PaNameHashIndex -{ - unsigned index; - unsigned count; - ULONG hash; - struct PaNameHashIndex *next; -} PaNameHashIndex; - -typedef struct PaNameHashObject -{ - PaNameHashIndex* list; - PaUtilAllocationGroup* allocGroup; -} PaNameHashObject; - -static ULONG GetNameHash(const wchar_t* str, const BOOL input) -{ - /* This is to make sure that a name that exists as both input & output won't get the same hash value */ - const ULONG fnv_prime = (input ? 
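/* Note the device fan-out rule used above: WDM/KS exposes one PortAudio
 * device per usable pin, except that a capture pin with a mux becomes one
 * device per mux input, hence max(1, pPin->inputCount). For example, a
 * filter with one render pin and one capture pin that has three mux inputs
 * contributes 1 + 3 = 4 PortAudio devices. The counting loop, restated as a
 * sketch (ExampleCountFilterDevices is hypothetical):
 */
static unsigned ExampleCountFilterDevices(PaWinWdmFilter* filter)
{
    unsigned n = 0;
    int pin;
    for (pin = 0; pin < filter->pinCount; ++pin)
    {
        if (filter->pins[pin] != NULL)
            n += max(1, filter->pins[pin]->inputCount);
    }
    return n;
}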
0x811C9DD7 : 0x811FEB0B); - ULONG hash = 0; - for(; *str != 0; str++) - { - hash *= fnv_prime; - hash ^= (*str); - } - assert(hash != 0); - return hash; -} - -static PaError CreateHashEntry(PaNameHashObject* obj, const wchar_t* name, const BOOL input) -{ - ULONG hash = GetNameHash(name, input); - PaNameHashIndex * pLast = NULL; - PaNameHashIndex * p = obj->list; - while (p != 0) - { - if (p->hash == hash) - { - break; - } - pLast = p; - p = p->next; - } - if (p == NULL) - { - p = (PaNameHashIndex*)PaUtil_GroupAllocateMemory(obj->allocGroup, sizeof(PaNameHashIndex)); - if (p == NULL) - { - return paInsufficientMemory; - } - p->hash = hash; - p->count = 1; - if (pLast != 0) - { - assert(pLast->next == 0); - pLast->next = p; - } - if (obj->list == 0) - { - obj->list = p; - } - } - else - { - ++p->count; - } - return paNoError; -} - -static PaError InitNameHashObject(PaNameHashObject* obj, PaWinWdmFilter* pFilter) -{ - int i; - - obj->allocGroup = PaUtil_CreateAllocationGroup(); - if (obj->allocGroup == NULL) - { - return paInsufficientMemory; - } - - for (i = 0; i < pFilter->pinCount; ++i) - { - unsigned m; - PaWinWdmPin* pin = pFilter->pins[i]; - - if (pin == NULL) - continue; - - for (m = 0; m < max(1, pin->inputCount); ++m) - { - const BOOL isInput = (pin->dataFlow == KSPIN_DATAFLOW_OUT); - const wchar_t* name = (pin->inputs == NULL) ? pin->friendlyName : pin->inputs[m]->friendlyName; - - PaError result = CreateHashEntry(obj, name, isInput); - - if (result != paNoError) - { - return result; - } - } - } - return paNoError; -} - -static void DeinitNameHashObject(PaNameHashObject* obj) -{ - assert(obj != 0); - PaUtil_FreeAllAllocations(obj->allocGroup); - PaUtil_DestroyAllocationGroup(obj->allocGroup); - memset(obj, 0, sizeof(PaNameHashObject)); -} - -static unsigned GetNameIndex(PaNameHashObject* obj, const wchar_t* name, const BOOL input) -{ - ULONG hash = GetNameHash(name, input); - PaNameHashIndex* p = obj->list; - while (p != NULL) - { - if (p->hash == hash) - { - if (p->count > 1) - { - return (++p->index); - } - else - { - return 0; - } - } - - p = p->next; - } - // Should never get here!! 
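/* Note: the name hash implements two-pass duplicate handling. In the first
 * pass InitNameHashObject counts every (name, direction) occurrence; in the
 * second, GetNameIndex returns 0 for unique names and an incrementing
 * 1-based index for duplicates, which ScanDeviceInfos appends to the
 * composite device name. The direction-specific FNV prime in GetNameHash
 * keeps an input "Line" and an output "Line" in separate buckets.
 * Illustration (hypothetical values):
 *
 *   "Speakers" counted once  -> GetNameIndex yields 0 (no suffix added)
 *   "Line" counted twice     -> yields 1, then 2 ("Line 1", "Line 2")
 */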
- assert(FALSE); - return 0; -} - -static PaError ScanDeviceInfos( struct PaUtilHostApiRepresentation *hostApi, PaHostApiIndex hostApiIndex, void **scanResults, int *newDeviceCount ) -{ - PaWinWdmHostApiRepresentation *wdmHostApi = (PaWinWdmHostApiRepresentation*)hostApi; - PaError result = paNoError; - PaWinWdmFilter** ppFilters = 0; - PaWinWDMScanDeviceInfosResults *outArgument = 0; - int filterCount = 0; - int totalDeviceCount = 0; - int idxDevice = 0; - DWORD defaultInDevPathSize = 0; - DWORD defaultOutDevPathSize = 0; - wchar_t* defaultInDevPath = 0; - wchar_t* defaultOutDevPath = 0; - - ppFilters = BuildFilterList( &filterCount, &totalDeviceCount, &result ); - if( result != paNoError ) - { - goto error; - } - - // Get hold of default device paths for capture & playback - if( waveInMessage(0, DRV_QUERYDEVICEINTERFACESIZE, (DWORD_PTR)&defaultInDevPathSize, 0 ) == MMSYSERR_NOERROR ) - { - defaultInDevPath = (wchar_t *)PaUtil_AllocateMemory((defaultInDevPathSize + 1) * sizeof(wchar_t)); - waveInMessage(0, DRV_QUERYDEVICEINTERFACE, (DWORD_PTR)defaultInDevPath, defaultInDevPathSize); - } - if( waveOutMessage(0, DRV_QUERYDEVICEINTERFACESIZE, (DWORD_PTR)&defaultOutDevPathSize, 0 ) == MMSYSERR_NOERROR ) - { - defaultOutDevPath = (wchar_t *)PaUtil_AllocateMemory((defaultOutDevPathSize + 1) * sizeof(wchar_t)); - waveOutMessage(0, DRV_QUERYDEVICEINTERFACE, (DWORD_PTR)defaultOutDevPath, defaultOutDevPathSize); - } - - if( totalDeviceCount > 0 ) - { - PaWinWdmDeviceInfo *deviceInfoArray = 0; - int idxFilter; - int i; - unsigned devIsDefaultIn = 0, devIsDefaultOut = 0; - - /* Allocate the out param for all the info we need */ - outArgument = (PaWinWDMScanDeviceInfosResults *) PaUtil_GroupAllocateMemory( - wdmHostApi->allocations, sizeof(PaWinWDMScanDeviceInfosResults) ); - if( !outArgument ) - { - result = paInsufficientMemory; - goto error; - } - - outArgument->defaultInputDevice = paNoDevice; - outArgument->defaultOutputDevice = paNoDevice; - - outArgument->deviceInfos = (PaDeviceInfo**)PaUtil_GroupAllocateMemory( - wdmHostApi->allocations, sizeof(PaDeviceInfo*) * totalDeviceCount ); - if( !outArgument->deviceInfos ) - { - result = paInsufficientMemory; - goto error; - } - - /* allocate all device info structs in a contiguous block */ - deviceInfoArray = (PaWinWdmDeviceInfo*)PaUtil_GroupAllocateMemory( - wdmHostApi->allocations, sizeof(PaWinWdmDeviceInfo) * totalDeviceCount ); - if( !deviceInfoArray ) - { - result = paInsufficientMemory; - goto error; - } - - /* Make sure all items in array */ - for( i = 0 ; i < totalDeviceCount; ++i ) - { - PaDeviceInfo *deviceInfo = &deviceInfoArray[i].inheritedDeviceInfo; - deviceInfo->structVersion = 2; - deviceInfo->hostApi = hostApiIndex; - deviceInfo->name = 0; - outArgument->deviceInfos[ i ] = deviceInfo; - } - - idxDevice = 0; - for (idxFilter = 0; idxFilter < filterCount; ++idxFilter) - { - PaNameHashObject nameHash = {0}; - PaWinWdmFilter* pFilter = ppFilters[idxFilter]; - if( pFilter == NULL ) - continue; - - if (InitNameHashObject(&nameHash, pFilter) != paNoError) - { - DeinitNameHashObject(&nameHash); - continue; - } - - devIsDefaultIn = (defaultInDevPath && (_wcsicmp(pFilter->devInfo.filterPath, defaultInDevPath) == 0)); - devIsDefaultOut = (defaultOutDevPath && (_wcsicmp(pFilter->devInfo.filterPath, defaultOutDevPath) == 0)); - - for (i = 0; i < pFilter->pinCount; ++i) - { - unsigned m; - ULONG nameIndex = 0; - ULONG nameIndexHash = 0; - PaWinWdmPin* pin = pFilter->pins[i]; - - if (pin == NULL) - continue; - - for (m = 0; m < max(1, 
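/* Note: the default-device detection above piggybacks on WinMM. Sending
 * DRV_QUERYDEVICEINTERFACESIZE and DRV_QUERYDEVICEINTERFACE to wave device
 * 0 (treated here as the preferred endpoint) yields the device-interface
 * path that is later matched against each filter's devInfo.filterPath with
 * _wcsicmp. Restated for capture only, as a sketch
 * (ExampleQueryDefaultCapturePath is hypothetical; the caller frees the
 * result with PaUtil_FreeMemory):
 */
static wchar_t* ExampleQueryDefaultCapturePath(void)
{
    DWORD size = 0;
    wchar_t* path = NULL;
    if (waveInMessage((HWAVEIN)0, DRV_QUERYDEVICEINTERFACESIZE,
                      (DWORD_PTR)&size, 0) != MMSYSERR_NOERROR)
        return NULL;
    path = (wchar_t*)PaUtil_AllocateMemory((size + 1) * sizeof(wchar_t));
    if (path != NULL)
        waveInMessage((HWAVEIN)0, DRV_QUERYDEVICEINTERFACE,
                      (DWORD_PTR)path, size);
    return path;
}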
pin->inputCount); ++m) - { - PaWinWdmDeviceInfo *wdmDeviceInfo = (PaWinWdmDeviceInfo *)outArgument->deviceInfos[idxDevice]; - PaDeviceInfo *deviceInfo = &wdmDeviceInfo->inheritedDeviceInfo; - wchar_t localCompositeName[MAX_PATH]; - unsigned nameIndex = 0; - const BOOL isInput = (pin->dataFlow == KSPIN_DATAFLOW_OUT); - - wdmDeviceInfo->filter = pFilter; - - deviceInfo->structVersion = 2; - deviceInfo->hostApi = hostApiIndex; - deviceInfo->name = wdmDeviceInfo->compositeName; - /* deviceInfo->hostApiSpecificDeviceInfo = &pFilter->devInfo; */ - - wdmDeviceInfo->pin = pin->pinId; - - /* Get the name of the "device" */ - if (pin->inputs == NULL) - { - wcsncpy(localCompositeName, pin->friendlyName, MAX_PATH); - wdmDeviceInfo->muxPosition = -1; - wdmDeviceInfo->endpointPinId = pin->endpointPinId; - } - else - { - PaWinWdmMuxedInput* input = pin->inputs[m]; - wcsncpy(localCompositeName, input->friendlyName, MAX_PATH); - wdmDeviceInfo->muxPosition = (int)m; - wdmDeviceInfo->endpointPinId = input->endpointPinId; - } - - { - /* Get base length */ - size_t n = wcslen(localCompositeName); - - /* Check if there are more entries with same name (which might very well be the case), if there - are, the name will be postfixed with an index. */ - nameIndex = GetNameIndex(&nameHash, localCompositeName, isInput); - if (nameIndex > 0) - { - /* This name has multiple instances, so we post fix with a number */ - n += _snwprintf(localCompositeName + n, MAX_PATH - n, L" %u", nameIndex); - } - /* Postfix with filter name */ - _snwprintf(localCompositeName + n, MAX_PATH - n, L" (%s)", pFilter->friendlyName); - } - - /* Convert wide char string to utf-8 */ - WideCharToMultiByte(CP_UTF8, 0, localCompositeName, -1, wdmDeviceInfo->compositeName, MAX_PATH, NULL, NULL); - - /* NB! WDM/KS has no concept of a full-duplex device, each pin is either an input or an output */ - if (isInput) - { - /* INPUT ! */ - deviceInfo->maxInputChannels = pin->maxChannels; - deviceInfo->maxOutputChannels = 0; - - /* RoBi NB: Due to the fact that input audio endpoints in Vista (& later OSs) can be the same device, but with - different input mux settings, there might be a discrepancy between the default input device chosen, and - that which will be used by Portaudio. Not much to do about that unfortunately. - */ - if ((defaultInDevPath == 0 || devIsDefaultIn) && - outArgument->defaultInputDevice == paNoDevice) - { - outArgument->defaultInputDevice = idxDevice; - } - } - else - { - /* OUTPUT ! */ - deviceInfo->maxInputChannels = 0; - deviceInfo->maxOutputChannels = pin->maxChannels; - - if ((defaultOutDevPath == 0 || devIsDefaultOut) && - outArgument->defaultOutputDevice == paNoDevice) - { - outArgument->defaultOutputDevice = idxDevice; - } - } - - /* These low values are not very useful because - * a) The lowest latency we end up with can depend on many factors such - * as the device buffer sizes/granularities, sample rate, channels and format - * b) We cannot know the device buffer sizes until we try to open/use it at - * a particular setting - * So: we give 512x48000Hz frames as the default low input latency - **/ - switch (pFilter->devInfo.streamingType) - { - case Type_kWaveCyclic: - if (IsEarlierThanVista()) - { - /* XP doesn't tolerate low latency, unless the Process Priority Class is set to REALTIME_PRIORITY_CLASS - through SetPriorityClass, then 10 ms is quite feasible. However, one should then bear in mind that ALL of - the process is running in REALTIME_PRIORITY_CLASS, which might not be appropriate for an application with - a GUI . 
In this case it is advisable to separate the audio engine in another process and use IPC to communicate - with it. */ - deviceInfo->defaultLowInputLatency = 0.02; - deviceInfo->defaultLowOutputLatency = 0.02; - } - else - { - /* This is a conservative estimate. Most WaveCyclic drivers will limit the available latency, but f.i. my Edirol - PCR-A30 can reach 3 ms latency easily... */ - deviceInfo->defaultLowInputLatency = 0.01; - deviceInfo->defaultLowOutputLatency = 0.01; - } - deviceInfo->defaultHighInputLatency = (4096.0/48000.0); - deviceInfo->defaultHighOutputLatency = (4096.0/48000.0); - deviceInfo->defaultSampleRate = (double)(pin->defaultSampleRate); - break; - case Type_kWaveRT: - /* This is also a conservative estimate, based on WaveRT polled mode. In polled mode, the latency will be dictated - by the buffer size given by the driver. */ - deviceInfo->defaultLowInputLatency = 0.01; - deviceInfo->defaultLowOutputLatency = 0.01; - deviceInfo->defaultHighInputLatency = 0.04; - deviceInfo->defaultHighOutputLatency = 0.04; - deviceInfo->defaultSampleRate = (double)(pin->defaultSampleRate); - break; - default: - assert(0); - break; - } - - /* Add reference to filter */ - FilterAddRef(wdmDeviceInfo->filter); - - assert(idxDevice < totalDeviceCount); - ++idxDevice; - } - } - - /* If no one has add ref'd the filter, drop it */ - if (pFilter->filterRefCount == 0) - { - FilterFree(pFilter); - } - - /* Deinitialize name hash object */ - DeinitNameHashObject(&nameHash); - } - } - - *scanResults = outArgument; - *newDeviceCount = idxDevice; - return result; - -error: - result = DisposeDeviceInfos(hostApi, outArgument, totalDeviceCount); - - return result; -} - -static PaError CommitDeviceInfos( struct PaUtilHostApiRepresentation *hostApi, PaHostApiIndex index, void *scanResults, int deviceCount ) -{ - PaWinWdmHostApiRepresentation *wdmHostApi = (PaWinWdmHostApiRepresentation*)hostApi; - - hostApi->info.deviceCount = 0; - hostApi->info.defaultInputDevice = paNoDevice; - hostApi->info.defaultOutputDevice = paNoDevice; - - /* Free any old memory which might be in the device info */ - if( hostApi->deviceInfos ) - { - PaWinWDMScanDeviceInfosResults* localScanResults = (PaWinWDMScanDeviceInfosResults*)PaUtil_GroupAllocateMemory( - wdmHostApi->allocations, sizeof(PaWinWDMScanDeviceInfosResults)); - localScanResults->deviceInfos = hostApi->deviceInfos; - - DisposeDeviceInfos(hostApi, &localScanResults, hostApi->info.deviceCount); - - hostApi->deviceInfos = NULL; - } - - if( scanResults != NULL ) - { - PaWinWDMScanDeviceInfosResults *scanDeviceInfosResults = ( PaWinWDMScanDeviceInfosResults * ) scanResults; - - if( deviceCount > 0 ) - { - /* use the array allocated in ScanDeviceInfos() as our deviceInfos */ - hostApi->deviceInfos = scanDeviceInfosResults->deviceInfos; - - hostApi->info.defaultInputDevice = scanDeviceInfosResults->defaultInputDevice; - hostApi->info.defaultOutputDevice = scanDeviceInfosResults->defaultOutputDevice; - - hostApi->info.deviceCount = deviceCount; - } - - PaUtil_GroupFreeMemory( wdmHostApi->allocations, scanDeviceInfosResults ); - } - - return paNoError; - -} - -static PaError DisposeDeviceInfos( struct PaUtilHostApiRepresentation *hostApi, void *scanResults, int deviceCount ) -{ - PaWinWdmHostApiRepresentation *winDsHostApi = (PaWinWdmHostApiRepresentation*)hostApi; - - if( scanResults != NULL ) - { - PaWinWDMScanDeviceInfosResults *scanDeviceInfosResults = ( PaWinWDMScanDeviceInfosResults * ) scanResults; - - if( scanDeviceInfosResults->deviceInfos ) - { - int i; - for (i = 
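/* Summary of the default-latency policy above, in seconds (4096.0/48000.0
 * is roughly 0.085 s), restated as a sketch (ExampleDefaultLatencies is
 * hypothetical):
 */
static void ExampleDefaultLatencies(PaWDMKSType type, BOOL earlierThanVista,
                                    double* defaultLow, double* defaultHigh)
{
    if (type == Type_kWaveCyclic)
    {
        /* XP tolerates low latency only under REALTIME_PRIORITY_CLASS */
        *defaultLow  = earlierThanVista ? 0.02 : 0.01;
        *defaultHigh = 4096.0 / 48000.0;
    }
    else /* Type_kWaveRT: conservative polled-mode estimates */
    {
        *defaultLow  = 0.01;
        *defaultHigh = 0.04;
    }
}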
0; i < deviceCount; ++i) - { - PaWinWdmDeviceInfo* pDevice = (PaWinWdmDeviceInfo*)scanDeviceInfosResults->deviceInfos[i]; - if (pDevice->filter != 0) - { - FilterFree(pDevice->filter); - } - } - - PaUtil_GroupFreeMemory( winDsHostApi->allocations, scanDeviceInfosResults->deviceInfos[0] ); /* all device info structs are allocated in a block so we can destroy them here */ - PaUtil_GroupFreeMemory( winDsHostApi->allocations, scanDeviceInfosResults->deviceInfos ); - } - - PaUtil_GroupFreeMemory( winDsHostApi->allocations, scanDeviceInfosResults ); - } - - return paNoError; - -} - -PaError PaWinWdm_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex ) -{ - PaError result = paNoError; - int deviceCount = 0; - void *scanResults = 0; - PaWinWdmHostApiRepresentation *wdmHostApi = NULL; - - PA_LOGE_; - -#ifdef PA_WDMKS_SET_TREF - tRef = PaUtil_GetTime(); -#endif - - /* - Attempt to load the KSUSER.DLL without which we cannot create pins - We will unload this on termination - */ - if(DllKsUser == NULL) - { - DllKsUser = LoadLibrary(TEXT("ksuser.dll")); - if(DllKsUser == NULL) - goto error; - } - FunctionKsCreatePin = (KSCREATEPIN*)GetProcAddress(DllKsUser, "KsCreatePin"); - if(FunctionKsCreatePin == NULL) - goto error; - - /* Attempt to load AVRT.DLL, if we can't, then we'll just use time critical prio instead... */ - if(paWinWDMKSAvRtEntryPoints.hInstance == NULL) - { - paWinWDMKSAvRtEntryPoints.hInstance = LoadLibrary(TEXT("avrt.dll")); - if (paWinWDMKSAvRtEntryPoints.hInstance != NULL) - { - paWinWDMKSAvRtEntryPoints.AvSetMmThreadCharacteristics = - (HANDLE(WINAPI*)(LPCSTR,LPDWORD))GetProcAddress(paWinWDMKSAvRtEntryPoints.hInstance,"AvSetMmThreadCharacteristicsA"); - paWinWDMKSAvRtEntryPoints.AvRevertMmThreadCharacteristics = - (BOOL(WINAPI*)(HANDLE))GetProcAddress(paWinWDMKSAvRtEntryPoints.hInstance, "AvRevertMmThreadCharacteristics"); - paWinWDMKSAvRtEntryPoints.AvSetMmThreadPriority = - (BOOL(WINAPI*)(HANDLE,PA_AVRT_PRIORITY))GetProcAddress(paWinWDMKSAvRtEntryPoints.hInstance, "AvSetMmThreadPriority"); - } - } - - wdmHostApi = (PaWinWdmHostApiRepresentation*)PaUtil_AllocateMemory( sizeof(PaWinWdmHostApiRepresentation) ); - if( !wdmHostApi ) - { - result = paInsufficientMemory; - goto error; - } - - wdmHostApi->allocations = PaUtil_CreateAllocationGroup(); - if( !wdmHostApi->allocations ) - { - result = paInsufficientMemory; - goto error; - } - - *hostApi = &wdmHostApi->inheritedHostApiRep; - (*hostApi)->info.structVersion = 1; - (*hostApi)->info.type = paWDMKS; - (*hostApi)->info.name = "Windows WDM-KS"; - - /* these are all updated by CommitDeviceInfos() */ - (*hostApi)->info.deviceCount = 0; - (*hostApi)->info.defaultInputDevice = paNoDevice; - (*hostApi)->info.defaultOutputDevice = paNoDevice; - (*hostApi)->deviceInfos = 0; - - result = ScanDeviceInfos(&wdmHostApi->inheritedHostApiRep, hostApiIndex, &scanResults, &deviceCount); - if (result != paNoError) - { - goto error; - } - - CommitDeviceInfos(&wdmHostApi->inheritedHostApiRep, hostApiIndex, scanResults, deviceCount); - - (*hostApi)->Terminate = Terminate; - (*hostApi)->OpenStream = OpenStream; - (*hostApi)->IsFormatSupported = IsFormatSupported; - /* In preparation for hotplug - (*hostApi)->ScanDeviceInfos = ScanDeviceInfos; - (*hostApi)->CommitDeviceInfos = CommitDeviceInfos; - (*hostApi)->DisposeDeviceInfos = DisposeDeviceInfos; - */ - PaUtil_InitializeStreamInterface( &wdmHostApi->callbackStreamInterface, CloseStream, StartStream, - StopStream, AbortStream, IsStreamStopped, IsStreamActive, - GetStreamTime, 
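/* Note: both ksuser.dll (for KsCreatePin) and avrt.dll (for the MMCSS
 * AvSetMmThreadCharacteristics family) are bound at run time above, so the
 * host API can degrade gracefully: ksuser.dll is mandatory, while a missing
 * avrt.dll (pre-Vista) simply means falling back to time-critical thread
 * priority. The pattern, sketched (ExampleBindOptional is hypothetical):
 */
static FARPROC ExampleBindOptional(const TCHAR* dllName, const char* symbol,
                                   HMODULE* outModule)
{
    *outModule = LoadLibrary(dllName);
    if (*outModule == NULL)
        return NULL;  /* caller treats the feature as unavailable */
    return GetProcAddress(*outModule, symbol);
}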
GetStreamCpuLoad, - PaUtil_DummyRead, PaUtil_DummyWrite, - PaUtil_DummyGetReadAvailable, PaUtil_DummyGetWriteAvailable ); - - PaUtil_InitializeStreamInterface( &wdmHostApi->blockingStreamInterface, CloseStream, StartStream, - StopStream, AbortStream, IsStreamStopped, IsStreamActive, - GetStreamTime, PaUtil_DummyGetCpuLoad, - ReadStream, WriteStream, GetStreamReadAvailable, GetStreamWriteAvailable ); - - PA_LOGL_; - return result; - -error: - Terminate( (PaUtilHostApiRepresentation*)wdmHostApi ); - - PA_LOGL_; - return result; -} - - -static void Terminate( struct PaUtilHostApiRepresentation *hostApi ) -{ - PaWinWdmHostApiRepresentation *wdmHostApi = (PaWinWdmHostApiRepresentation*)hostApi; - PA_LOGE_; - - /* Do not unload the libraries */ - if( DllKsUser != NULL ) - { - FreeLibrary( DllKsUser ); - DllKsUser = NULL; - } - - if( paWinWDMKSAvRtEntryPoints.hInstance != NULL ) - { - FreeLibrary( paWinWDMKSAvRtEntryPoints.hInstance ); - paWinWDMKSAvRtEntryPoints.hInstance = NULL; - } - - if( wdmHostApi) - { - PaWinWDMScanDeviceInfosResults* localScanResults = (PaWinWDMScanDeviceInfosResults*)PaUtil_GroupAllocateMemory( - wdmHostApi->allocations, sizeof(PaWinWDMScanDeviceInfosResults)); - localScanResults->deviceInfos = hostApi->deviceInfos; - DisposeDeviceInfos(hostApi, localScanResults, hostApi->info.deviceCount); - - if( wdmHostApi->allocations ) - { - PaUtil_FreeAllAllocations( wdmHostApi->allocations ); - PaUtil_DestroyAllocationGroup( wdmHostApi->allocations ); - } - PaUtil_FreeMemory( wdmHostApi ); - } - PA_LOGL_; -} - -static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi, - const PaStreamParameters *inputParameters, - const PaStreamParameters *outputParameters, - double sampleRate ) -{ - int inputChannelCount, outputChannelCount; - PaSampleFormat inputSampleFormat, outputSampleFormat; - PaWinWdmHostApiRepresentation *wdmHostApi = (PaWinWdmHostApiRepresentation*)hostApi; - PaWinWdmFilter* pFilter; - int result = paFormatIsSupported; - WAVEFORMATEXTENSIBLE wfx; - PaWinWaveFormatChannelMask channelMask; - - PA_LOGE_; - - if( inputParameters ) - { - PaWinWdmDeviceInfo* pDeviceInfo = (PaWinWdmDeviceInfo*)wdmHostApi->inheritedHostApiRep.deviceInfos[inputParameters->device]; - PaWinWdmPin* pin; - unsigned fmt; - unsigned long testFormat = 0; - unsigned validBits = 0; - - inputChannelCount = inputParameters->channelCount; - inputSampleFormat = inputParameters->sampleFormat; - - /* all standard sample formats are supported by the buffer adapter, - this implementation doesn't support any custom sample formats */ - if( inputSampleFormat & paCustomFormat ) - { - PaWinWDM_SetLastErrorInfo(paSampleFormatNotSupported, "IsFormatSupported: Custom input format not supported"); - return paSampleFormatNotSupported; - } - - /* unless alternate device specification is supported, reject the use of - paUseHostApiSpecificDeviceSpecification */ - - if( inputParameters->device == paUseHostApiSpecificDeviceSpecification ) - { - PaWinWDM_SetLastErrorInfo(paInvalidDevice, "IsFormatSupported: paUseHostApiSpecificDeviceSpecification not supported"); - return paInvalidDevice; - } - - /* check that input device can support inputChannelCount */ - if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels ) - { - PaWinWDM_SetLastErrorInfo(paInvalidChannelCount, "IsFormatSupported: Invalid input channel count"); - return paInvalidChannelCount; - } - - /* validate inputStreamInfo */ - if( inputParameters->hostApiSpecificStreamInfo ) - { - 
PaWinWDM_SetLastErrorInfo(paIncompatibleHostApiSpecificStreamInfo, "Host API stream info not supported"); - return paIncompatibleHostApiSpecificStreamInfo; /* this implementation doesn't use custom stream info */ - } - - pFilter = pDeviceInfo->filter; - pin = pFilter->pins[pDeviceInfo->pin]; - - /* Find out the testing format */ - for (fmt = paFloat32; fmt <= paUInt8; fmt <<= 1) - { - if ((fmt & pin->formats) != 0) - { - /* Found a matching format! */ - testFormat = fmt; - break; - } - } - if (testFormat == 0) - { - PaWinWDM_SetLastErrorInfo(result, "IsFormatSupported(capture) failed: no testformat found!"); - return paUnanticipatedHostError; - } - - /* Due to special considerations, WaveRT devices with paInt24 should be tested with paInt32 and - valid bits = 24 (instead of 24 bit samples) */ - if (pFilter->devInfo.streamingType == Type_kWaveRT && testFormat == paInt24) - { - PA_DEBUG(("IsFormatSupported (capture): WaveRT overriding testFormat paInt24 with paInt32 (24 valid bits)")); - testFormat = paInt32; - validBits = 24; - } - - /* Check that the input format is supported */ - channelMask = PaWin_DefaultChannelMask(inputChannelCount); - PaWin_InitializeWaveFormatExtensible((PaWinWaveFormat*)&wfx, - inputChannelCount, - testFormat, - PaWin_SampleFormatToLinearWaveFormatTag(testFormat), - sampleRate, - channelMask ); - if (validBits != 0) - { - wfx.Samples.wValidBitsPerSample = validBits; - } - - result = PinIsFormatSupported(pin, (const WAVEFORMATEX*)&wfx); - if( result != paNoError ) - { - /* Try a WAVE_FORMAT_PCM instead */ - PaWin_InitializeWaveFormatEx((PaWinWaveFormat*)&wfx, - inputChannelCount, - testFormat, - PaWin_SampleFormatToLinearWaveFormatTag(testFormat), - sampleRate); - - if (validBits != 0) - { - wfx.Samples.wValidBitsPerSample = validBits; - } - - result = PinIsFormatSupported(pin, (const WAVEFORMATEX*)&wfx); - if( result != paNoError ) - { - PaWinWDM_SetLastErrorInfo(result, "IsFormatSupported(capture) failed: sr=%u,ch=%u,bits=%u", wfx.Format.nSamplesPerSec, wfx.Format.nChannels, wfx.Format.wBitsPerSample); - return result; - } - } - } - else - { - inputChannelCount = 0; - } - - if( outputParameters ) - { - PaWinWdmDeviceInfo* pDeviceInfo = (PaWinWdmDeviceInfo*)wdmHostApi->inheritedHostApiRep.deviceInfos[outputParameters->device]; - PaWinWdmPin* pin; - unsigned fmt; - unsigned long testFormat = 0; - unsigned validBits = 0; - - outputChannelCount = outputParameters->channelCount; - outputSampleFormat = outputParameters->sampleFormat; - - /* all standard sample formats are supported by the buffer adapter, - this implementation doesn't support any custom sample formats */ - if( outputSampleFormat & paCustomFormat ) - { - PaWinWDM_SetLastErrorInfo(paSampleFormatNotSupported, "IsFormatSupported: Custom output format not supported"); - return paSampleFormatNotSupported; - } - - /* unless alternate device specification is supported, reject the use of - paUseHostApiSpecificDeviceSpecification */ - - if( outputParameters->device == paUseHostApiSpecificDeviceSpecification ) - { - PaWinWDM_SetLastErrorInfo(paInvalidDevice, "IsFormatSupported: paUseHostApiSpecificDeviceSpecification not supported"); - return paInvalidDevice; - } - - /* check that output device can support outputChannelCount */ - if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels ) - { - PaWinWDM_SetLastErrorInfo(paInvalidChannelCount, "Invalid output channel count"); - return paInvalidChannelCount; - } - - /* validate outputStreamInfo */ - if( 
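/* Note the probe order used above (and mirrored for the output side below):
 * WAVE_FORMAT_EXTENSIBLE is tried first, since it carries the channel mask
 * and wValidBitsPerSample, then the probe is retried as plain
 * WAVE_FORMAT_PCM for older drivers that reject the extensible header. For
 * WaveRT, paInt24 is probed as a 32-bit container with 24 valid bits
 * because the cyclic buffer may need 128-byte alignment (see PinGetBuffer).
 * A sketch condensing the duplicated retry logic (ExampleProbeFormat is
 * hypothetical):
 */
static PaError ExampleProbeFormat(PaWinWdmPin* pin, int channels,
                                  PaSampleFormat fmt, double sampleRate,
                                  PaWinWaveFormatChannelMask mask,
                                  unsigned validBits)
{
    WAVEFORMATEXTENSIBLE wfx;
    PaError result;

    PaWin_InitializeWaveFormatExtensible((PaWinWaveFormat*)&wfx, channels,
        fmt, PaWin_SampleFormatToLinearWaveFormatTag(fmt), sampleRate, mask);
    if (validBits != 0)
        wfx.Samples.wValidBitsPerSample = (WORD)validBits;
    result = PinIsFormatSupported(pin, (const WAVEFORMATEX*)&wfx);
    if (result == paNoError)
        return result;

    /* Fall back to plain PCM */
    PaWin_InitializeWaveFormatEx((PaWinWaveFormat*)&wfx, channels, fmt,
        PaWin_SampleFormatToLinearWaveFormatTag(fmt), sampleRate);
    if (validBits != 0)
        wfx.Samples.wValidBitsPerSample = (WORD)validBits;
    return PinIsFormatSupported(pin, (const WAVEFORMATEX*)&wfx);
}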
outputParameters->hostApiSpecificStreamInfo ) - { - PaWinWDM_SetLastErrorInfo(paIncompatibleHostApiSpecificStreamInfo, "Host API stream info not supported"); - return paIncompatibleHostApiSpecificStreamInfo; /* this implementation doesn't use custom stream info */ - } - - pFilter = pDeviceInfo->filter; - pin = pFilter->pins[pDeviceInfo->pin]; - - /* Find out the testing format */ - for (fmt = paFloat32; fmt <= paUInt8; fmt <<= 1) - { - if ((fmt & pin->formats) != 0) - { - /* Found a matching format! */ - testFormat = fmt; - break; - } - } - if (testFormat == 0) - { - PaWinWDM_SetLastErrorInfo(result, "IsFormatSupported(render) failed: no testformat found!"); - return paUnanticipatedHostError; - } - - /* Due to special considerations, WaveRT devices with paInt24 should be tested with paInt32 and - valid bits = 24 (instead of 24 bit samples) */ - if (pFilter->devInfo.streamingType == Type_kWaveRT && testFormat == paInt24) - { - PA_DEBUG(("IsFormatSupported (render): WaveRT overriding testFormat paInt24 with paInt32 (24 valid bits)")); - testFormat = paInt32; - validBits = 24; - } - - /* Check that the output format is supported */ - channelMask = PaWin_DefaultChannelMask(outputChannelCount); - PaWin_InitializeWaveFormatExtensible((PaWinWaveFormat*)&wfx, - outputChannelCount, - testFormat, - PaWin_SampleFormatToLinearWaveFormatTag(testFormat), - sampleRate, - channelMask ); - - if (validBits != 0) - { - wfx.Samples.wValidBitsPerSample = validBits; - } - - result = PinIsFormatSupported(pin, (const WAVEFORMATEX*)&wfx); - if( result != paNoError ) - { - /* Try a WAVE_FORMAT_PCM instead */ - PaWin_InitializeWaveFormatEx((PaWinWaveFormat*)&wfx, - outputChannelCount, - testFormat, - PaWin_SampleFormatToLinearWaveFormatTag(testFormat), - sampleRate); - - if (validBits != 0) - { - wfx.Samples.wValidBitsPerSample = validBits; - } - - result = PinIsFormatSupported(pin, (const WAVEFORMATEX*)&wfx); - if( result != paNoError ) - { - PaWinWDM_SetLastErrorInfo(result, "IsFormatSupported(render) failed: %u,%u,%u", wfx.Format.nSamplesPerSec, wfx.Format.nChannels, wfx.Format.wBitsPerSample); - return result; - } - } - - } - else - { - outputChannelCount = 0; - } - - /* - IMPLEMENT ME: - - - if a full duplex stream is requested, check that the combination - of input and output parameters is supported if necessary - - - check that the device supports sampleRate - - Because the buffer adapter handles conversion between all standard - sample formats, the following checks are only required if paCustomFormat - is implemented, or under some other unusual conditions. 
- - check that input device can support inputSampleFormat, or that - we have the capability to convert from inputSampleFormat to - a native format - - - check that output device can support outputSampleFormat, or that - we have the capability to convert from outputSampleFormat to - a native format - */ - if((inputChannelCount == 0)&&(outputChannelCount == 0)) - { - PaWinWDM_SetLastErrorInfo(paSampleFormatNotSupported, "No input or output channels defined"); - result = paSampleFormatNotSupported; /* Not right error */ - } - - PA_LOGL_; - return result; -} - -static void ResetStreamEvents(PaWinWdmStream* stream) -{ - unsigned i; - ResetEvent(stream->eventAbort); - ResetEvent(stream->eventStreamStart[StreamStart_kOk]); - ResetEvent(stream->eventStreamStart[StreamStart_kFailed]); - - for (i=0; i<stream->capture.noOfPackets; ++i) - { - if (stream->capture.events && stream->capture.events[i]) - { - ResetEvent(stream->capture.events[i]); - } - } - - for (i=0; i<stream->render.noOfPackets; ++i) - { - if (stream->render.events && stream->render.events[i]) - { - ResetEvent(stream->render.events[i]); - } - } -} - -static void CloseStreamEvents(PaWinWdmStream* stream) -{ - unsigned i; - PaWinWdmIOInfo* ios[2] = { &stream->capture, &stream->render }; - - if (stream->eventAbort) - { - CloseHandle(stream->eventAbort); - stream->eventAbort = 0; - } - if (stream->eventStreamStart[StreamStart_kOk]) - { - CloseHandle(stream->eventStreamStart[StreamStart_kOk]); - } - if (stream->eventStreamStart[StreamStart_kFailed]) - { - CloseHandle(stream->eventStreamStart[StreamStart_kFailed]); - } - - for (i = 0; i < 2; ++i) - { - unsigned j; - /* Unregister notification handles for WaveRT */ - if (ios[i]->pPin && ios[i]->pPin->parentFilter->devInfo.streamingType == Type_kWaveRT && - ios[i]->pPin->pinKsSubType == SubType_kNotification && - ios[i]->events != 0) - { - PinUnregisterNotificationHandle(ios[i]->pPin, ios[i]->events[0]); - } - - for (j=0; j < ios[i]->noOfPackets; ++j) - { - if (ios[i]->events && ios[i]->events[j]) - { - CloseHandle(ios[i]->events[j]); - ios[i]->events[j] = 0; - } - } - } -} - -static unsigned NextPowerOf2(unsigned val) -{ - val--; - val = (val >> 1) | val; - val = (val >> 2) | val; - val = (val >> 4) | val; - val = (val >> 8) | val; - val = (val >> 16) | val; - return ++val; -} - -static PaError ValidateSpecificStreamParameters( - const PaStreamParameters *streamParameters, - const PaWinWDMKSInfo *streamInfo, - unsigned isInput) -{ - if( streamInfo ) - { - if( streamInfo->size != sizeof( PaWinWDMKSInfo ) - || streamInfo->version != 1 ) - { - PA_DEBUG(("Stream parameters: size or version not correct")); - return paIncompatibleHostApiSpecificStreamInfo; - } - - if (!!(streamInfo->flags & ~(paWinWDMKSOverrideFramesize | paWinWDMKSUseGivenChannelMask))) - { - PA_DEBUG(("Stream parameters: non supported flags set")); - return paIncompatibleHostApiSpecificStreamInfo; - } - - if (streamInfo->noOfPackets != 0 && - (streamInfo->noOfPackets < 2 || streamInfo->noOfPackets > 8)) - { - PA_DEBUG(("Stream parameters: noOfPackets %u out of range [2,8]", streamInfo->noOfPackets)); - return paIncompatibleHostApiSpecificStreamInfo; - } - - if (streamInfo->flags & paWinWDMKSUseGivenChannelMask) - { - if (isInput) - { - PA_DEBUG(("Stream parameters: Channels mask setting not supported for input stream")); - return paIncompatibleHostApiSpecificStreamInfo; - } - - if (streamInfo->channelMask & PAWIN_SPEAKER_RESERVED) - { - PA_DEBUG(("Stream parameters: Given channels mask 0x%08X not supported", streamInfo->channelMask)); - return 
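/* Note: NextPowerOf2 above uses the classic bit-smearing trick. After the
 * initial decrement, each shift-or copies the highest set bit into every
 * lower position, and the final increment lands on the next power of two.
 * Worked example: val = 300 gives 299 = 0b100101011, which smears to
 * 0b111111111 = 511, and 511 + 1 = 512. A value that is already a power of
 * two maps to itself: 256 -> 255, smearing keeps 255, and 255 + 1 = 256. */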
paIncompatibleHostApiSpecificStreamInfo; - } - } - - } - - return paNoError; -} - - - - -/* see pa_hostapi.h for a list of validity guarantees made about OpenStream parameters */ - -static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi, - PaStream** s, - const PaStreamParameters *inputParameters, - const PaStreamParameters *outputParameters, - double sampleRate, - unsigned long framesPerUserBuffer, - PaStreamFlags streamFlags, - PaStreamCallback *streamCallback, - void *userData ) -{ - PaError result = paNoError; - PaWinWdmHostApiRepresentation *wdmHostApi = (PaWinWdmHostApiRepresentation*)hostApi; - PaWinWdmStream *stream = 0; - /* unsigned long framesPerHostBuffer; these may not be equivalent for all implementations */ - PaSampleFormat inputSampleFormat, outputSampleFormat; - PaSampleFormat hostInputSampleFormat, hostOutputSampleFormat; - int userInputChannels,userOutputChannels; - WAVEFORMATEXTENSIBLE wfx; - - PA_LOGE_; - PA_DEBUG(("OpenStream:sampleRate = %f\n",sampleRate)); - PA_DEBUG(("OpenStream:framesPerBuffer = %lu\n",framesPerUserBuffer)); - - if( inputParameters ) - { - userInputChannels = inputParameters->channelCount; - inputSampleFormat = inputParameters->sampleFormat; - - /* unless alternate device specification is supported, reject the use of - paUseHostApiSpecificDeviceSpecification */ - - if( inputParameters->device == paUseHostApiSpecificDeviceSpecification ) - { - PaWinWDM_SetLastErrorInfo(paInvalidDevice, "paUseHostApiSpecificDeviceSpecification(in) not supported"); - return paInvalidDevice; - } - - /* check that input device can support stream->userInputChannels */ - if( userInputChannels > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels ) - { - PaWinWDM_SetLastErrorInfo(paInvalidChannelCount, "Invalid input channel count"); - return paInvalidChannelCount; - } - - /* validate inputStreamInfo */ - result = ValidateSpecificStreamParameters(inputParameters, inputParameters->hostApiSpecificStreamInfo, 1 ); - if(result != paNoError) - { - PaWinWDM_SetLastErrorInfo(result, "Host API stream info not supported (in)"); - return result; /* this implementation doesn't use custom stream info */ - } - } - else - { - userInputChannels = 0; - inputSampleFormat = paInt16; /* Suppress 'uninitialised var' warnings. */ - } - - if( outputParameters ) - { - userOutputChannels = outputParameters->channelCount; - outputSampleFormat = outputParameters->sampleFormat; - - /* unless alternate device specification is supported, reject the use of - paUseHostApiSpecificDeviceSpecification */ - - if( outputParameters->device == paUseHostApiSpecificDeviceSpecification ) - { - PaWinWDM_SetLastErrorInfo(paInvalidDevice, "paUseHostApiSpecificDeviceSpecification(out) not supported"); - return paInvalidDevice; - } - - /* check that output device can support stream->userInputChannels */ - if( userOutputChannels > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels ) - { - PaWinWDM_SetLastErrorInfo(paInvalidChannelCount, "Invalid output channel count"); - return paInvalidChannelCount; - } - - /* validate outputStreamInfo */ - result = ValidateSpecificStreamParameters( outputParameters, outputParameters->hostApiSpecificStreamInfo, 0 ); - if (result != paNoError) - { - PaWinWDM_SetLastErrorInfo(result, "Host API stream info not supported (out)"); - return result; /* this implementation doesn't use custom stream info */ - } - } - else - { - userOutputChannels = 0; - outputSampleFormat = paInt16; /* Suppress 'uninitialized var' warnings. 
*/ - } - - /* validate platform specific flags */ - if( (streamFlags & paPlatformSpecificFlags) != 0 ) - { - PaWinWDM_SetLastErrorInfo(paInvalidFlag, "Invalid flag supplied"); - return paInvalidFlag; /* unexpected platform specific flag */ - } - - stream = (PaWinWdmStream*)PaUtil_AllocateMemory( sizeof(PaWinWdmStream) ); - if( !stream ) - { - result = paInsufficientMemory; - goto error; - } - - /* Create allocation group */ - stream->allocGroup = PaUtil_CreateAllocationGroup(); - if( !stream->allocGroup ) - { - result = paInsufficientMemory; - goto error; - } - - /* Zero the stream object */ - /* memset((void*)stream,0,sizeof(PaWinWdmStream)); */ - - if( streamCallback ) - { - PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation, - &wdmHostApi->callbackStreamInterface, streamCallback, userData ); - } - else - { - /* PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation, - &wdmHostApi->blockingStreamInterface, streamCallback, userData ); */ - - /* We don't support the blocking API yet */ - PA_DEBUG(("Blocking API not supported yet!\n")); - PaWinWDM_SetLastErrorInfo(paUnanticipatedHostError, "Blocking API not supported yet"); - result = paUnanticipatedHostError; - goto error; - } - - PaUtil_InitializeCpuLoadMeasurer( &stream->cpuLoadMeasurer, sampleRate ); - - /* Instantiate the input pin if necessary */ - if(userInputChannels > 0) - { - PaWinWdmFilter* pFilter; - PaWinWdmDeviceInfo* pDeviceInfo; - PaWinWdmPin* pPin; - unsigned validBitsPerSample = 0; - PaWinWaveFormatChannelMask channelMask = PaWin_DefaultChannelMask( userInputChannels ); - - result = paSampleFormatNotSupported; - pDeviceInfo = (PaWinWdmDeviceInfo*)wdmHostApi->inheritedHostApiRep.deviceInfos[inputParameters->device]; - pFilter = pDeviceInfo->filter; - pPin = pFilter->pins[pDeviceInfo->pin]; - - stream->userInputChannels = userInputChannels; - - hostInputSampleFormat = PaUtil_SelectClosestAvailableFormat( pPin->formats, inputSampleFormat ); - if (hostInputSampleFormat == paSampleFormatNotSupported) - { - result = paUnanticipatedHostError; - PaWinWDM_SetLastErrorInfo(result, "PU_SCAF(%X,%X) failed (input)", pPin->formats, inputSampleFormat); - goto error; - } - else if (pFilter->devInfo.streamingType == Type_kWaveRT && hostInputSampleFormat == paInt24) - { - /* For WaveRT, we choose 32 bit format instead of paInt24, since we MIGHT need to align buffer on a - 128 byte boundary (see PinGetBuffer) */ - hostInputSampleFormat = paInt32; - /* But we'll tell the driver that it's 24 bit in 32 bit container */ - validBitsPerSample = 24; - } - - while (hostInputSampleFormat <= paUInt8) - { - unsigned channelsToProbe = stream->userInputChannels; - /* Some or all KS devices can only handle the exact number of channels - * they specify. 
But PortAudio clients expect to be able to - * at least specify mono I/O on a multi-channel device - * If this is the case, then we will do the channel mapping internally - * The following loop tests this case - **/ - while (1) - { - PaWin_InitializeWaveFormatExtensible((PaWinWaveFormat*)&wfx, - channelsToProbe, - hostInputSampleFormat, - PaWin_SampleFormatToLinearWaveFormatTag(hostInputSampleFormat), - sampleRate, - channelMask ); - stream->capture.bytesPerFrame = wfx.Format.nBlockAlign; - if (validBitsPerSample != 0) - { - wfx.Samples.wValidBitsPerSample = validBitsPerSample; - } - stream->capture.pPin = FilterCreatePin(pFilter, pPin->pinId, (WAVEFORMATEX*)&wfx, &result); - stream->deviceInputChannels = channelsToProbe; - - if( result != paNoError && result != paDeviceUnavailable ) - { - /* Try a WAVE_FORMAT_PCM instead */ - PaWin_InitializeWaveFormatEx((PaWinWaveFormat*)&wfx, - channelsToProbe, - hostInputSampleFormat, - PaWin_SampleFormatToLinearWaveFormatTag(hostInputSampleFormat), - sampleRate); - if (validBitsPerSample != 0) - { - wfx.Samples.wValidBitsPerSample = validBitsPerSample; - } - stream->capture.pPin = FilterCreatePin(pFilter, pPin->pinId, (const WAVEFORMATEX*)&wfx, &result); - } - - if (result == paDeviceUnavailable) goto occupied; - - if (result == paNoError) - { - /* We're done */ - break; - } - - if (channelsToProbe < (unsigned)pPin->maxChannels) - { - /* Go to next multiple of 2 */ - channelsToProbe = min((((channelsToProbe>>1)+1)<<1), (unsigned)pPin->maxChannels); - continue; - } - - break; - } - - if (result == paNoError) - { - /* We're done */ - break; - } - - /* Go to next format in line with lower resolution */ - hostInputSampleFormat <<= 1; - } - - if(stream->capture.pPin == NULL) - { - PaWinWDM_SetLastErrorInfo(result, "Failed to create capture pin: sr=%u,ch=%u,bits=%u,align=%u", - wfx.Format.nSamplesPerSec, wfx.Format.nChannels, wfx.Format.wBitsPerSample, wfx.Format.nBlockAlign); - goto error; - } - - /* Select correct mux input on MUX node of topology filter */ - if (pDeviceInfo->muxPosition >= 0) - { - assert(pPin->parentFilter->topologyFilter != NULL); - - result = FilterUse(pPin->parentFilter->topologyFilter); - if (result != paNoError) - { - PaWinWDM_SetLastErrorInfo(result, "Failed to open topology filter"); - goto error; - } - - result = WdmSetMuxNodeProperty(pPin->parentFilter->topologyFilter->handle, - pPin->inputs[pDeviceInfo->muxPosition]->muxNodeId, - pPin->inputs[pDeviceInfo->muxPosition]->muxPinId); - - FilterRelease(pPin->parentFilter->topologyFilter); - - if(result != paNoError) - { - PaWinWDM_SetLastErrorInfo(result, "Failed to set topology mux node"); - goto error; - } - } - - stream->capture.bytesPerSample = stream->capture.bytesPerFrame / stream->deviceInputChannels; - stream->capture.pPin->frameSize /= stream->capture.bytesPerFrame; - PA_DEBUG(("Capture pin frames: %d\n",stream->capture.pPin->frameSize)); - } - else - { - hostInputSampleFormat = (PaSampleFormat)0; /* Avoid uninitialized variable warning */ - - stream->capture.pPin = NULL; - stream->capture.bytesPerFrame = 0; - } - - /* Instantiate the output pin if necessary */ - if(userOutputChannels > 0) - { - PaWinWdmFilter* pFilter; - PaWinWdmDeviceInfo* pDeviceInfo; - PaWinWdmPin* pPin; - PaWinWDMKSInfo* pInfo = (PaWinWDMKSInfo*)(outputParameters->hostApiSpecificStreamInfo); - unsigned validBitsPerSample = 0; - PaWinWaveFormatChannelMask channelMask = PaWin_DefaultChannelMask( userOutputChannels ); - if (pInfo && (pInfo->flags & paWinWDMKSUseGivenChannelMask)) - { - PA_DEBUG(("Using 
channelMask 0x%08X instead of default 0x%08X\n", - pInfo->channelMask, - channelMask)); - channelMask = pInfo->channelMask; - } - - result = paSampleFormatNotSupported; - pDeviceInfo = (PaWinWdmDeviceInfo*)wdmHostApi->inheritedHostApiRep.deviceInfos[outputParameters->device]; - pFilter = pDeviceInfo->filter; - pPin = pFilter->pins[pDeviceInfo->pin]; - - stream->userOutputChannels = userOutputChannels; - - hostOutputSampleFormat = PaUtil_SelectClosestAvailableFormat( pPin->formats, outputSampleFormat ); - if (hostOutputSampleFormat == paSampleFormatNotSupported) - { - result = paUnanticipatedHostError; - PaWinWDM_SetLastErrorInfo(result, "PU_SCAF(%X,%X) failed (output)", pPin->formats, hostOutputSampleFormat); - goto error; - } - else if (pFilter->devInfo.streamingType == Type_kWaveRT && hostOutputSampleFormat == paInt24) - { - /* For WaveRT, we choose 32 bit format instead of paInt24, since we MIGHT need to align buffer on a - 128 byte boundary (see PinGetBuffer) */ - hostOutputSampleFormat = paInt32; - /* But we'll tell the driver that it's 24 bit in 32 bit container */ - validBitsPerSample = 24; - } - - while (hostOutputSampleFormat <= paUInt8) - { - unsigned channelsToProbe = stream->userOutputChannels; - /* Some or all KS devices can only handle the exact number of channels - * they specify. But PortAudio clients expect to be able to - * at least specify mono I/O on a multi-channel device - * If this is the case, then we will do the channel mapping internally - * The following loop tests this case - **/ - while (1) - { - PaWin_InitializeWaveFormatExtensible((PaWinWaveFormat*)&wfx, - channelsToProbe, - hostOutputSampleFormat, - PaWin_SampleFormatToLinearWaveFormatTag(hostOutputSampleFormat), - sampleRate, - channelMask ); - stream->render.bytesPerFrame = wfx.Format.nBlockAlign; - if (validBitsPerSample != 0) - { - wfx.Samples.wValidBitsPerSample = validBitsPerSample; - } - stream->render.pPin = FilterCreatePin(pFilter, pPin->pinId, (WAVEFORMATEX*)&wfx, &result); - stream->deviceOutputChannels = channelsToProbe; - - if( result != paNoError && result != paDeviceUnavailable ) - { - PaWin_InitializeWaveFormatEx((PaWinWaveFormat*)&wfx, - channelsToProbe, - hostOutputSampleFormat, - PaWin_SampleFormatToLinearWaveFormatTag(hostOutputSampleFormat), - sampleRate); - if (validBitsPerSample != 0) - { - wfx.Samples.wValidBitsPerSample = validBitsPerSample; - } - stream->render.pPin = FilterCreatePin(pFilter, pPin->pinId, (const WAVEFORMATEX*)&wfx, &result); - } - - if (result == paDeviceUnavailable) goto occupied; - - if (result == paNoError) - { - /* We're done */ - break; - } - - if (channelsToProbe < (unsigned)pPin->maxChannels) - { - /* Go to next multiple of 2 */ - channelsToProbe = min((((channelsToProbe>>1)+1)<<1), (unsigned)pPin->maxChannels); - continue; - } - - break; - }; - - if (result == paNoError) - { - /* We're done */ - break; - } - - /* Go to next format in line with lower resolution */ - hostOutputSampleFormat <<= 1; - } - - if(stream->render.pPin == NULL) - { - PaWinWDM_SetLastErrorInfo(result, "Failed to create render pin: sr=%u,ch=%u,bits=%u,align=%u", - wfx.Format.nSamplesPerSec, wfx.Format.nChannels, wfx.Format.wBitsPerSample, wfx.Format.nBlockAlign); - goto error; - } - - stream->render.bytesPerSample = stream->render.bytesPerFrame / stream->deviceOutputChannels; - stream->render.pPin->frameSize /= stream->render.bytesPerFrame; - PA_DEBUG(("Render pin frames: %d\n",stream->render.pPin->frameSize)); - } - else - { - hostOutputSampleFormat = (PaSampleFormat)0; /* Avoid 
uninitialized variable warning */ - - stream->render.pPin = NULL; - stream->render.bytesPerFrame = 0; - } - - /* Calculate the framesPerHostXxxxBuffer size based upon the suggested latency values */ - /* Record the buffer length */ - if(inputParameters) - { - /* Calculate the frames from the user's value - add a bit to round up */ - stream->capture.framesPerBuffer = (unsigned long)((inputParameters->suggestedLatency*sampleRate)+0.0001); - if(stream->capture.framesPerBuffer > (unsigned long)sampleRate) - { /* Upper limit is 1 second */ - stream->capture.framesPerBuffer = (unsigned long)sampleRate; - } - else if(stream->capture.framesPerBuffer < stream->capture.pPin->frameSize) - { - stream->capture.framesPerBuffer = stream->capture.pPin->frameSize; - } - PA_DEBUG(("Input frames chosen:%ld\n",stream->capture.framesPerBuffer)); - - /* Setup number of packets to use */ - stream->capture.noOfPackets = 2; - - if (inputParameters->hostApiSpecificStreamInfo) - { - PaWinWDMKSInfo* pInfo = (PaWinWDMKSInfo*)inputParameters->hostApiSpecificStreamInfo; - - if (stream->capture.pPin->parentFilter->devInfo.streamingType == Type_kWaveCyclic && - pInfo->noOfPackets != 0) - { - stream->capture.noOfPackets = pInfo->noOfPackets; - } - } - } - - if(outputParameters) - { - /* Calculate the frames from the user's value - add a bit to round up */ - stream->render.framesPerBuffer = (unsigned long)((outputParameters->suggestedLatency*sampleRate)+0.0001); - if(stream->render.framesPerBuffer > (unsigned long)sampleRate) - { /* Upper limit is 1 second */ - stream->render.framesPerBuffer = (unsigned long)sampleRate; - } - else if(stream->render.framesPerBuffer < stream->render.pPin->frameSize) - { - stream->render.framesPerBuffer = stream->render.pPin->frameSize; - } - PA_DEBUG(("Output frames chosen:%ld\n",stream->render.framesPerBuffer)); - - /* Setup number of packets to use */ - stream->render.noOfPackets = 2; - - if (outputParameters->hostApiSpecificStreamInfo) - { - PaWinWDMKSInfo* pInfo = (PaWinWDMKSInfo*)outputParameters->hostApiSpecificStreamInfo; - - if (stream->render.pPin->parentFilter->devInfo.streamingType == Type_kWaveCyclic && - pInfo->noOfPackets != 0) - { - stream->render.noOfPackets = pInfo->noOfPackets; - } - } - } - - /* Host buffer size is bound to the largest of the input and output frame sizes */ - result = PaUtil_InitializeBufferProcessor( &stream->bufferProcessor, - stream->userInputChannels, inputSampleFormat, hostInputSampleFormat, - stream->userOutputChannels, outputSampleFormat, hostOutputSampleFormat, - sampleRate, streamFlags, framesPerUserBuffer, - max(stream->capture.framesPerBuffer, stream->render.framesPerBuffer), - paUtilBoundedHostBufferSize, - streamCallback, userData ); - if( result != paNoError ) - { - PaWinWDM_SetLastErrorInfo(result, "PaUtil_InitializeBufferProcessor failed: ich=%u, isf=%u, hisf=%u, och=%u, osf=%u, hosf=%u, sr=%lf, flags=0x%X, fpub=%u, fphb=%u", - stream->userInputChannels, inputSampleFormat, hostInputSampleFormat, - stream->userOutputChannels, outputSampleFormat, hostOutputSampleFormat, - sampleRate, streamFlags, framesPerUserBuffer, - max(stream->capture.framesPerBuffer, stream->render.framesPerBuffer)); - goto error; - } - - /* Allocate/get all the buffers for host I/O */ - if (stream->userInputChannels > 0) - { - stream->streamRepresentation.streamInfo.inputLatency = stream->capture.framesPerBuffer / sampleRate; - - switch (stream->capture.pPin->parentFilter->devInfo.streamingType) - { - case Type_kWaveCyclic: - { - unsigned size = 
stream->capture.noOfPackets * stream->capture.framesPerBuffer * stream->capture.bytesPerFrame; - /* Allocate input host buffer */ - stream->capture.hostBuffer = (char*)PaUtil_GroupAllocateMemory(stream->allocGroup, size); - PA_DEBUG(("Input buffer allocated (size = %u)\n", size)); - if( !stream->capture.hostBuffer ) - { - PA_DEBUG(("Cannot allocate host input buffer!\n")); - PaWinWDM_SetLastErrorInfo(paInsufficientMemory, "Failed to allocate input buffer"); - result = paInsufficientMemory; - goto error; - } - stream->capture.hostBufferSize = size; - PA_DEBUG(("Input buffer start = %p (size=%u)\n",stream->capture.hostBuffer, stream->capture.hostBufferSize)); - stream->capture.pPin->fnEventHandler = PaPinCaptureEventHandler_WaveCyclic; - stream->capture.pPin->fnSubmitHandler = PaPinCaptureSubmitHandler_WaveCyclic; - } - break; - case Type_kWaveRT: - { - const DWORD dwTotalSize = 2 * stream->capture.framesPerBuffer * stream->capture.bytesPerFrame; - DWORD dwRequestedSize = dwTotalSize; - BOOL bCallMemoryBarrier = FALSE; - ULONG hwFifoLatency = 0; - ULONG dummy; - result = PinGetBuffer(stream->capture.pPin, (void**)&stream->capture.hostBuffer, &dwRequestedSize, &bCallMemoryBarrier); - if (!result) - { - PA_DEBUG(("Input buffer start = %p, size = %u\n", stream->capture.hostBuffer, dwRequestedSize)); - if (dwRequestedSize != dwTotalSize) - { - PA_DEBUG(("Buffer length changed by driver from %u to %u !\n", dwTotalSize, dwRequestedSize)); - /* Recalculate to what the driver has given us */ - stream->capture.framesPerBuffer = dwRequestedSize / (2 * stream->capture.bytesPerFrame); - } - stream->capture.hostBufferSize = dwRequestedSize; - - if (stream->capture.pPin->pinKsSubType == SubType_kPolled) - { - stream->capture.pPin->fnEventHandler = PaPinCaptureEventHandler_WaveRTPolled; - stream->capture.pPin->fnSubmitHandler = PaPinCaptureSubmitHandler_WaveRTPolled; - } - else - { - stream->capture.pPin->fnEventHandler = PaPinCaptureEventHandler_WaveRTEvent; - stream->capture.pPin->fnSubmitHandler = PaPinCaptureSubmitHandler_WaveRTEvent; - } - - stream->capture.pPin->fnMemBarrier = bCallMemoryBarrier ? MemoryBarrierRead : MemoryBarrierDummy; - } - else - { - PA_DEBUG(("Failed to get input buffer (WaveRT)\n")); - PaWinWDM_SetLastErrorInfo(paUnanticipatedHostError, "Failed to get input buffer (WaveRT)"); - result = paUnanticipatedHostError; - goto error; - } - - /* Get latency */ - result = PinGetHwLatency(stream->capture.pPin, &hwFifoLatency, &dummy, &dummy); - if (result == paNoError) - { - stream->capture.pPin->hwLatency = hwFifoLatency; - - /* Add HW latency into total input latency */ - stream->streamRepresentation.streamInfo.inputLatency += ((hwFifoLatency / stream->capture.bytesPerFrame) / sampleRate); - } - else - { - PA_DEBUG(("Failed to get size of FIFO hardware buffer (is set to zero)\n")); - stream->capture.pPin->hwLatency = 0; - } - } - break; - default: - /* Undefined wave type!! 
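Only Type_kWaveCyclic and Type_kWaveRT are handled here; any other streamingType value indicates inconsistent device info, so this fails hard with paInternalError.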
*/ - assert(0); - result = paInternalError; - PaWinWDM_SetLastErrorInfo(result, "Wave type %u ??", stream->capture.pPin->parentFilter->devInfo.streamingType); - goto error; - } - } - else - { - stream->capture.hostBuffer = 0; - } - - if (stream->userOutputChannels > 0) - { - stream->streamRepresentation.streamInfo.outputLatency = stream->render.framesPerBuffer / sampleRate; - - switch (stream->render.pPin->parentFilter->devInfo.streamingType) - { - case Type_kWaveCyclic: - { - unsigned size = stream->render.noOfPackets * stream->render.framesPerBuffer * stream->render.bytesPerFrame; - /* Allocate output device buffer */ - stream->render.hostBuffer = (char*)PaUtil_GroupAllocateMemory(stream->allocGroup, size); - PA_DEBUG(("Output buffer allocated (size = %u)\n", size)); - if( !stream->render.hostBuffer ) - { - PA_DEBUG(("Cannot allocate host output buffer!\n")); - PaWinWDM_SetLastErrorInfo(paInsufficientMemory, "Failed to allocate output buffer"); - result = paInsufficientMemory; - goto error; - } - stream->render.hostBufferSize = size; - PA_DEBUG(("Output buffer start = %p (size=%u)\n",stream->render.hostBuffer, stream->render.hostBufferSize)); - - stream->render.pPin->fnEventHandler = PaPinRenderEventHandler_WaveCyclic; - stream->render.pPin->fnSubmitHandler = PaPinRenderSubmitHandler_WaveCyclic; - } - break; - case Type_kWaveRT: - { - const DWORD dwTotalSize = 2 * stream->render.framesPerBuffer * stream->render.bytesPerFrame; - DWORD dwRequestedSize = dwTotalSize; - BOOL bCallMemoryBarrier = FALSE; - ULONG hwFifoLatency = 0; - ULONG dummy; - result = PinGetBuffer(stream->render.pPin, (void**)&stream->render.hostBuffer, &dwRequestedSize, &bCallMemoryBarrier); - if (!result) - { - PA_DEBUG(("Output buffer start = %p, size = %u, membarrier = %u\n", stream->render.hostBuffer, dwRequestedSize, bCallMemoryBarrier)); - if (dwRequestedSize != dwTotalSize) - { - PA_DEBUG(("Buffer length changed by driver from %u to %u !\n", dwTotalSize, dwRequestedSize)); - /* Recalculate to what the driver has given us */ - stream->render.framesPerBuffer = dwRequestedSize / (2 * stream->render.bytesPerFrame); - } - stream->render.hostBufferSize = dwRequestedSize; - - if (stream->render.pPin->pinKsSubType == SubType_kPolled) - { - stream->render.pPin->fnEventHandler = PaPinRenderEventHandler_WaveRTPolled; - stream->render.pPin->fnSubmitHandler = PaPinRenderSubmitHandler_WaveRTPolled; - } - else - { - stream->render.pPin->fnEventHandler = PaPinRenderEventHandler_WaveRTEvent; - stream->render.pPin->fnSubmitHandler = PaPinRenderSubmitHandler_WaveRTEvent; - } - - stream->render.pPin->fnMemBarrier = bCallMemoryBarrier ? MemoryBarrierWrite : MemoryBarrierDummy; - } - else - { - PA_DEBUG(("Failed to get output buffer (with notification)\n")); - PaWinWDM_SetLastErrorInfo(paUnanticipatedHostError, "Failed to get output buffer (with notification)"); - result = paUnanticipatedHostError; - goto error; - } - - /* Get latency */ - result = PinGetHwLatency(stream->render.pPin, &hwFifoLatency, &dummy, &dummy); - if (result == paNoError) - { - stream->render.pPin->hwLatency = hwFifoLatency; - - /* Add HW latency into total output latency */ - stream->streamRepresentation.streamInfo.outputLatency += ((hwFifoLatency / stream->render.bytesPerFrame) / sampleRate); - } - else - { - PA_DEBUG(("Failed to get size of FIFO hardware buffer (is set to zero)\n")); - stream->render.pPin->hwLatency = 0; - } - } - break; - default: - /* Undefined wave type!! 
*/ - assert(0); - result = paInternalError; - PaWinWDM_SetLastErrorInfo(result, "Wave type %u ??", stream->render.pPin->parentFilter->devInfo.streamingType); - goto error; - } - } - else - { - stream->render.hostBuffer = 0; - } - - stream->streamRepresentation.streamInfo.sampleRate = sampleRate; - - PA_DEBUG(("BytesPerInputFrame = %d\n",stream->capture.bytesPerFrame)); - PA_DEBUG(("BytesPerOutputFrame = %d\n",stream->render.bytesPerFrame)); - - /* memset(stream->hostBuffer,0,size); */ - - /* Abort */ - stream->eventAbort = CreateEvent(NULL, TRUE, FALSE, NULL); - if (stream->eventAbort == 0) - { - result = paInsufficientMemory; - goto error; - } - stream->eventStreamStart[0] = CreateEvent(NULL, TRUE, FALSE, NULL); - if (stream->eventStreamStart[0] == 0) - { - result = paInsufficientMemory; - goto error; - } - stream->eventStreamStart[1] = CreateEvent(NULL, TRUE, FALSE, NULL); - if (stream->eventStreamStart[1] == 0) - { - result = paInsufficientMemory; - goto error; - } - - if(stream->userInputChannels > 0) - { - const unsigned bufferSizeInBytes = stream->capture.framesPerBuffer * stream->capture.bytesPerFrame; - const unsigned ringBufferFrameSize = NextPowerOf2( 1024 + 2 * max(stream->capture.framesPerBuffer, stream->render.framesPerBuffer) ); - - stream->capture.events = (HANDLE*)PaUtil_GroupAllocateMemory(stream->allocGroup, stream->capture.noOfPackets * sizeof(HANDLE)); - if (stream->capture.events == NULL) - { - result = paInsufficientMemory; - goto error; - } - - stream->capture.packets = (DATAPACKET*)PaUtil_GroupAllocateMemory(stream->allocGroup, stream->capture.noOfPackets * sizeof(DATAPACKET)); - if (stream->capture.packets == NULL) - { - result = paInsufficientMemory; - goto error; - } - - switch(stream->capture.pPin->parentFilter->devInfo.streamingType) - { - case Type_kWaveCyclic: - { - /* WaveCyclic case */ - unsigned i; - for (i = 0; i < stream->capture.noOfPackets; ++i) - { - /* Set up the packets */ - DATAPACKET *p = stream->capture.packets + i; - - /* Record event */ - stream->capture.events[i] = CreateEvent(NULL, TRUE, FALSE, NULL); - - p->Signal.hEvent = stream->capture.events[i]; - p->Header.Data = stream->capture.hostBuffer + (i*bufferSizeInBytes); - p->Header.FrameExtent = bufferSizeInBytes; - p->Header.DataUsed = 0; - p->Header.Size = sizeof(p->Header); - p->Header.PresentationTime.Numerator = 1; - p->Header.PresentationTime.Denominator = 1; - } - } - break; - case Type_kWaveRT: - { - /* Set up the "packets" */ - DATAPACKET *p = stream->capture.packets + 0; - - /* Record event: WaveRT has a single event for 2 notifications per buffer */ - stream->capture.events[0] = CreateEvent(NULL, FALSE, FALSE, NULL); - - p->Header.Data = stream->capture.hostBuffer; - p->Header.FrameExtent = bufferSizeInBytes; - p->Header.DataUsed = 0; - p->Header.Size = sizeof(p->Header); - p->Header.PresentationTime.Numerator = 1; - p->Header.PresentationTime.Denominator = 1; - - ++p; - p->Header.Data = stream->capture.hostBuffer + bufferSizeInBytes; - p->Header.FrameExtent = bufferSizeInBytes; - p->Header.DataUsed = 0; - p->Header.Size = sizeof(p->Header); - p->Header.PresentationTime.Numerator = 1; - p->Header.PresentationTime.Denominator = 1; - - if (stream->capture.pPin->pinKsSubType == SubType_kNotification) - { - result = PinRegisterNotificationHandle(stream->capture.pPin, stream->capture.events[0]); - - if (result != paNoError) - { - PA_DEBUG(("Failed to register capture notification handle\n")); - PaWinWDM_SetLastErrorInfo(paUnanticipatedHostError, "Failed to register capture notification
handle"); - result = paUnanticipatedHostError; - goto error; - } - } - - result = PinRegisterPositionRegister(stream->capture.pPin); - - if (result != paNoError) - { - unsigned long pos = 0xdeadc0de; - PA_DEBUG(("Failed to register capture position register, using PinGetAudioPositionViaIOCTLWrite\n")); - stream->capture.pPin->fnAudioPosition = PinGetAudioPositionViaIOCTLWrite; - /* Test position function */ - result = (stream->capture.pPin->fnAudioPosition)(stream->capture.pPin, &pos); - if (result != paNoError || pos != 0x0) - { - PA_DEBUG(("Failed to read capture position register (IOCTL)\n")); - PaWinWDM_SetLastErrorInfo(paUnanticipatedHostError, "Failed to read capture position register (IOCTL)"); - result = paUnanticipatedHostError; - goto error; - } - } - else - { - stream->capture.pPin->fnAudioPosition = PinGetAudioPositionMemoryMapped; - } - } - break; - default: - /* Undefined wave type!! */ - assert(0); - result = paInternalError; - PaWinWDM_SetLastErrorInfo(result, "Wave type %u ??", stream->capture.pPin->parentFilter->devInfo.streamingType); - goto error; - } - - /* Setup the input ring buffer here */ - stream->ringBufferData = (char*)PaUtil_GroupAllocateMemory(stream->allocGroup, ringBufferFrameSize * stream->capture.bytesPerFrame); - if (stream->ringBufferData == NULL) - { - result = paInsufficientMemory; - goto error; - } - PaUtil_InitializeRingBuffer(&stream->ringBuffer, stream->capture.bytesPerFrame, ringBufferFrameSize, stream->ringBufferData); - } - if(stream->userOutputChannels > 0) - { - const unsigned bufferSizeInBytes = stream->render.framesPerBuffer * stream->render.bytesPerFrame; - - stream->render.events = (HANDLE*)PaUtil_GroupAllocateMemory(stream->allocGroup, stream->render.noOfPackets * sizeof(HANDLE)); - if (stream->render.events == NULL) - { - result = paInsufficientMemory; - goto error; - } - - stream->render.packets = (DATAPACKET*)PaUtil_GroupAllocateMemory(stream->allocGroup, stream->render.noOfPackets * sizeof(DATAPACKET)); - if (stream->render.packets == NULL) - { - result = paInsufficientMemory; - goto error; - } - - switch(stream->render.pPin->parentFilter->devInfo.streamingType) - { - case Type_kWaveCyclic: - { - /* WaveCyclic case */ - unsigned i; - for (i = 0; i < stream->render.noOfPackets; ++i) - { - /* Set up the packets */ - DATAPACKET *p = stream->render.packets + i; - - /* Playback event */ - stream->render.events[i] = CreateEvent(NULL, TRUE, FALSE, NULL); - - /* In this case, we just use the packets as ptr to the device buffer */ - p->Signal.hEvent = stream->render.events[i]; - p->Header.Data = stream->render.hostBuffer + (i*bufferSizeInBytes); - p->Header.FrameExtent = bufferSizeInBytes; - p->Header.DataUsed = bufferSizeInBytes; - p->Header.Size = sizeof(p->Header); - p->Header.PresentationTime.Numerator = 1; - p->Header.PresentationTime.Denominator = 1; - } - } - break; - case Type_kWaveRT: - { - /* WaveRT case */ - - /* Set up the "packets" */ - DATAPACKET *p = stream->render.packets; - - /* The only playback event */ - stream->render.events[0] = CreateEvent(NULL, FALSE, FALSE, NULL); - - /* In this case, we just use the packets as ptr to the device buffer */ - p->Header.Data = stream->render.hostBuffer; - p->Header.FrameExtent = stream->render.framesPerBuffer*stream->render.bytesPerFrame; - p->Header.DataUsed = stream->render.framesPerBuffer*stream->render.bytesPerFrame; - p->Header.Size = sizeof(p->Header); - p->Header.PresentationTime.Numerator = 1; - p->Header.PresentationTime.Denominator = 1; - - ++p; - p->Header.Data = 
stream->render.hostBuffer + stream->render.framesPerBuffer*stream->render.bytesPerFrame; - p->Header.FrameExtent = stream->render.framesPerBuffer*stream->render.bytesPerFrame; - p->Header.DataUsed = stream->render.framesPerBuffer*stream->render.bytesPerFrame; - p->Header.Size = sizeof(p->Header); - p->Header.PresentationTime.Numerator = 1; - p->Header.PresentationTime.Denominator = 1; - - if (stream->render.pPin->pinKsSubType == SubType_kNotification) - { - result = PinRegisterNotificationHandle(stream->render.pPin, stream->render.events[0]); - - if (result != paNoError) - { - PA_DEBUG(("Failed to register rendering notification handle\n")); - PaWinWDM_SetLastErrorInfo(paUnanticipatedHostError, "Failed to register rendering notification handle"); - result = paUnanticipatedHostError; - goto error; - } - } - - result = PinRegisterPositionRegister(stream->render.pPin); - - if (result != paNoError) - { - unsigned long pos = 0xdeadc0de; - PA_DEBUG(("Failed to register rendering position register, using PinGetAudioPositionViaIOCTLRead\n")); - stream->render.pPin->fnAudioPosition = PinGetAudioPositionViaIOCTLRead; - /* Test position function */ - result = (stream->render.pPin->fnAudioPosition)(stream->render.pPin, &pos); - if (result != paNoError || pos != 0x0) - { - PA_DEBUG(("Failed to read render position register (IOCTL)\n")); - PaWinWDM_SetLastErrorInfo(paUnanticipatedHostError, "Failed to read render position register (IOCTL)"); - result = paUnanticipatedHostError; - goto error; - } - } - else - { - stream->render.pPin->fnAudioPosition = PinGetAudioPositionMemoryMapped; - } - } - break; - default: - /* Undefined wave type!! */ - assert(0); - result = paInternalError; - PaWinWDM_SetLastErrorInfo(result, "Wave type %u ??", stream->render.pPin->parentFilter->devInfo.streamingType); - goto error; - } - } - - stream->streamStarted = 0; - stream->streamActive = 0; - stream->streamStop = 0; - stream->streamAbort = 0; - stream->streamFlags = streamFlags; - stream->oldProcessPriority = REALTIME_PRIORITY_CLASS; - - /* Increase ref count on filters in use, so that a CommitDeviceInfos won't delete them */ - if (stream->capture.pPin != 0) - { - FilterAddRef(stream->capture.pPin->parentFilter); - } - if (stream->render.pPin != 0) - { - FilterAddRef(stream->render.pPin->parentFilter); - } - - /* Ok, now update our host API specific stream info */ - if (stream->userInputChannels) - { - PaWinWdmDeviceInfo *pDeviceInfo = (PaWinWdmDeviceInfo*)wdmHostApi->inheritedHostApiRep.deviceInfos[inputParameters->device]; - - stream->hostApiStreamInfo.input.device = Pa_HostApiDeviceIndexToDeviceIndex(Pa_HostApiTypeIdToHostApiIndex(paWDMKS), inputParameters->device); - stream->hostApiStreamInfo.input.channels = stream->deviceInputChannels; - stream->hostApiStreamInfo.input.muxNodeId = -1; - if (stream->capture.pPin->inputs) - { - stream->hostApiStreamInfo.input.muxNodeId = stream->capture.pPin->inputs[pDeviceInfo->muxPosition]->muxNodeId; - } - stream->hostApiStreamInfo.input.endpointPinId = pDeviceInfo->endpointPinId; - stream->hostApiStreamInfo.input.framesPerHostBuffer = stream->capture.framesPerBuffer; - stream->hostApiStreamInfo.input.streamingSubType = stream->capture.pPin->pinKsSubType; - } - else - { - stream->hostApiStreamInfo.input.device = paNoDevice; - } - if (stream->userOutputChannels) - { - stream->hostApiStreamInfo.output.device = Pa_HostApiDeviceIndexToDeviceIndex(Pa_HostApiTypeIdToHostApiIndex(paWDMKS), outputParameters->device); - stream->hostApiStreamInfo.output.channels =
stream->deviceOutputChannels; - stream->hostApiStreamInfo.output.framesPerHostBuffer = stream->render.framesPerBuffer; - stream->hostApiStreamInfo.output.endpointPinId = stream->render.pPin->endpointPinId; - stream->hostApiStreamInfo.output.streamingSubType = stream->render.pPin->pinKsSubType; - } - else - { - stream->hostApiStreamInfo.output.device = paNoDevice; - } - /*stream->streamRepresentation.streamInfo.hostApiTypeId = paWDMKS; - stream->streamRepresentation.streamInfo.hostApiSpecificStreamInfo = &stream->hostApiStreamInfo;*/ - stream->streamRepresentation.streamInfo.structVersion = 2; - - *s = (PaStream*)stream; - - PA_LOGL_; - return result; - -occupied: - /* Ok, someone else is hogging the pin, bail out */ - assert (result == paDeviceUnavailable); - PaWinWDM_SetLastErrorInfo(result, "Device is occupied"); - -error: - PaUtil_TerminateBufferProcessor( &stream->bufferProcessor ); - - CloseStreamEvents(stream); - - if (stream->allocGroup) - { - PaUtil_FreeAllAllocations(stream->allocGroup); - PaUtil_DestroyAllocationGroup(stream->allocGroup); - stream->allocGroup = 0; - } - - if(stream->render.pPin) - PinClose(stream->render.pPin); - if(stream->capture.pPin) - PinClose(stream->capture.pPin); - - PaUtil_FreeMemory( stream ); - - PA_LOGL_; - return result; -} - -/* -When CloseStream() is called, the multi-api layer ensures that -the stream has already been stopped or aborted. -*/ -static PaError CloseStream( PaStream* s ) -{ - PaError result = paNoError; - PaWinWdmStream *stream = (PaWinWdmStream*)s; - - PA_LOGE_; - - assert(!stream->streamStarted); - assert(!stream->streamActive); - - PaUtil_TerminateBufferProcessor( &stream->bufferProcessor ); - PaUtil_TerminateStreamRepresentation( &stream->streamRepresentation ); - - CloseStreamEvents(stream); - - if (stream->allocGroup) - { - PaUtil_FreeAllAllocations(stream->allocGroup); - PaUtil_DestroyAllocationGroup(stream->allocGroup); - stream->allocGroup = 0; - } - - if(stream->render.pPin) - { - PinClose(stream->render.pPin); - } - if(stream->capture.pPin) - { - PinClose(stream->capture.pPin); - } - - if (stream->render.pPin) - { - FilterFree(stream->render.pPin->parentFilter); - } - if (stream->capture.pPin) - { - FilterFree(stream->capture.pPin->parentFilter); - } - - PaUtil_FreeMemory( stream ); - - PA_LOGL_; - return result; -} - -/* -Write the supplied packet to the pin -Asynchronous -Should return paNoError on success -*/ -static PaError PinWrite(HANDLE h, DATAPACKET* p) -{ - PaError result = paNoError; - unsigned long cbReturned = 0; - BOOL fRes = DeviceIoControl(h, - IOCTL_KS_WRITE_STREAM, - NULL, - 0, - &p->Header, - p->Header.Size, - &cbReturned, - &p->Signal); - if (!fRes) - { - unsigned long error = GetLastError(); - if (error != ERROR_IO_PENDING) - { - result = paInternalError; - } - } - return result; -} - -/* -Read to the supplied packet from the pin -Asynchronous -Should return paNoError on success -*/ -static PaError PinRead(HANDLE h, DATAPACKET* p) -{ - PaError result = paNoError; - unsigned long cbReturned = 0; - BOOL fRes = DeviceIoControl(h, - IOCTL_KS_READ_STREAM, - NULL, - 0, - &p->Header, - p->Header.Size, - &cbReturned, - &p->Signal); - if (!fRes) - { - unsigned long error = GetLastError(); - if (error != ERROR_IO_PENDING) - { - result = paInternalError; - } - } - return result; -} - -/* -Copy the first interleaved channel of 16 bit data to the other channels -*/ -static void DuplicateFirstChannelInt16(void* buffer, int channels, int samples) -{ - unsigned short* data = (unsigned short*)buffer; - int channel; - 
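/* Walk the interleaved buffer frame by frame: read the channel-0 sample, then copy it into the remaining channels-1 slots of the frame (the 24-bit and 32-bit variants below do the same with wider samples). */ -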
unsigned short sourceSample; - while( samples-- ) - { - sourceSample = *data++; - channel = channels-1; - while( channel-- ) - { - *data++ = sourceSample; - } - } -} - -/* -Copy the first interleaved channel of 24 bit data to the other channels -*/ -static void DuplicateFirstChannelInt24(void* buffer, int channels, int samples) -{ - unsigned char* data = (unsigned char*)buffer; - int channel; - unsigned char sourceSample[3]; - while( samples-- ) - { - sourceSample[0] = data[0]; - sourceSample[1] = data[1]; - sourceSample[2] = data[2]; - data += 3; - channel = channels-1; - while( channel-- ) - { - data[0] = sourceSample[0]; - data[1] = sourceSample[1]; - data[2] = sourceSample[2]; - data += 3; - } - } -} - -/* -Copy the first interleaved channel of 32 bit data to the other channels -*/ -static void DuplicateFirstChannelInt32(void* buffer, int channels, int samples) -{ - unsigned long* data = (unsigned long*)buffer; - int channel; - unsigned long sourceSample; - while( samples-- ) - { - sourceSample = *data++; - channel = channels-1; - while( channel-- ) - { - *data++ = sourceSample; - } - } -} - -/* -Increase the priority of the calling thread to RT -*/ -static HANDLE BumpThreadPriority() -{ - HANDLE hThread = GetCurrentThread(); - DWORD dwTask = 0; - HANDLE hAVRT = NULL; - - /* If we have access to AVRT.DLL (Vista and later), use it */ - if (paWinWDMKSAvRtEntryPoints.AvSetMmThreadCharacteristics != NULL) - { - hAVRT = paWinWDMKSAvRtEntryPoints.AvSetMmThreadCharacteristics("Pro Audio", &dwTask); - if (hAVRT != NULL && hAVRT != INVALID_HANDLE_VALUE) - { - BOOL bret = paWinWDMKSAvRtEntryPoints.AvSetMmThreadPriority(hAVRT, PA_AVRT_PRIORITY_CRITICAL); - if (!bret) - { - PA_DEBUG(("Set mm thread prio to critical failed!\n")); - } - else - { - return hAVRT; - } - } - else - { - PA_DEBUG(("Set mm thread characteristic to 'Pro Audio' failed, reverting to SetThreadPriority\n")); - } - } - - /* For XP and earlier, or if AvSetMmThreadCharacteristics fails (MMCSS disabled ?) 
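In that case, fall back to raising the timer resolution to 1 ms via timeBeginPeriod and boosting the thread with THREAD_PRIORITY_TIME_CRITICAL; DropThreadPriority undoes both.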
*/ - if (timeBeginPeriod(1) != TIMERR_NOERROR) { - PA_DEBUG(("timeBeginPeriod(1) failed!\n")); - } - - if (!SetThreadPriority(hThread, THREAD_PRIORITY_TIME_CRITICAL)) { - PA_DEBUG(("SetThreadPriority failed!\n")); - } - - return hAVRT; -} - -/* -Decrease the priority of the calling thread to normal -*/ -static void DropThreadPriority(HANDLE hAVRT) -{ - HANDLE hThread = GetCurrentThread(); - - if (hAVRT != NULL) - { - paWinWDMKSAvRtEntryPoints.AvSetMmThreadPriority(hAVRT, PA_AVRT_PRIORITY_NORMAL); - paWinWDMKSAvRtEntryPoints.AvRevertMmThreadCharacteristics(hAVRT); - return; - } - - SetThreadPriority(hThread, THREAD_PRIORITY_NORMAL); - timeEndPeriod(1); -} - -static PaError PreparePinForStart(PaWinWdmPin* pin) -{ - PaError result; - result = PinSetState(pin, KSSTATE_ACQUIRE); - if (result != paNoError) - { - goto error; - } - result = PinSetState(pin, KSSTATE_PAUSE); - if (result != paNoError) - { - goto error; - } - return result; - -error: - PinSetState(pin, KSSTATE_STOP); - return result; -} - -static PaError PreparePinsForStart(PaProcessThreadInfo* pInfo) -{ - PaError result = paNoError; - /* Submit buffers */ - if (pInfo->stream->capture.pPin) - { - if ((result = PreparePinForStart(pInfo->stream->capture.pPin)) != paNoError) - { - goto error; - } - - if (pInfo->stream->capture.pPin->parentFilter->devInfo.streamingType == Type_kWaveCyclic) - { - unsigned i; - for(i=0; i < pInfo->stream->capture.noOfPackets; ++i) - { - if ((result = PinRead(pInfo->stream->capture.pPin->handle, pInfo->stream->capture.packets + i)) != paNoError) - { - goto error; - } - ++pInfo->pending; - } - } - else - { - pInfo->pending = 2; - } - } - - if(pInfo->stream->render.pPin) - { - if ((result = PreparePinForStart(pInfo->stream->render.pPin)) != paNoError) - { - goto error; - } - - pInfo->priming += pInfo->stream->render.noOfPackets; - ++pInfo->pending; - SetEvent(pInfo->stream->render.events[0]); - if (pInfo->stream->render.pPin->parentFilter->devInfo.streamingType == Type_kWaveCyclic) - { - unsigned i; - for(i=1; i < pInfo->stream->render.noOfPackets; ++i) - { - SetEvent(pInfo->stream->render.events[i]); - ++pInfo->pending; - } - } - } - -error: - PA_DEBUG(("PreparePinsForStart = %d\n", result)); - return result; -} - -static PaError StartPin(PaWinWdmPin* pin) -{ - return PinSetState(pin, KSSTATE_RUN); -} - -static PaError StartPins(PaProcessThreadInfo* pInfo) -{ - PaError result = paNoError; - /* Start the pins as synced as possible */ - if (pInfo->stream->capture.pPin) - { - result = StartPin(pInfo->stream->capture.pPin); - } - if(pInfo->stream->render.pPin) - { - result = StartPin(pInfo->stream->render.pPin); - } - PA_DEBUG(("StartPins = %d\n", result)); - return result; -} - - -static PaError StopPin(PaWinWdmPin* pin) -{ - PinSetState(pin, KSSTATE_PAUSE); - PinSetState(pin, KSSTATE_STOP); - return paNoError; -} - - -static PaError StopPins(PaProcessThreadInfo* pInfo) -{ - PaError result = paNoError; - if(pInfo->stream->render.pPin) - { - StopPin(pInfo->stream->render.pPin); - } - if(pInfo->stream->capture.pPin) - { - StopPin(pInfo->stream->capture.pPin); - } - return result; -} - -typedef void (*TSetInputFrameCount)(PaUtilBufferProcessor*, unsigned long); -typedef void (*TSetInputChannel)(PaUtilBufferProcessor*, unsigned int, void *, unsigned int); -static const TSetInputFrameCount fnSetInputFrameCount[2] = { PaUtil_SetInputFrameCount, PaUtil_Set2ndInputFrameCount }; -static const TSetInputChannel fnSetInputChannel[2] = { PaUtil_SetInputChannel, PaUtil_Set2ndInputChannel }; - -static PaError 
PaDoProcessing(PaProcessThreadInfo* pInfo) -{ - PaError result = paNoError; - int i, framesProcessed = 0, doChannelCopy = 0; - ring_buffer_size_t inputFramesAvailable = PaUtil_GetRingBufferReadAvailable(&pInfo->stream->ringBuffer); - - /* Do necessary buffer processing (which will invoke user callback if necessary) */ - if (pInfo->cbResult == paContinue && - (pInfo->renderHead != pInfo->renderTail || inputFramesAvailable)) - { - unsigned processFullDuplex = pInfo->stream->capture.pPin && pInfo->stream->render.pPin && (!pInfo->priming); - - PA_HP_TRACE((pInfo->stream->hLog, "DoProcessing: InputFrames=%u", inputFramesAvailable)); - - PaUtil_BeginCpuLoadMeasurement( &pInfo->stream->cpuLoadMeasurer ); - - pInfo->ti.currentTime = PaUtil_GetTime(); - - PaUtil_BeginBufferProcessing(&pInfo->stream->bufferProcessor, &pInfo->ti, pInfo->underover); - pInfo->underover = 0; /* Reset the (under|over)flow status */ - - if (pInfo->renderTail != pInfo->renderHead) - { - DATAPACKET* packet = pInfo->renderPackets[pInfo->renderTail & cPacketsArrayMask].packet; - - assert(packet != 0); - assert(packet->Header.Data != 0); - - PaUtil_SetOutputFrameCount(&pInfo->stream->bufferProcessor, pInfo->stream->render.framesPerBuffer); - - for(i=0;i<pInfo->stream->userOutputChannels;i++) - { - /* Only write the user output channels. Leave the rest blank */ - PaUtil_SetOutputChannel(&pInfo->stream->bufferProcessor, - i, - ((unsigned char*)(packet->Header.Data))+(i*pInfo->stream->render.bytesPerSample), - pInfo->stream->deviceOutputChannels); - } - - /* We will do a copy to the other channels after the data has been written */ - doChannelCopy = ( pInfo->stream->userOutputChannels == 1 ); - } - - if (inputFramesAvailable && (!pInfo->stream->userOutputChannels || inputFramesAvailable >= (int)pInfo->stream->render.framesPerBuffer)) - { - unsigned wrapCntr = 0; - void* data[2] = {0}; - ring_buffer_size_t size[2] = {0}; - - /* If full-duplex, we just extract output buffer number of frames */ - if (pInfo->stream->userOutputChannels) - { - inputFramesAvailable = min(inputFramesAvailable, (int)pInfo->stream->render.framesPerBuffer); - } - - inputFramesAvailable = PaUtil_GetRingBufferReadRegions(&pInfo->stream->ringBuffer, - inputFramesAvailable, - &data[0], - &size[0], - &data[1], - &size[1]); - - for (wrapCntr = 0; wrapCntr < 2; ++wrapCntr) - { - if (size[wrapCntr] == 0) - break; - - fnSetInputFrameCount[wrapCntr](&pInfo->stream->bufferProcessor, size[wrapCntr]); - for(i=0;i<pInfo->stream->userInputChannels;i++) - { - /* Only read as many channels as the user wants */ - fnSetInputChannel[wrapCntr](&pInfo->stream->bufferProcessor, - i, - ((unsigned char*)(data[wrapCntr]))+(i*pInfo->stream->capture.bytesPerSample), - pInfo->stream->deviceInputChannels); - } - } - } - else - { - /* We haven't consumed anything from the ring buffer... */ - inputFramesAvailable = 0; - /* If we have full-duplex, this is at startup, so mark no-input!
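PaUtil_SetNoInput lets the buffer processor run the user callback for output priming before any capture data has arrived, rather than stalling until the ring buffer fills.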
*/ - if (pInfo->stream->userOutputChannels>0 && pInfo->stream->userInputChannels>0) - { - PA_HP_TRACE((pInfo->stream->hLog, "Input startup, marking no input.")); - PaUtil_SetNoInput(&pInfo->stream->bufferProcessor); - } - } - - if (processFullDuplex) /* full duplex */ - { - /* Only call the EndBufferProcessing function when the total input frames == total output frames */ - const unsigned long totalInputFrameCount = pInfo->stream->bufferProcessor.hostInputFrameCount[0] + pInfo->stream->bufferProcessor.hostInputFrameCount[1]; - const unsigned long totalOutputFrameCount = pInfo->stream->bufferProcessor.hostOutputFrameCount[0] + pInfo->stream->bufferProcessor.hostOutputFrameCount[1]; - - if(totalInputFrameCount == totalOutputFrameCount && totalOutputFrameCount != 0) - { - framesProcessed = PaUtil_EndBufferProcessing(&pInfo->stream->bufferProcessor, &pInfo->cbResult); - } - else - { - framesProcessed = 0; - } - } - else - { - framesProcessed = PaUtil_EndBufferProcessing(&pInfo->stream->bufferProcessor, &pInfo->cbResult); - } - - PA_HP_TRACE((pInfo->stream->hLog, "Frames processed: %u %s", framesProcessed, (pInfo->priming ? "(priming)":""))); - - if( doChannelCopy ) - { - DATAPACKET* packet = pInfo->renderPackets[pInfo->renderTail & cPacketsArrayMask].packet; - /* Copy the first output channel to the other channels */ - switch (pInfo->stream->render.bytesPerSample) - { - case 2: - DuplicateFirstChannelInt16(packet->Header.Data, pInfo->stream->deviceOutputChannels, pInfo->stream->render.framesPerBuffer); - break; - case 3: - DuplicateFirstChannelInt24(packet->Header.Data, pInfo->stream->deviceOutputChannels, pInfo->stream->render.framesPerBuffer); - break; - case 4: - DuplicateFirstChannelInt32(packet->Header.Data, pInfo->stream->deviceOutputChannels, pInfo->stream->render.framesPerBuffer); - break; - default: - assert(0); /* Unsupported format! 
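doChannelCopy is only set for mono user output, and the formats negotiated in OpenStream always yield 2, 3 or 4 bytes per sample here, so this branch should be unreachable.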
*/ - break; - } - } - PaUtil_EndCpuLoadMeasurement( &pInfo->stream->cpuLoadMeasurer, framesProcessed ); - - if (inputFramesAvailable) - { - PaUtil_AdvanceRingBufferReadIndex(&pInfo->stream->ringBuffer, inputFramesAvailable); - } - - if (pInfo->renderTail != pInfo->renderHead) - { - if (!pInfo->stream->streamStop) - { - result = pInfo->stream->render.pPin->fnSubmitHandler(pInfo, pInfo->renderTail); - if (result != paNoError) - { - PA_HP_TRACE((pInfo->stream->hLog, "Render submit handler failed with result %d", result)); - return result; - } - } - pInfo->renderTail++; - if (!pInfo->pinsStarted && pInfo->priming == 0) - { - /* We start the pins here to allow "prime time" */ - if ((result = StartPins(pInfo)) == paNoError) - { - PA_HP_TRACE((pInfo->stream->hLog, "Starting pins!")); - pInfo->pinsStarted = 1; - } - } - } - } - - return result; -} - -static VOID CALLBACK TimerAPCWaveRTPolledMode( - LPVOID lpArgToCompletionRoutine, - DWORD dwTimerLowValue, - DWORD dwTimerHighValue) -{ - HANDLE* pHandles = (HANDLE*)lpArgToCompletionRoutine; - if (pHandles[0]) SetEvent(pHandles[0]); - if (pHandles[1]) SetEvent(pHandles[1]); -} - -static DWORD GetCurrentTimeInMillisecs() -{ - return timeGetTime(); -} - -PA_THREAD_FUNC ProcessingThread(void* pParam) -{ - PaError result = paNoError; - HANDLE hAVRT = NULL; - HANDLE hTimer = NULL; - HANDLE *handleArray = NULL; - HANDLE timerEventHandles[2] = {0}; - unsigned noOfHandles = 0; - unsigned captureEvents = 0; - unsigned renderEvents = 0; - unsigned timerPeriod = 0; - DWORD timeStamp[2] = {0}; - - PaProcessThreadInfo info; - memset(&info, 0, sizeof(PaProcessThreadInfo)); - info.stream = (PaWinWdmStream*)pParam; - - info.stream->threadResult = paNoError; - - PA_LOGE_; - - info.ti.inputBufferAdcTime = 0.0; - info.ti.currentTime = 0.0; - info.ti.outputBufferDacTime = 0.0; - - PA_DEBUG(("In buffer len: %.3f ms\n",(2000*info.stream->capture.framesPerBuffer) / info.stream->streamRepresentation.streamInfo.sampleRate)); - PA_DEBUG(("Out buffer len: %.3f ms\n",(2000*info.stream->render.framesPerBuffer) / info.stream->streamRepresentation.streamInfo.sampleRate)); - info.timeout = (DWORD)max( - (2000*info.stream->render.framesPerBuffer/info.stream->streamRepresentation.streamInfo.sampleRate + 0.5), - (2000*info.stream->capture.framesPerBuffer/info.stream->streamRepresentation.streamInfo.sampleRate + 0.5)); - info.timeout = max(info.timeout*8, 100); - timerPeriod = info.timeout; - PA_DEBUG(("Timeout = %ld ms\n",info.timeout)); - - /* Allocate handle array */ - handleArray = (HANDLE*)PaUtil_AllocateMemory((info.stream->capture.noOfPackets + info.stream->render.noOfPackets + 1) * sizeof(HANDLE)); - - /* Setup handle array for WFMO */ - if (info.stream->capture.pPin != 0) - { - handleArray[noOfHandles++] = info.stream->capture.events[0]; - if (info.stream->capture.pPin->parentFilter->devInfo.streamingType == Type_kWaveCyclic) - { - unsigned i; - for(i=1; i < info.stream->capture.noOfPackets; ++i) - { - handleArray[noOfHandles++] = info.stream->capture.events[i]; - } - } - captureEvents = noOfHandles; - renderEvents = noOfHandles; - } - - if (info.stream->render.pPin != 0) - { - handleArray[noOfHandles++] = info.stream->render.events[0]; - if (info.stream->render.pPin->parentFilter->devInfo.streamingType == Type_kWaveCyclic) - { - unsigned i; - for(i=1; i < info.stream->render.noOfPackets; ++i) - { - handleArray[noOfHandles++] = info.stream->render.events[i]; - } - } - renderEvents = noOfHandles; - } - handleArray[noOfHandles++] = info.stream->eventAbort; - assert(noOfHandles
<= (info.stream->capture.noOfPackets + info.stream->render.noOfPackets + 1)); - - /* Prepare render and capture pins */ - if ((result = PreparePinsForStart(&info)) != paNoError) - { - PA_DEBUG(("Failed to prepare device(s)!\n")); - goto error; - } - - /* Init high speed logger */ - if (PaUtil_InitializeHighSpeedLog(&info.stream->hLog, 1000000) != paNoError) - { - PA_DEBUG(("Failed to init high speed logger!\n")); - goto error; - } - - /* Heighten priority here */ - hAVRT = BumpThreadPriority(); - - /* If input only, we start the pins immediately */ - if (info.stream->render.pPin == 0) - { - if ((result = StartPins(&info)) != paNoError) - { - PA_DEBUG(("Failed to start device(s)!\n")); - goto error; - } - info.pinsStarted = 1; - } - - /* Handle WaveRT polled mode */ - { - const unsigned fs = (unsigned)info.stream->streamRepresentation.streamInfo.sampleRate; - if (info.stream->capture.pPin != 0 && info.stream->capture.pPin->pinKsSubType == SubType_kPolled) - { - timerEventHandles[0] = info.stream->capture.events[0]; - timerPeriod = min(timerPeriod, (1000*info.stream->capture.framesPerBuffer)/fs); - } - - if (info.stream->render.pPin != 0 && info.stream->render.pPin->pinKsSubType == SubType_kPolled) - { - timerEventHandles[1] = info.stream->render.events[0]; - timerPeriod = min(timerPeriod, (1000*info.stream->render.framesPerBuffer)/fs); - } - - if (timerEventHandles[0] || timerEventHandles[1]) - { - LARGE_INTEGER dueTime = {0}; - - timerPeriod=max(timerPeriod/5,1); - PA_DEBUG(("Timer event handles=0x%04X,0x%04X period=%u ms", timerEventHandles[0], timerEventHandles[1], timerPeriod)); - hTimer = CreateWaitableTimer(0, FALSE, NULL); - if (hTimer == NULL) - { - result = paUnanticipatedHostError; - goto error; - } - /* invoke first timeout immediately */ - if (!SetWaitableTimer(hTimer, &dueTime, timerPeriod, TimerAPCWaveRTPolledMode, timerEventHandles, FALSE)) - { - result = paUnanticipatedHostError; - goto error; - } - PA_DEBUG(("Waitable timer started, period = %u ms\n", timerPeriod)); - } - } - - /* Mark stream as active */ - info.stream->streamActive = 1; - info.stream->threadResult = paNoError; - - /* Up and running... */ - SetEvent(info.stream->eventStreamStart[StreamStart_kOk]); - - /* Take timestamp here */ - timeStamp[0] = timeStamp[1] = GetCurrentTimeInMillisecs(); - - while(!info.stream->streamAbort) - { - unsigned doProcessing = 1; - unsigned wait = WaitForMultipleObjects(noOfHandles, handleArray, FALSE, 0); - unsigned eventSignalled = wait - WAIT_OBJECT_0; - DWORD dwCurrentTime = 0; - - if (wait == WAIT_FAILED) - { - PA_DEBUG(("Wait failed = %ld! \n",wait)); - break; - } - if (wait == WAIT_TIMEOUT) - { - wait = WaitForMultipleObjectsEx(noOfHandles, handleArray, FALSE, 50, TRUE); - eventSignalled = wait - WAIT_OBJECT_0; - } - else - { - if (eventSignalled < captureEvents) - { - if (PaUtil_GetRingBufferWriteAvailable(&info.stream->ringBuffer) == 0) - { - PA_HP_TRACE((info.stream->hLog, "!!!!! Input overflow !!!!!")); - info.underover |= paInputOverflow; - } - } - else if (eventSignalled < renderEvents) - { - if (!info.priming && info.renderHead - info.renderTail > 1) - { - PA_HP_TRACE((info.stream->hLog, "!!!!! Output underflow !!!!!")); - info.underover |= paOutputUnderflow; - } - } - } - - /* Get event time */ - dwCurrentTime = GetCurrentTimeInMillisecs(); - - /* Since we can mix capture/render devices between WaveCyclic, WaveRT polled and WaveRT notification (3x3 combinations), - we can't rely on the timeout of WFMO to check for device timeouts, we need to keep tally. 
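timeStamp[0] and timeStamp[1] hold the tick count of the last capture and render event respectively; if either side stays silent for longer than info.timeout milliseconds, the stream is assumed dead and we bail out with paTimedOut.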
*/ - if (info.stream->capture.pPin && (dwCurrentTime - timeStamp[0]) >= info.timeout) - { - PA_DEBUG(("Timeout for capture device (timeout %u ms, elapsed %u ms)!\n", info.timeout, (dwCurrentTime - timeStamp[0]))); - result = paTimedOut; - break; - } - if (info.stream->render.pPin && (dwCurrentTime - timeStamp[1]) >= info.timeout) - { - PA_DEBUG(("Timeout for render device (timeout %u ms, elapsed %u ms)!\n", info.timeout, (dwCurrentTime - timeStamp[1]))); - result = paTimedOut; - break; - } - - if (wait == WAIT_IO_COMPLETION) - { - /* Waitable timer has fired! */ - PA_HP_TRACE((info.stream->hLog, "WAIT_IO_COMPLETION")); - continue; - } - - if (wait == WAIT_TIMEOUT) - { - continue; - } - else - { - if (eventSignalled < captureEvents) - { - if (info.stream->capture.pPin->fnEventHandler(&info, eventSignalled) == paNoError) - { - timeStamp[0] = dwCurrentTime; - - /* Since we use the ring buffer, we can submit the buffers directly */ - if (!info.stream->streamStop) - { - result = info.stream->capture.pPin->fnSubmitHandler(&info, info.captureTail); - if (result != paNoError) - { - PA_HP_TRACE((info.stream->hLog, "Capture submit handler failed with result %d", result)); - break; - } - } - ++info.captureTail; - /* If full-duplex, let _only_ render event trigger processing. We still need the stream stop - handling working, so let that be processed anyways... */ - if (info.stream->userOutputChannels > 0) - { - doProcessing = 0; - } - } - } - else if (eventSignalled < renderEvents) - { - timeStamp[1] = dwCurrentTime; - eventSignalled -= captureEvents; - info.stream->render.pPin->fnEventHandler(&info, eventSignalled); - } - else - { - assert(info.stream->streamAbort); - PA_HP_TRACE((info.stream->hLog, "Stream abort!")); - continue; - } - } - - /* Handle processing */ - if (doProcessing) - { - result = PaDoProcessing(&info); - if (result != paNoError) - { - PA_HP_TRACE((info.stream->hLog, "PaDoProcessing failed!")); - break; - } - } - - if(info.stream->streamStop && info.cbResult != paComplete) - { - PA_HP_TRACE((info.stream->hLog, "Stream stop!
pending=%d",info.pending)); - info.cbResult = paComplete; /* Stop, but play remaining buffers */ - } - - if(info.pending<=0) - { - PA_HP_TRACE((info.stream->hLog, "pending==0 finished...")); - break; - } - if((!info.stream->render.pPin)&&(info.cbResult!=paContinue)) - { - PA_HP_TRACE((info.stream->hLog, "record only cbResult=%d...",info.cbResult)); - break; - } - } - - PA_DEBUG(("Finished processing loop\n")); - - info.stream->threadResult = result; - goto bailout; - -error: - PA_DEBUG(("Error starting processing thread\n")); - /* Set the "error" event together with result */ - info.stream->threadResult = result; - SetEvent(info.stream->eventStreamStart[StreamStart_kFailed]); - -bailout: - if (hTimer) - { - PA_DEBUG(("Waitable timer stopped\n", timerPeriod)); - CancelWaitableTimer(hTimer); - CloseHandle(hTimer); - hTimer = 0; - } - - if (info.pinsStarted) - { - StopPins(&info); - } - - /* Lower prio here */ - DropThreadPriority(hAVRT); - - if (handleArray != NULL) - { - PaUtil_FreeMemory(handleArray); - } - -#if PA_TRACE_REALTIME_EVENTS - if (info.stream->hLog) - { - PA_DEBUG(("Dumping highspeed trace...\n")); - PaUtil_DumpHighSpeedLog(info.stream->hLog, "hp_trace.log"); - PaUtil_DiscardHighSpeedLog(info.stream->hLog); - info.stream->hLog = 0; - } -#endif - info.stream->streamActive = 0; - - if((!info.stream->streamStop)&&(!info.stream->streamAbort)) - { - /* Invoke the user stream finished callback */ - /* Only do it from here if not being stopped/aborted by user */ - if( info.stream->streamRepresentation.streamFinishedCallback != 0 ) - info.stream->streamRepresentation.streamFinishedCallback( info.stream->streamRepresentation.userData ); - } - info.stream->streamStop = 0; - info.stream->streamAbort = 0; - - PA_LOGL_; - return 0; -} - - -static PaError StartStream( PaStream *s ) -{ - PaError result = paNoError; - PaWinWdmStream *stream = (PaWinWdmStream*)s; - - PA_LOGE_; - - if (stream->streamThread != NULL) - { - return paStreamIsNotStopped; - } - - stream->streamStop = 0; - stream->streamAbort = 0; - - ResetStreamEvents(stream); - - PaUtil_ResetBufferProcessor( &stream->bufferProcessor ); - - stream->oldProcessPriority = GetPriorityClass(GetCurrentProcess()); - /* Uncomment the following line to enable dynamic boosting of the process - * priority to real time for best low latency support - * Disabled by default because RT processes can easily block the OS */ - /*ret = SetPriorityClass(GetCurrentProcess(),REALTIME_PRIORITY_CLASS); - PA_DEBUG(("Class ret = %d;",ret));*/ - - stream->streamThread = CREATE_THREAD_FUNCTION (NULL, 0, ProcessingThread, stream, CREATE_SUSPENDED, NULL); - if(stream->streamThread == NULL) - { - result = paInsufficientMemory; - goto end; - } - ResumeThread(stream->streamThread); - - switch (WaitForMultipleObjects(2, stream->eventStreamStart, FALSE, 5000)) - { - case WAIT_OBJECT_0 + StreamStart_kOk: - PA_DEBUG(("Processing thread started!\n")); - result = paNoError; - /* streamActive is set in processing thread */ - stream->streamStarted = 1; - break; - case WAIT_OBJECT_0 + StreamStart_kFailed: - PA_DEBUG(("Processing thread start failed! 
(result=%d)\n", stream->threadResult)); - result = stream->threadResult; - /* Wait for the stream to really exit */ - WaitForSingleObject(stream->streamThread, 200); - CloseHandle(stream->streamThread); - stream->streamThread = 0; - break; - case WAIT_TIMEOUT: - default: - result = paTimedOut; - PaWinWDM_SetLastErrorInfo(result, "Failed to start processing thread (timeout)!"); - break; - } - -end: - PA_LOGL_; - return result; -} - - -static PaError StopStream( PaStream *s ) -{ - PaError result = paNoError; - PaWinWdmStream *stream = (PaWinWdmStream*)s; - BOOL doCb = FALSE; - - PA_LOGE_; - - if(stream->streamActive) - { - DWORD dwExitCode; - doCb = TRUE; - stream->streamStop = 1; - if (GetExitCodeThread(stream->streamThread, &dwExitCode) && dwExitCode == STILL_ACTIVE) - { - if (WaitForSingleObject(stream->streamThread, INFINITE) != WAIT_OBJECT_0) - { - PA_DEBUG(("StopStream: stream thread terminated\n")); - TerminateThread(stream->streamThread, -1); - result = paTimedOut; - } - } - else - { - PA_DEBUG(("StopStream: GECT says not active, but streamActive is not false ??")); - result = paUnanticipatedHostError; - PaWinWDM_SetLastErrorInfo(result, "StopStream: GECT says not active, but streamActive = %d", stream->streamActive); - } - } - else - { - if (stream->threadResult != paNoError) - { - PA_DEBUG(("StopStream: Stream not active (%d)\n", stream->threadResult)); - result = stream->threadResult; - stream->threadResult = paNoError; - } - } - - if (stream->streamThread != NULL) - { - CloseHandle(stream->streamThread); - stream->streamThread = 0; - } - stream->streamStarted = 0; - stream->streamActive = 0; - - if(doCb) - { - /* Do user callback now after all state has been reset */ - /* This means it should be safe for the called function */ - /* to invoke e.g. StartStream */ - if( stream->streamRepresentation.streamFinishedCallback != 0 ) - stream->streamRepresentation.streamFinishedCallback( stream->streamRepresentation.userData ); - } - - PA_LOGL_; - return result; -} - -static PaError AbortStream( PaStream *s ) -{ - PaError result = paNoError; - PaWinWdmStream *stream = (PaWinWdmStream*)s; - int doCb = 0; - - PA_LOGE_; - - if(stream->streamActive) - { - doCb = 1; - stream->streamAbort = 1; - SetEvent(stream->eventAbort); /* Signal immediately */ - if (WaitForSingleObject(stream->streamThread, 10000) != WAIT_OBJECT_0) - { - TerminateThread(stream->streamThread, -1); - result = paTimedOut; - - PA_DEBUG(("AbortStream: stream thread terminated\n")); - } - assert(!stream->streamActive); - } - CloseHandle(stream->streamThread); - stream->streamThread = NULL; - stream->streamStarted = 0; - - if(doCb) - { - /* Do user callback now after all state has been reset */ - /* This means it should be safe for the called function */ - /* to invoke e.g. 
StartStream */ - if( stream->streamRepresentation.streamFinishedCallback != 0 ) - stream->streamRepresentation.streamFinishedCallback( stream->streamRepresentation.userData ); - } - - stream->streamActive = 0; - stream->streamStarted = 0; - - PA_LOGL_; - return result; -} - - -static PaError IsStreamStopped( PaStream *s ) -{ - PaWinWdmStream *stream = (PaWinWdmStream*)s; - int result = 0; - - PA_LOGE_; - - if(!stream->streamStarted) - result = 1; - - PA_LOGL_; - return result; -} - - -static PaError IsStreamActive( PaStream *s ) -{ - PaWinWdmStream *stream = (PaWinWdmStream*)s; - int result = 0; - - PA_LOGE_; - - if(stream->streamActive) - result = 1; - - PA_LOGL_; - return result; -} - - -static PaTime GetStreamTime( PaStream* s ) -{ - PA_LOGE_; - PA_LOGL_; - (void)s; - return PaUtil_GetTime(); -} - - -static double GetStreamCpuLoad( PaStream* s ) -{ - PaWinWdmStream *stream = (PaWinWdmStream*)s; - double result; - PA_LOGE_; - result = PaUtil_GetCpuLoad( &stream->cpuLoadMeasurer ); - PA_LOGL_; - return result; -} - - -/* -As separate stream interfaces are used for blocking and callback -streams, the following functions can be guaranteed to only be called -for blocking streams. -*/ - -static PaError ReadStream( PaStream* s, - void *buffer, - unsigned long frames ) -{ - PaWinWdmStream *stream = (PaWinWdmStream*)s; - - PA_LOGE_; - - /* suppress unused variable warnings */ - (void) buffer; - (void) frames; - (void) stream; - - /* IMPLEMENT ME, see portaudio.h for required behavior*/ - PA_LOGL_; - return paInternalError; -} - - -static PaError WriteStream( PaStream* s, - const void *buffer, - unsigned long frames ) -{ - PaWinWdmStream *stream = (PaWinWdmStream*)s; - - PA_LOGE_; - - /* suppress unused variable warnings */ - (void) buffer; - (void) frames; - (void) stream; - - /* IMPLEMENT ME, see portaudio.h for required behavior*/ - PA_LOGL_; - return paInternalError; -} - - -static signed long GetStreamReadAvailable( PaStream* s ) -{ - PaWinWdmStream *stream = (PaWinWdmStream*)s; - - PA_LOGE_; - - /* suppress unused variable warnings */ - (void) stream; - - /* IMPLEMENT ME, see portaudio.h for required behavior*/ - PA_LOGL_; - return 0; -} - - -static signed long GetStreamWriteAvailable( PaStream* s ) -{ - PaWinWdmStream *stream = (PaWinWdmStream*)s; - - PA_LOGE_; - /* suppress unused variable warnings */ - (void) stream; - - /* IMPLEMENT ME, see portaudio.h for required behavior*/ - PA_LOGL_; - return 0; -} - -/***************************************************************************************/ -/* Event and submit handlers for WaveCyclic */ -/***************************************************************************************/ - -static PaError PaPinCaptureEventHandler_WaveCyclic(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - PaError result = paNoError; - ring_buffer_size_t frameCount; - DATAPACKET* packet = pInfo->stream->capture.packets + eventIndex; - - assert( eventIndex < pInfo->stream->capture.noOfPackets ); - - if (packet->Header.DataUsed == 0) - { - PA_HP_TRACE((pInfo->stream->hLog, ">>> Capture bogus event (no data): idx=%u", eventIndex)); - - /* Bogus event, reset! 
This is to handle the behavior of this USB mic: http://shop.xtz.se/measurement-system/microphone-to-dirac-live-room-correction-suite - on startup of streaming, where it erroneously sets the event without the corresponding buffer being filled (DataUsed == 0) */ - ResetEvent(packet->Signal.hEvent); - - result = -1; /* Only need this to be NOT paNoError */ - } - else - { - pInfo->capturePackets[pInfo->captureHead & cPacketsArrayMask].packet = packet; - - frameCount = PaUtil_WriteRingBuffer(&pInfo->stream->ringBuffer, packet->Header.Data, pInfo->stream->capture.framesPerBuffer); - - PA_HP_TRACE((pInfo->stream->hLog, ">>> Capture event: idx=%u (frames=%u)", eventIndex, frameCount)); - ++pInfo->captureHead; - } - - --pInfo->pending; /* This needs to be done in either case */ - return result; -} - -static PaError PaPinCaptureSubmitHandler_WaveCyclic(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - PaError result = paNoError; - DATAPACKET* packet = pInfo->capturePackets[pInfo->captureTail & cPacketsArrayMask].packet; - pInfo->capturePackets[pInfo->captureTail & cPacketsArrayMask].packet = 0; - assert(packet != 0); - PA_HP_TRACE((pInfo->stream->hLog, "Capture submit: %u", eventIndex)); - packet->Header.DataUsed = 0; /* Reset for reuse */ - packet->Header.OptionsFlags = 0; /* Reset for reuse. Required for e.g. Focusrite Scarlett 2i4 (1st Gen) see #310 */ - ResetEvent(packet->Signal.hEvent); - result = PinRead(pInfo->stream->capture.pPin->handle, packet); - ++pInfo->pending; - return result; -} - -static PaError PaPinRenderEventHandler_WaveCyclic(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - assert( eventIndex < pInfo->stream->render.noOfPackets ); - - pInfo->renderPackets[pInfo->renderHead & cPacketsArrayMask].packet = pInfo->stream->render.packets + eventIndex; - PA_HP_TRACE((pInfo->stream->hLog, "<<< Render event : idx=%u head=%u", eventIndex, pInfo->renderHead)); - ++pInfo->renderHead; - --pInfo->pending; - return paNoError; -} - -static PaError PaPinRenderSubmitHandler_WaveCyclic(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - PaError result = paNoError; - DATAPACKET* packet = pInfo->renderPackets[pInfo->renderTail & cPacketsArrayMask].packet; - pInfo->renderPackets[pInfo->renderTail & cPacketsArrayMask].packet = 0; - assert(packet != 0); - - PA_HP_TRACE((pInfo->stream->hLog, "Render submit : %u idx=%u", pInfo->renderTail, (unsigned)(packet - pInfo->stream->render.packets))); - ResetEvent(packet->Signal.hEvent); - result = PinWrite(pInfo->stream->render.pPin->handle, packet); - /* Reset event, just in case we have an analogous situation to capture (see PaPinCaptureSubmitHandler_WaveCyclic) */ - ++pInfo->pending; - if (pInfo->priming) - { - --pInfo->priming; - } - return result; -} - -/***************************************************************************************/ -/* Event and submit handlers for WaveRT */ -/***************************************************************************************/ - -static PaError PaPinCaptureEventHandler_WaveRTEvent(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - unsigned long pos; - unsigned realInBuf; - unsigned frameCount; - PaWinWdmIOInfo* pCapture = &pInfo->stream->capture; - const unsigned halfInputBuffer = pCapture->hostBufferSize >> 1; - PaWinWdmPin* pin = pCapture->pPin; - DATAPACKET* packet = 0; - - /* Get hold of current ADC position */ - pin->fnAudioPosition(pin, &pos); - /* Wrap it (robi: why not use hw latency compensation here ?? because pos then gets _way_ off from - where it should be, i.e. 
at beginning or half buffer position. Why? No idea.) */ - - pos %= pCapture->hostBufferSize; - /* Then realInBuf will point to "other" half of double buffer */ - realInBuf = pos < halfInputBuffer ? 1U : 0U; - - packet = pInfo->stream->capture.packets + realInBuf; - - /* Call barrier (or dummy) */ - pin->fnMemBarrier(); - - /* Put it in queue */ - frameCount = PaUtil_WriteRingBuffer(&pInfo->stream->ringBuffer, packet->Header.Data, pCapture->framesPerBuffer); - - pInfo->capturePackets[pInfo->captureHead & cPacketsArrayMask].packet = packet; - - PA_HP_TRACE((pInfo->stream->hLog, "Capture event (WaveRT): idx=%u head=%u (pos = %4.1lf%%, frames=%u)", realInBuf, pInfo->captureHead, (pos * 100.0 / pCapture->hostBufferSize), frameCount)); - - ++pInfo->captureHead; - --pInfo->pending; - - return paNoError; -} - -static PaError PaPinCaptureEventHandler_WaveRTPolled(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - unsigned long pos; - unsigned bytesToRead; - PaWinWdmIOInfo* pCapture = &pInfo->stream->capture; - const unsigned halfInputBuffer = pCapture->hostBufferSize>>1; - PaWinWdmPin* pin = pInfo->stream->capture.pPin; - - /* Get hold of current ADC position */ - pin->fnAudioPosition(pin, &pos); - /* Wrap it (robi: why not use hw latency compensation here ?? because pos then gets _way_ off from - where it should be, i.e. at beginning or half buffer position. Why? No idea.) */ - /* Compensate for HW FIFO to get to last read buffer position */ - pos += pin->hwLatency; - pos %= pCapture->hostBufferSize; - /* Need to align position on frame boundary */ - pos &= ~(pCapture->bytesPerFrame - 1); - - /* Call barrier (or dummy) */ - pin->fnMemBarrier(); - - /* Put it in "queue" */ - bytesToRead = (pCapture->hostBufferSize + pos - pCapture->lastPosition) % pCapture->hostBufferSize; - if (bytesToRead > 0) - { - unsigned frameCount = PaUtil_WriteRingBuffer(&pInfo->stream->ringBuffer, - pCapture->hostBuffer + pCapture->lastPosition, - bytesToRead / pCapture->bytesPerFrame); - - pCapture->lastPosition = (pCapture->lastPosition + frameCount * pCapture->bytesPerFrame) % pCapture->hostBufferSize; - - PA_HP_TRACE((pInfo->stream->hLog, "Capture event (WaveRTPolled): pos = %4.1lf%%, framesRead=%u", (pos * 100.0 / pCapture->hostBufferSize), frameCount)); - ++pInfo->captureHead; - --pInfo->pending; - } - return paNoError; -} - -static PaError PaPinCaptureSubmitHandler_WaveRTEvent(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - pInfo->capturePackets[pInfo->captureTail & cPacketsArrayMask].packet = 0; - ++pInfo->pending; - return paNoError; -} - -static PaError PaPinCaptureSubmitHandler_WaveRTPolled(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - pInfo->capturePackets[pInfo->captureTail & cPacketsArrayMask].packet = 0; - ++pInfo->pending; - return paNoError; -} - -static PaError PaPinRenderEventHandler_WaveRTEvent(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - unsigned long pos; - unsigned realOutBuf; - PaWinWdmIOInfo* pRender = &pInfo->stream->render; - const unsigned halfOutputBuffer = pRender->hostBufferSize >> 1; - PaWinWdmPin* pin = pInfo->stream->render.pPin; - PaIOPacket* ioPacket = &pInfo->renderPackets[pInfo->renderHead & cPacketsArrayMask]; - - /* Get hold of current DAC position */ - pin->fnAudioPosition(pin, &pos); - /* Compensate for HW FIFO to get to last read buffer position */ - pos += pin->hwLatency; - /* Wrap it */ - pos %= pRender->hostBufferSize; - /* And align it, not sure its really needed though */ - pos &= ~(pRender->bytesPerFrame - 1); - /* Then realOutBuf will point to 
"other" half of double buffer */ - realOutBuf = pos < halfOutputBuffer ? 1U : 0U; - - if (pInfo->priming) - { - realOutBuf = pInfo->renderHead & 0x1; - } - ioPacket->packet = pInfo->stream->render.packets + realOutBuf; - ioPacket->startByte = realOutBuf * halfOutputBuffer; - ioPacket->lengthBytes = halfOutputBuffer; - - PA_HP_TRACE((pInfo->stream->hLog, "Render event (WaveRT) : idx=%u head=%u (pos = %4.1lf%%)", realOutBuf, pInfo->renderHead, (pos * 100.0 / pRender->hostBufferSize) )); - - ++pInfo->renderHead; - --pInfo->pending; - return paNoError; -} - -static PaError PaPinRenderEventHandler_WaveRTPolled(PaProcessThreadInfo* pInfo, unsigned eventIndex) -{ - unsigned long pos; - unsigned realOutBuf; - unsigned bytesToWrite; - - PaWinWdmIOInfo* pRender = &pInfo->stream->render; - const unsigned halfOutputBuffer = pRender->hostBufferSize >> 1; - PaWinWdmPin* pin = pInfo->stream->render.pPin; - PaIOPacket* ioPacket = &pInfo->renderPackets[pInfo->renderHead & cPacketsArrayMask]; - - /* Get hold of current DAC position */ - pin->fnAudioPosition(pin, &pos); - /* Compensate for HW FIFO to get to last read buffer position */ - pos += pin->hwLatency; - /* Wrap it */ - pos %= pRender->hostBufferSize; - /* And align it, not sure its really needed though */ - pos &= ~(pRender->bytesPerFrame - 1); - - if (pInfo->priming) - { - realOutBuf = pInfo->renderHead & 0x1; - ioPacket->packet = pInfo->stream->render.packets + realOutBuf; - ioPacket->startByte = realOutBuf * halfOutputBuffer; - ioPacket->lengthBytes = halfOutputBuffer; - ++pInfo->renderHead; - --pInfo->pending; - } - else - { - bytesToWrite = (pRender->hostBufferSize + pos - pRender->lastPosition) % pRender->hostBufferSize; - ++pRender->pollCntr; - if (bytesToWrite >= halfOutputBuffer) - { - realOutBuf = (pos < halfOutputBuffer) ? 1U : 0U; - ioPacket->packet = pInfo->stream->render.packets + realOutBuf; - pRender->lastPosition = realOutBuf ? 
0U : halfOutputBuffer;
-            ioPacket->startByte = realOutBuf * halfOutputBuffer;
-            ioPacket->lengthBytes = halfOutputBuffer;
-            ++pInfo->renderHead;
-            --pInfo->pending;
-            PA_HP_TRACE((pInfo->stream->hLog, "Render event (WaveRTPolled) : idx=%u head=%u (pos = %4.1lf%%, cnt=%u)", realOutBuf, pInfo->renderHead, (pos * 100.0 / pRender->hostBufferSize), pRender->pollCntr));
-            pRender->pollCntr = 0;
-        }
-    }
-    return paNoError;
-}
-
-static PaError PaPinRenderSubmitHandler_WaveRTEvent(PaProcessThreadInfo* pInfo, unsigned eventIndex)
-{
-    PaWinWdmPin* pin = pInfo->stream->render.pPin;
-    pInfo->renderPackets[pInfo->renderTail & cPacketsArrayMask].packet = 0;
-    /* Call barrier (if needed) */
-    pin->fnMemBarrier();
-    PA_HP_TRACE((pInfo->stream->hLog, "Render submit (WaveRT) : submit=%u", pInfo->renderTail));
-    ++pInfo->pending;
-    if (pInfo->priming)
-    {
-        --pInfo->priming;
-        if (pInfo->priming)
-        {
-            PA_HP_TRACE((pInfo->stream->hLog, "Setting WaveRT event for priming (2)"));
-            SetEvent(pInfo->stream->render.events[0]);
-        }
-    }
-    return paNoError;
-}
-
-static PaError PaPinRenderSubmitHandler_WaveRTPolled(PaProcessThreadInfo* pInfo, unsigned eventIndex)
-{
-    PaWinWdmPin* pin = pInfo->stream->render.pPin;
-    pInfo->renderPackets[pInfo->renderTail & cPacketsArrayMask].packet = 0;
-    /* Call barrier (if needed) */
-    pin->fnMemBarrier();
-    PA_HP_TRACE((pInfo->stream->hLog, "Render submit (WaveRTPolled) : submit=%u", pInfo->renderTail));
-    ++pInfo->pending;
-    if (pInfo->priming)
-    {
-        --pInfo->priming;
-        if (pInfo->priming)
-        {
-            PA_HP_TRACE((pInfo->stream->hLog, "Setting WaveRT event for priming (2)"));
-            SetEvent(pInfo->stream->render.events[0]);
-        }
-    }
-    return paNoError;
-}
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/_binary.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/_binary.py
deleted file mode 100644
index a74ee9eb6f341aca9e074c0acc4b306a354175a0..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/_binary.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# Binary input/output support routines.
-#
-# Copyright (c) 1997-2003 by Secret Labs AB
-# Copyright (c) 1995-2003 by Fredrik Lundh
-# Copyright (c) 2012 by Brian Crowell
-#
-# See the README file for information on usage and redistribution.
-#
-
-
-"""Binary input/output support routines."""
-
-
-from struct import pack, unpack_from
-
-
-def i8(c):
-    return c if c.__class__ is int else c[0]
-
-
-def o8(i):
-    return bytes((i & 255,))
-
-
-# Input, le = little endian, be = big endian
-def i16le(c, o=0):
-    """
-    Converts a 2-bytes (16 bits) string to an unsigned integer.
-
-    :param c: string containing bytes to convert
-    :param o: offset of bytes to convert in string
-    """
-    return unpack_from("<H", c, o)[0]
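These input/output helpers are thin wrappers over `struct`: the format string carries both the byte order ("<" little endian, ">" big endian) and the field width ("H" for unsigned 16-bit, "I" for unsigned 32-bit). A minimal standalone sketch of the same round-trip, not PIL's actual module (the names only mirror it for readability):

```python
# Standalone sketch of the struct-based helper pattern used above.
from struct import pack, unpack_from

def o16le(value):
    # Pack an int into 2 bytes, least-significant byte first.
    return pack("<H", value)

def i16le(data, offset=0):
    # Read 2 little-endian bytes back as an unsigned int.
    return unpack_from("<H", data, offset)[0]

buf = o16le(0x1234)
assert buf == b"\x34\x12"                    # LSB comes first
assert i16le(buf) == 0x1234                  # round-trips
assert unpack_from(">H", buf)[0] == 0x3412   # same bytes, big-endian view
```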
-
-
-def i32le(c, o=0):
-    """
-    Converts a 4-bytes (32 bits) string to an unsigned integer.
-
-    :param c: string containing bytes to convert
-    :param o: offset of bytes to convert in string
-    """
-    return unpack_from("<I", c, o)[0]
-
-
-def i32be(c, o=0):
-    return unpack_from(">I", c, o)[0]
-
-
-# Output, le = little endian, be = big endian
-def o16le(i):
-    return pack("<H", i)
-
-
-def o32be(i):
-    return pack(">I", i)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/annotated_types/test_cases.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/annotated_types/test_cases.py
deleted file mode 100644
index f54df700283bb31f60106443af0e54c8cfbdb834..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/annotated_types/test_cases.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import math
-import sys
-from datetime import date, datetime, timedelta, timezone
-from decimal import Decimal
-from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Set, Tuple
-
-if sys.version_info < (3, 9):
-    from typing_extensions import Annotated
-else:
-    from typing import Annotated
-
-import annotated_types as at
-
-
-class Case(NamedTuple):
-    """
-    A test case for `annotated_types`.
-    """
-
-    annotation: Any
-    valid_cases: Iterable[Any]
-    invalid_cases: Iterable[Any]
-
-
-def cases() -> Iterable[Case]:
-    # Gt, Ge, Lt, Le
-    yield Case(Annotated[int, at.Gt(4)], (5, 6, 1000), (4, 0, -1))
-    yield Case(Annotated[float, at.Gt(0.5)], (0.6, 0.7, 0.8, 0.9), (0.5, 0.0, -0.1))
-    yield Case(
-        Annotated[datetime, at.Gt(datetime(2000, 1, 1))],
-        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
-        [datetime(2000, 1, 1), datetime(1999, 12, 31)],
-    )
-    yield Case(
-        Annotated[datetime, at.Gt(date(2000, 1, 1))],
-        [date(2000, 1, 2), date(2000, 1, 3)],
-        [date(2000, 1, 1), date(1999, 12, 31)],
-    )
-    yield Case(
-        Annotated[datetime, at.Gt(Decimal('1.123'))],
-        [Decimal('1.1231'), Decimal('123')],
-        [Decimal('1.123'), Decimal('0')],
-    )
-
-    yield Case(Annotated[int, at.Ge(4)], (4, 5, 6, 1000, 4), (0, -1))
-    yield Case(Annotated[float, at.Ge(0.5)], (0.5, 0.6, 0.7, 0.8, 0.9), (0.4, 0.0, -0.1))
-    yield Case(
-        Annotated[datetime, at.Ge(datetime(2000, 1, 1))],
-        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
-        [datetime(1998, 1, 1), datetime(1999, 12, 31)],
-    )
-
-    yield Case(Annotated[int, at.Lt(4)], (0, -1), (4, 5, 6, 1000, 4))
-    yield Case(Annotated[float, at.Lt(0.5)], (0.4, 0.0, -0.1), (0.5, 0.6, 0.7, 0.8, 0.9))
-    yield Case(
-        Annotated[datetime, at.Lt(datetime(2000, 1, 1))],
-        [datetime(1999, 12, 31), datetime(1999, 12, 31)],
-        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
-    )
-
-    yield Case(Annotated[int, at.Le(4)], (4, 0, -1), (5, 6, 1000))
-    yield Case(Annotated[float, at.Le(0.5)], (0.5, 0.0, -0.1), (0.6, 0.7, 0.8, 0.9))
-    yield Case(
-        Annotated[datetime, at.Le(datetime(2000, 1, 1))],
-        [datetime(2000, 1, 1), datetime(1999, 12, 31)],
-        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
-    )
-
-    # Interval
-    yield Case(Annotated[int, at.Interval(gt=4)], (5, 6, 1000), (4, 0, -1))
-    yield Case(Annotated[int, at.Interval(gt=4, lt=10)], (5, 6), (4, 10, 1000, 0, -1))
-    yield Case(Annotated[float, at.Interval(ge=0.5, le=1)], (0.5, 0.9, 1), (0.49, 1.1))
-    yield Case(
-        Annotated[datetime, at.Interval(gt=datetime(2000, 1, 1), le=datetime(2000, 1, 3))],
-        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
-        [datetime(2000, 1, 1), datetime(2000, 1, 4)],
-    )
-
-    yield Case(Annotated[int, at.MultipleOf(multiple_of=3)], (0, 3, 9), (1, 2, 4))
-    yield Case(Annotated[float, at.MultipleOf(multiple_of=0.5)], (0, 0.5, 1, 1.5), (0.4, 1.1))
-
-    # lengths
-
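Between the numeric-bound cases above and the length cases below, it may help to see how these `Case` tuples are meant to be consumed. Here is a hedged sketch of a toy harness (Python 3.9+) that understands only the simple bound constraints and skips everything else; `satisfies` and the harness loop are invented for illustration, not part of `annotated_types`:

```python
# Toy consumer for the Case tuples above: checks Gt/Ge/Lt/Le cases only.
import annotated_types as at
from annotated_types.test_cases import cases
from typing import get_args

def satisfies(value, constraint):
    # Map each bound constraint onto a plain comparison.
    if isinstance(constraint, at.Gt):
        return value > constraint.gt
    if isinstance(constraint, at.Ge):
        return value >= constraint.ge
    if isinstance(constraint, at.Lt):
        return value < constraint.lt
    return value <= constraint.le  # at.Le

for case in cases():
    # get_args(Annotated[int, at.Gt(4)]) -> (int, Gt(gt=4))
    _, *constraints = get_args(case.annotation)
    if not all(isinstance(c, (at.Gt, at.Ge, at.Lt, at.Le)) for c in constraints):
        continue  # this sketch only handles simple bounds
    for v in case.valid_cases:
        assert all(satisfies(v, c) for c in constraints)
    for v in case.invalid_cases:
        assert not all(satisfies(v, c) for c in constraints)
```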
- yield Case(Annotated[str, at.MinLen(3)], ('123', '1234', 'x' * 10), ('', '1', '12')) - yield Case(Annotated[str, at.Len(3)], ('123', '1234', 'x' * 10), ('', '1', '12')) - yield Case(Annotated[List[int], at.MinLen(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2])) - yield Case(Annotated[List[int], at.Len(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2])) - - yield Case(Annotated[str, at.MaxLen(4)], ('', '1234'), ('12345', 'x' * 10)) - yield Case(Annotated[str, at.Len(0, 4)], ('', '1234'), ('12345', 'x' * 10)) - yield Case(Annotated[List[str], at.MaxLen(4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10)) - yield Case(Annotated[List[str], at.Len(0, 4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10)) - - yield Case(Annotated[str, at.Len(3, 5)], ('123', '12345'), ('', '1', '12', '123456', 'x' * 10)) - yield Case(Annotated[str, at.Len(3, 3)], ('123',), ('12', '1234')) - - yield Case(Annotated[Dict[int, int], at.Len(2, 3)], [{1: 1, 2: 2}], [{}, {1: 1}, {1: 1, 2: 2, 3: 3, 4: 4}]) - yield Case(Annotated[Set[int], at.Len(2, 3)], ({1, 2}, {1, 2, 3}), (set(), {1}, {1, 2, 3, 4})) - yield Case(Annotated[Tuple[int, ...], at.Len(2, 3)], ((1, 2), (1, 2, 3)), ((), (1,), (1, 2, 3, 4))) - - # Timezone - - yield Case( - Annotated[datetime, at.Timezone(None)], [datetime(2000, 1, 1)], [datetime(2000, 1, 1, tzinfo=timezone.utc)] - ) - yield Case( - Annotated[datetime, at.Timezone(...)], [datetime(2000, 1, 1, tzinfo=timezone.utc)], [datetime(2000, 1, 1)] - ) - yield Case( - Annotated[datetime, at.Timezone(timezone.utc)], - [datetime(2000, 1, 1, tzinfo=timezone.utc)], - [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))], - ) - yield Case( - Annotated[datetime, at.Timezone('Europe/London')], - [datetime(2000, 1, 1, tzinfo=timezone(timedelta(0), name='Europe/London'))], - [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))], - ) - - # predicate types - - yield Case(at.LowerCase[str], ['abc', 'foobar'], ['', 'A', 'Boom']) - yield Case(at.UpperCase[str], ['ABC', 'DEFO'], ['', 'a', 'abc', 'AbC']) - yield Case(at.IsDigits[str], ['123'], ['', 'ab', 'a1b2']) - yield Case(at.IsAscii[str], ['123', 'foo bar'], ['£100', '😊', 'whatever 👀']) - - yield Case(Annotated[int, at.Predicate(lambda x: x % 2 == 0)], [0, 2, 4], [1, 3, 5]) - - yield Case(at.IsFinite[float], [1.23], [math.nan, math.inf, -math.inf]) - yield Case(at.IsNotFinite[float], [math.nan, math.inf], [1.23]) - yield Case(at.IsNan[float], [math.nan], [1.23, math.inf]) - yield Case(at.IsNotNan[float], [1.23, math.inf], [math.nan]) - yield Case(at.IsInfinite[float], [math.inf], [math.nan, 1.23]) - yield Case(at.IsNotInfinite[float], [math.nan, 1.23], [math.inf]) - - # check stacked predicates - yield Case(at.IsInfinite[Annotated[float, at.Predicate(lambda x: x > 0)]], [math.inf], [-math.inf, 1.23, math.nan]) - - # doc - yield Case(Annotated[int, at.doc("A number")], [1, 2], []) - - # custom GroupedMetadata - class MyCustomGroupedMetadata(at.GroupedMetadata): - def __iter__(self) -> Iterator[at.Predicate]: - yield at.Predicate(lambda x: float(x).is_integer()) - - yield Case(Annotated[float, MyCustomGroupedMetadata()], [0, 2.0], [0.01, 1.5]) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/click/_textwrap.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/click/_textwrap.py deleted file mode 100644 index b47dcbd4264e86715adfae1c5124c288b67a983e..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/click/_textwrap.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import textwrap
-import typing as t
-from contextlib import contextmanager
-
-
-class TextWrapper(textwrap.TextWrapper):
-    def _handle_long_word(
-        self,
-        reversed_chunks: t.List[str],
-        cur_line: t.List[str],
-        cur_len: int,
-        width: int,
-    ) -> None:
-        space_left = max(width - cur_len, 1)
-
-        if self.break_long_words:
-            last = reversed_chunks[-1]
-            cut = last[:space_left]
-            res = last[space_left:]
-            cur_line.append(cut)
-            reversed_chunks[-1] = res
-        elif not cur_line:
-            cur_line.append(reversed_chunks.pop())
-
-    @contextmanager
-    def extra_indent(self, indent: str) -> t.Iterator[None]:
-        old_initial_indent = self.initial_indent
-        old_subsequent_indent = self.subsequent_indent
-        self.initial_indent += indent
-        self.subsequent_indent += indent
-
-        try:
-            yield
-        finally:
-            self.initial_indent = old_initial_indent
-            self.subsequent_indent = old_subsequent_indent
-
-    def indent_only(self, text: str) -> str:
-        rv = []
-
-        for idx, line in enumerate(text.splitlines()):
-            indent = self.initial_indent
-
-            if idx > 0:
-                indent = self.subsequent_indent
-
-            rv.append(f"{indent}{line}")
-
-        return "\n".join(rv)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/C_O_L_R_.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/C_O_L_R_.py
deleted file mode 100644
index b4bc5d0c200e58f793fff6d3ffe95b2d76d36c64..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/C_O_L_R_.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright 2013 Google, Inc. All Rights Reserved.
-#
-# Google Author(s): Behdad Esfahbod
-
-from fontTools.misc.textTools import safeEval
-from . import DefaultTable
-
-
-class table_C_O_L_R_(DefaultTable.DefaultTable):
-
-    """This table is structured so that you can treat it like a dictionary keyed by glyph name.
-
-    ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.
-
-    ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.
-    """
-
-    @staticmethod
-    def _decompileColorLayersV0(table):
-        if not table.LayerRecordArray:
-            return {}
-        colorLayerLists = {}
-        layerRecords = table.LayerRecordArray.LayerRecord
-        numLayerRecords = len(layerRecords)
-        for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord:
-            baseGlyph = baseRec.BaseGlyph
-            firstLayerIndex = baseRec.FirstLayerIndex
-            numLayers = baseRec.NumLayers
-            assert firstLayerIndex + numLayers <= numLayerRecords
-            layers = []
-            for i in range(firstLayerIndex, firstLayerIndex + numLayers):
-                layerRec = layerRecords[i]
-                layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex))
-            colorLayerLists[baseGlyph] = layers
-        return colorLayerLists
-
-    def _toOTTable(self, ttFont):
-        from . import otTables
-        from fontTools.colorLib.builder import populateCOLRv0
-
-        tableClass = getattr(otTables, self.tableTag)
-        table = tableClass()
-        table.Version = self.version
-
-        populateCOLRv0(
-            table,
-            {
-                baseGlyph: [(layer.name, layer.colorID) for layer in layers]
-                for baseGlyph, layers in self.ColorLayers.items()
-            },
-            glyphMap=ttFont.getReverseGlyphMap(rebuild=True),
-        )
-        return table
-
-    def decompile(self, data, ttFont):
-        from .otBase import OTTableReader
-        from . import otTables
-
-        # We use otData to decompile, but we adapt the decompiled otTables to the
-        # existing COLR v0 API for backward compatibility.
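The "COLR v0 API" that comment refers to is the dict-like view defined further down: the table maps a base glyph name to a list of `LayerRecord` objects carrying `name` and `colorID` attributes. A hedged sketch of that access pattern; the font path and glyph names are hypothetical:

```python
# Dict-like access to a version-0 COLR table (hypothetical font and glyphs).
from fontTools.ttLib import TTFont

font = TTFont("example-color.ttf")
colr = font["COLR"]
if colr.version == 0:
    for layer in colr["A"]:      # __getitem__ is keyed by glyph name
        print(layer.name, layer.colorID)
    colr["B"] = list(colr["A"])  # __setitem__ replaces a glyph's layer list
```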
- reader = OTTableReader(data, tableTag=self.tableTag) - tableClass = getattr(otTables, self.tableTag) - table = tableClass() - table.decompile(reader, ttFont) - - self.version = table.Version - if self.version == 0: - self.ColorLayers = self._decompileColorLayersV0(table) - else: - # for new versions, keep the raw otTables around - self.table = table - - def compile(self, ttFont): - from .otBase import OTTableWriter - - if hasattr(self, "table"): - table = self.table - else: - table = self._toOTTable(ttFont) - - writer = OTTableWriter(tableTag=self.tableTag) - table.compile(writer, ttFont) - return writer.getAllData() - - def toXML(self, writer, ttFont): - if hasattr(self, "table"): - self.table.toXML2(writer, ttFont) - else: - writer.simpletag("version", value=self.version) - writer.newline() - for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID): - writer.begintag("ColorGlyph", name=baseGlyph) - writer.newline() - for layer in self.ColorLayers[baseGlyph]: - layer.toXML(writer, ttFont) - writer.endtag("ColorGlyph") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": # old COLR v0 API - setattr(self, name, safeEval(attrs["value"])) - elif name == "ColorGlyph": - if not hasattr(self, "ColorLayers"): - self.ColorLayers = {} - glyphName = attrs["name"] - for element in content: - if isinstance(element, str): - continue - layers = [] - for element in content: - if isinstance(element, str): - continue - layer = LayerRecord() - layer.fromXML(element[0], element[1], element[2], ttFont) - layers.append(layer) - self.ColorLayers[glyphName] = layers - else: # new COLR v1 API - from . import otTables - - if not hasattr(self, "table"): - tableClass = getattr(otTables, self.tableTag) - self.table = tableClass() - self.table.fromXML(name, attrs, content, ttFont) - self.table.populateDefaults() - self.version = self.table.Version - - def __getitem__(self, glyphName): - if not isinstance(glyphName, str): - raise TypeError(f"expected str, found {type(glyphName).__name__}") - return self.ColorLayers[glyphName] - - def __setitem__(self, glyphName, value): - if not isinstance(glyphName, str): - raise TypeError(f"expected str, found {type(glyphName).__name__}") - if value is not None: - self.ColorLayers[glyphName] = value - elif glyphName in self.ColorLayers: - del self.ColorLayers[glyphName] - - def __delitem__(self, glyphName): - del self.ColorLayers[glyphName] - - -class LayerRecord(object): - def __init__(self, name=None, colorID=None): - self.name = name - self.colorID = colorID - - def toXML(self, writer, ttFont): - writer.simpletag("layer", name=self.name, colorID=self.colorID) - writer.newline() - - def fromXML(self, eltname, attrs, content, ttFont): - for (name, value) in attrs.items(): - if name == "name": - setattr(self, name, value) - else: - setattr(self, name, safeEval(value)) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jsonschema/validators.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jsonschema/validators.py deleted file mode 100644 index 740658bab3aa242b9f0661376d71bc4b95ac0169..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jsonschema/validators.py +++ /dev/null @@ -1,1381 +0,0 @@ -""" -Creation and extension of validators, with implementations for existing drafts. 
-""" -from __future__ import annotations - -from collections import deque -from collections.abc import Iterable, Mapping, Sequence -from functools import lru_cache -from operator import methodcaller -from urllib.parse import unquote, urldefrag, urljoin, urlsplit -from urllib.request import urlopen -from warnings import warn -import contextlib -import json -import reprlib -import warnings - -from attrs import define, field, fields -from jsonschema_specifications import REGISTRY as SPECIFICATIONS -from rpds import HashTrieMap -import referencing.exceptions -import referencing.jsonschema - -from jsonschema import ( - _format, - _keywords, - _legacy_keywords, - _types, - _typing, - _utils, - exceptions, -) -from jsonschema.protocols import Validator - -_UNSET = _utils.Unset() - -_VALIDATORS: dict[str, Validator] = {} -_META_SCHEMAS = _utils.URIDict() - - -def __getattr__(name): - if name == "ErrorTree": - warnings.warn( - "Importing ErrorTree from jsonschema.validators is deprecated. " - "Instead import it from jsonschema.exceptions.", - DeprecationWarning, - stacklevel=2, - ) - from jsonschema.exceptions import ErrorTree - return ErrorTree - elif name == "validators": - warnings.warn( - "Accessing jsonschema.validators.validators is deprecated. " - "Use jsonschema.validators.validator_for with a given schema.", - DeprecationWarning, - stacklevel=2, - ) - return _VALIDATORS - elif name == "meta_schemas": - warnings.warn( - "Accessing jsonschema.validators.meta_schemas is deprecated. " - "Use jsonschema.validators.validator_for with a given schema.", - DeprecationWarning, - stacklevel=2, - ) - return _META_SCHEMAS - elif name == "RefResolver": - warnings.warn( - _RefResolver._DEPRECATION_MESSAGE, - DeprecationWarning, - stacklevel=2, - ) - return _RefResolver - raise AttributeError(f"module {__name__} has no attribute {name}") - - -def validates(version): - """ - Register the decorated validator for a ``version`` of the specification. - - Registered validators and their meta schemas will be considered when - parsing :kw:`$schema` keywords' URIs. - - Arguments: - - version (str): - - An identifier to use as the version's name - - Returns: - - collections.abc.Callable: - - a class decorator to decorate the validator with the version - """ - - def _validates(cls): - _VALIDATORS[version] = cls - meta_schema_id = cls.ID_OF(cls.META_SCHEMA) - _META_SCHEMAS[meta_schema_id] = cls - return cls - return _validates - - -def _warn_for_remote_retrieve(uri: str): - from urllib.request import Request, urlopen - headers = {"User-Agent": "python-jsonschema (deprecated $ref resolution)"} - request = Request(uri, headers=headers) - with urlopen(request) as response: - warnings.warn( - "Automatically retrieving remote references can be a security " - "vulnerability and is discouraged by the JSON Schema " - "specifications. Relying on this behavior is deprecated " - "and will shortly become an error. 
If you are sure you want to " - "remotely retrieve your reference and that it is safe to do so, " - "you can find instructions for doing so via referencing.Registry " - "in the referencing documentation " - "(https://referencing.readthedocs.org).", - DeprecationWarning, - stacklevel=9, # Ha ha ha ha magic numbers :/ - ) - return referencing.Resource.from_contents( - json.load(response), - default_specification=referencing.jsonschema.DRAFT202012, - ) - - -_REMOTE_WARNING_REGISTRY = SPECIFICATIONS.combine( - referencing.Registry(retrieve=_warn_for_remote_retrieve), # type: ignore[call-arg] -) - - -def create( - meta_schema: referencing.jsonschema.ObjectSchema, - validators: ( - Mapping[str, _typing.SchemaKeywordValidator] - | Iterable[tuple[str, _typing.SchemaKeywordValidator]] - ) = (), - version: str | None = None, - type_checker: _types.TypeChecker = _types.draft202012_type_checker, - format_checker: _format.FormatChecker = _format.draft202012_format_checker, - id_of: _typing.id_of = referencing.jsonschema.DRAFT202012.id_of, - applicable_validators: _typing.ApplicableValidators = methodcaller( - "items", - ), -): - """ - Create a new validator class. - - Arguments: - - meta_schema: - - the meta schema for the new validator class - - validators: - - a mapping from names to callables, where each callable will - validate the schema property with the given name. - - Each callable should take 4 arguments: - - 1. a validator instance, - 2. the value of the property being validated within the - instance - 3. the instance - 4. the schema - - version: - - an identifier for the version that this validator class will - validate. If provided, the returned validator class will - have its ``__name__`` set to include the version, and also - will have `jsonschema.validators.validates` automatically - called for the given version. - - type_checker: - - a type checker, used when applying the :kw:`type` keyword. - - If unprovided, a `jsonschema.TypeChecker` will be created - with a set of default types typical of JSON Schema drafts. - - format_checker: - - a format checker, used when applying the :kw:`format` keyword. - - If unprovided, a `jsonschema.FormatChecker` will be created - with a set of default formats typical of JSON Schema drafts. - - id_of: - - A function that given a schema, returns its ID. - - applicable_validators: - - A function that, given a schema, returns the list of - applicable schema keywords and associated values - which will be used to validate the instance. - This is mostly used to support pre-draft 7 versions of JSON Schema - which specified behavior around ignoring keywords if they were - siblings of a ``$ref`` keyword. If you're not attempting to - implement similar behavior, you can typically ignore this argument - and leave it at its default. 
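As a concrete illustration of the signature described above, here is a minimal sketch of `create` with a single custom keyword; the `alwaysFails` keyword, its callable, and the empty meta schema are invented for this example:

```python
# Build a tiny validator class whose only keyword rejects everything.
from jsonschema import exceptions
from jsonschema.validators import create

def always_fails(validator, value, instance, schema):
    # Arguments follow the contract: validator, keyword value, instance, schema.
    yield exceptions.ValidationError(f"{instance!r} rejected: {value}")

MyValidator = create(
    meta_schema={},  # trivially permissive meta schema, for illustration only
    validators={"alwaysFails": always_fails},
)
errors = list(MyValidator({"alwaysFails": "because"}).iter_errors(42))
print(errors[0].message)  # -> 42 rejected: because
```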
- - Returns: - - a new `jsonschema.protocols.Validator` class - """ - # preemptively don't shadow the `Validator.format_checker` local - format_checker_arg = format_checker - - specification = referencing.jsonschema.specification_with( - dialect_id=id_of(meta_schema) or "urn:unknown-dialect", - default=referencing.Specification.OPAQUE, - ) - - @define - class Validator: - - VALIDATORS = dict(validators) # noqa: RUF012 - META_SCHEMA = dict(meta_schema) # noqa: RUF012 - TYPE_CHECKER = type_checker - FORMAT_CHECKER = format_checker_arg - ID_OF = staticmethod(id_of) - - _APPLICABLE_VALIDATORS = applicable_validators - - schema: referencing.jsonschema.Schema = field(repr=reprlib.repr) - _ref_resolver = field(default=None, repr=False, alias="resolver") - format_checker: _format.FormatChecker | None = field(default=None) - # TODO: include new meta-schemas added at runtime - _registry: referencing.jsonschema.SchemaRegistry = field( - default=_REMOTE_WARNING_REGISTRY, - kw_only=True, - repr=False, - ) - _resolver = field( - alias="_resolver", - default=None, - kw_only=True, - repr=False, - ) - - def __init_subclass__(cls): - warnings.warn( - ( - "Subclassing validator classes is not intended to " - "be part of their public API. A future version " - "will make doing so an error, as the behavior of " - "subclasses isn't guaranteed to stay the same " - "between releases of jsonschema. Instead, prefer " - "composition of validators, wrapping them in an object " - "owned entirely by the downstream library." - ), - DeprecationWarning, - stacklevel=2, - ) - - def evolve(self, **changes): - cls = self.__class__ - schema = changes.setdefault("schema", self.schema) - NewValidator = validator_for(schema, default=cls) - - for field in fields(cls): # noqa: F402 - if not field.init: - continue - attr_name = field.name - init_name = field.alias - if init_name not in changes: - changes[init_name] = getattr(self, attr_name) - - return NewValidator(**changes) - - cls.evolve = evolve - - def __attrs_post_init__(self): - if self._resolver is None: - registry = self._registry - if registry is not _REMOTE_WARNING_REGISTRY: - registry = SPECIFICATIONS.combine(registry) - resource = specification.create_resource(self.schema) - self._resolver = registry.resolver_with_root(resource) - - # REMOVEME: Legacy ref resolution state management. - push_scope = getattr(self._ref_resolver, "push_scope", None) - if push_scope is not None: - id = id_of(self.schema) - if id is not None: - push_scope(id) - - @classmethod - def check_schema(cls, schema, format_checker=_UNSET): - Validator = validator_for(cls.META_SCHEMA, default=cls) - if format_checker is _UNSET: - format_checker = Validator.FORMAT_CHECKER - validator = Validator( - schema=cls.META_SCHEMA, - format_checker=format_checker, - ) - for error in validator.iter_errors(schema): - raise exceptions.SchemaError.create_from(error) - - @property - def resolver(self): - warnings.warn( - ( - f"Accessing {self.__class__.__name__}.resolver is " - "deprecated as of v4.18.0, in favor of the " - "https://github.com/python-jsonschema/referencing " - "library, which provides more compliant referencing " - "behavior as well as more flexible APIs for " - "customization." 
- ), - DeprecationWarning, - stacklevel=2, - ) - if self._ref_resolver is None: - self._ref_resolver = _RefResolver.from_schema( - self.schema, - id_of=id_of, - ) - return self._ref_resolver - - def evolve(self, **changes): - schema = changes.setdefault("schema", self.schema) - NewValidator = validator_for(schema, default=self.__class__) - - for (attr_name, init_name) in evolve_fields: - if init_name not in changes: - changes[init_name] = getattr(self, attr_name) - - return NewValidator(**changes) - - def iter_errors(self, instance, _schema=None): - if _schema is not None: - warnings.warn( - ( - "Passing a schema to Validator.iter_errors " - "is deprecated and will be removed in a future " - "release. Call validator.evolve(schema=new_schema)." - "iter_errors(...) instead." - ), - DeprecationWarning, - stacklevel=2, - ) - else: - _schema = self.schema - - if _schema is True: - return - elif _schema is False: - yield exceptions.ValidationError( - f"False schema does not allow {instance!r}", - validator=None, - validator_value=None, - instance=instance, - schema=_schema, - ) - return - - for k, v in applicable_validators(_schema): - validator = self.VALIDATORS.get(k) - if validator is None: - continue - - errors = validator(self, v, instance, _schema) or () - for error in errors: - # set details if not already set by the called fn - error._set( - validator=k, - validator_value=v, - instance=instance, - schema=_schema, - type_checker=self.TYPE_CHECKER, - ) - if k not in {"if", "$ref"}: - error.schema_path.appendleft(k) - yield error - - def descend( - self, - instance, - schema, - path=None, - schema_path=None, - resolver=None, - ): - if schema is True: - return - elif schema is False: - yield exceptions.ValidationError( - f"False schema does not allow {instance!r}", - validator=None, - validator_value=None, - instance=instance, - schema=schema, - ) - return - - if self._ref_resolver is not None: - evolved = self.evolve(schema=schema) - else: - if resolver is None: - resolver = self._resolver.in_subresource( - specification.create_resource(schema), - ) - evolved = self.evolve(schema=schema, _resolver=resolver) - - for k, v in applicable_validators(schema): - validator = evolved.VALIDATORS.get(k) - if validator is None: - continue - - errors = validator(evolved, v, instance, schema) or () - for error in errors: - # set details if not already set by the called fn - error._set( - validator=k, - validator_value=v, - instance=instance, - schema=schema, - type_checker=evolved.TYPE_CHECKER, - ) - if k not in {"if", "$ref"}: - error.schema_path.appendleft(k) - if path is not None: - error.path.appendleft(path) - if schema_path is not None: - error.schema_path.appendleft(schema_path) - yield error - - def validate(self, *args, **kwargs): - for error in self.iter_errors(*args, **kwargs): - raise error - - def is_type(self, instance, type): - try: - return self.TYPE_CHECKER.is_type(instance, type) - except exceptions.UndefinedTypeCheck: - raise exceptions.UnknownType(type, instance, self.schema) - - def _validate_reference(self, ref, instance): - if self._ref_resolver is None: - try: - resolved = self._resolver.lookup(ref) - except referencing.exceptions.Unresolvable as err: - raise exceptions._WrappedReferencingError(err) - - return self.descend( - instance, - resolved.contents, - resolver=resolved.resolver, - ) - else: - resolve = getattr(self._ref_resolver, "resolve", None) - if resolve is None: - with self._ref_resolver.resolving(ref) as resolved: - return self.descend(instance, resolved) - else: - 
scope, resolved = resolve(ref) - self._ref_resolver.push_scope(scope) - - try: - return list(self.descend(instance, resolved)) - finally: - self._ref_resolver.pop_scope() - - def is_valid(self, instance, _schema=None): - if _schema is not None: - warnings.warn( - ( - "Passing a schema to Validator.is_valid is deprecated " - "and will be removed in a future release. Call " - "validator.evolve(schema=new_schema).is_valid(...) " - "instead." - ), - DeprecationWarning, - stacklevel=2, - ) - self = self.evolve(schema=_schema) - - error = next(self.iter_errors(instance), None) - return error is None - - evolve_fields = [ - (field.name, field.alias) - for field in fields(Validator) - if field.init - ] - - if version is not None: - safe = version.title().replace(" ", "").replace("-", "") - Validator.__name__ = Validator.__qualname__ = f"{safe}Validator" - Validator = validates(version)(Validator) # type: ignore[misc] - - return Validator - - -def extend( - validator, - validators=(), - version=None, - type_checker=None, - format_checker=None, -): - """ - Create a new validator class by extending an existing one. - - Arguments: - - validator (jsonschema.protocols.Validator): - - an existing validator class - - validators (collections.abc.Mapping): - - a mapping of new validator callables to extend with, whose - structure is as in `create`. - - .. note:: - - Any validator callables with the same name as an - existing one will (silently) replace the old validator - callable entirely, effectively overriding any validation - done in the "parent" validator class. - - If you wish to instead extend the behavior of a parent's - validator callable, delegate and call it directly in - the new validator function by retrieving it using - ``OldValidator.VALIDATORS["validation_keyword_name"]``. - - version (str): - - a version for the new validator class - - type_checker (jsonschema.TypeChecker): - - a type checker, used when applying the :kw:`type` keyword. - - If unprovided, the type checker of the extended - `jsonschema.protocols.Validator` will be carried along. - - format_checker (jsonschema.FormatChecker): - - a format checker, used when applying the :kw:`format` keyword. - - If unprovided, the format checker of the extended - `jsonschema.protocols.Validator` will be carried along. - - Returns: - - a new `jsonschema.protocols.Validator` class extending the one - provided - - .. note:: Meta Schemas - - The new validator class will have its parent's meta schema. - - If you wish to change or extend the meta schema in the new - validator class, modify ``META_SCHEMA`` directly on the returned - class. Note that no implicit copying is done, so a copy should - likely be made before modifying it, in order to not affect the - old validator. 
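A hedged sketch of `extend` in use, in the spirit of the note above about overriding keywords; the `mustBeFrozen` keyword is made up for this example:

```python
# Extend the 2020-12 validator with one additional keyword.
from jsonschema import exceptions
from jsonschema.validators import Draft202012Validator, extend

def must_be_frozen(validator, value, instance, schema):
    if value and isinstance(instance, list):
        yield exceptions.ValidationError(f"{instance!r} is a mutable list")

FrozenValidator = extend(
    Draft202012Validator,
    validators={"mustBeFrozen": must_be_frozen},
)
v = FrozenValidator({"mustBeFrozen": True})
print(v.is_valid((1, 2, 3)))  # True: tuples pass
print(v.is_valid([1, 2, 3]))  # False: lists are rejected
```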
- """ - all_validators = dict(validator.VALIDATORS) - all_validators.update(validators) - - if type_checker is None: - type_checker = validator.TYPE_CHECKER - if format_checker is None: - format_checker = validator.FORMAT_CHECKER - return create( - meta_schema=validator.META_SCHEMA, - validators=all_validators, - version=version, - type_checker=type_checker, - format_checker=format_checker, - id_of=validator.ID_OF, - applicable_validators=validator._APPLICABLE_VALIDATORS, - ) - - -Draft3Validator = create( - meta_schema=SPECIFICATIONS.contents( - "http://json-schema.org/draft-03/schema#", - ), - validators={ - "$ref": _keywords.ref, - "additionalItems": _legacy_keywords.additionalItems, - "additionalProperties": _keywords.additionalProperties, - "dependencies": _legacy_keywords.dependencies_draft3, - "disallow": _legacy_keywords.disallow_draft3, - "divisibleBy": _keywords.multipleOf, - "enum": _keywords.enum, - "extends": _legacy_keywords.extends_draft3, - "format": _keywords.format, - "items": _legacy_keywords.items_draft3_draft4, - "maxItems": _keywords.maxItems, - "maxLength": _keywords.maxLength, - "maximum": _legacy_keywords.maximum_draft3_draft4, - "minItems": _keywords.minItems, - "minLength": _keywords.minLength, - "minimum": _legacy_keywords.minimum_draft3_draft4, - "pattern": _keywords.pattern, - "patternProperties": _keywords.patternProperties, - "properties": _legacy_keywords.properties_draft3, - "type": _legacy_keywords.type_draft3, - "uniqueItems": _keywords.uniqueItems, - }, - type_checker=_types.draft3_type_checker, - format_checker=_format.draft3_format_checker, - version="draft3", - id_of=referencing.jsonschema.DRAFT3.id_of, - applicable_validators=_legacy_keywords.ignore_ref_siblings, -) - -Draft4Validator = create( - meta_schema=SPECIFICATIONS.contents( - "http://json-schema.org/draft-04/schema#", - ), - validators={ - "$ref": _keywords.ref, - "additionalItems": _legacy_keywords.additionalItems, - "additionalProperties": _keywords.additionalProperties, - "allOf": _keywords.allOf, - "anyOf": _keywords.anyOf, - "dependencies": _legacy_keywords.dependencies_draft4_draft6_draft7, - "enum": _keywords.enum, - "format": _keywords.format, - "items": _legacy_keywords.items_draft3_draft4, - "maxItems": _keywords.maxItems, - "maxLength": _keywords.maxLength, - "maxProperties": _keywords.maxProperties, - "maximum": _legacy_keywords.maximum_draft3_draft4, - "minItems": _keywords.minItems, - "minLength": _keywords.minLength, - "minProperties": _keywords.minProperties, - "minimum": _legacy_keywords.minimum_draft3_draft4, - "multipleOf": _keywords.multipleOf, - "not": _keywords.not_, - "oneOf": _keywords.oneOf, - "pattern": _keywords.pattern, - "patternProperties": _keywords.patternProperties, - "properties": _keywords.properties, - "required": _keywords.required, - "type": _keywords.type, - "uniqueItems": _keywords.uniqueItems, - }, - type_checker=_types.draft4_type_checker, - format_checker=_format.draft4_format_checker, - version="draft4", - id_of=referencing.jsonschema.DRAFT4.id_of, - applicable_validators=_legacy_keywords.ignore_ref_siblings, -) - -Draft6Validator = create( - meta_schema=SPECIFICATIONS.contents( - "http://json-schema.org/draft-06/schema#", - ), - validators={ - "$ref": _keywords.ref, - "additionalItems": _legacy_keywords.additionalItems, - "additionalProperties": _keywords.additionalProperties, - "allOf": _keywords.allOf, - "anyOf": _keywords.anyOf, - "const": _keywords.const, - "contains": _legacy_keywords.contains_draft6_draft7, - "dependencies": 
_legacy_keywords.dependencies_draft4_draft6_draft7, - "enum": _keywords.enum, - "exclusiveMaximum": _keywords.exclusiveMaximum, - "exclusiveMinimum": _keywords.exclusiveMinimum, - "format": _keywords.format, - "items": _legacy_keywords.items_draft6_draft7_draft201909, - "maxItems": _keywords.maxItems, - "maxLength": _keywords.maxLength, - "maxProperties": _keywords.maxProperties, - "maximum": _keywords.maximum, - "minItems": _keywords.minItems, - "minLength": _keywords.minLength, - "minProperties": _keywords.minProperties, - "minimum": _keywords.minimum, - "multipleOf": _keywords.multipleOf, - "not": _keywords.not_, - "oneOf": _keywords.oneOf, - "pattern": _keywords.pattern, - "patternProperties": _keywords.patternProperties, - "properties": _keywords.properties, - "propertyNames": _keywords.propertyNames, - "required": _keywords.required, - "type": _keywords.type, - "uniqueItems": _keywords.uniqueItems, - }, - type_checker=_types.draft6_type_checker, - format_checker=_format.draft6_format_checker, - version="draft6", - id_of=referencing.jsonschema.DRAFT6.id_of, - applicable_validators=_legacy_keywords.ignore_ref_siblings, -) - -Draft7Validator = create( - meta_schema=SPECIFICATIONS.contents( - "http://json-schema.org/draft-07/schema#", - ), - validators={ - "$ref": _keywords.ref, - "additionalItems": _legacy_keywords.additionalItems, - "additionalProperties": _keywords.additionalProperties, - "allOf": _keywords.allOf, - "anyOf": _keywords.anyOf, - "const": _keywords.const, - "contains": _legacy_keywords.contains_draft6_draft7, - "dependencies": _legacy_keywords.dependencies_draft4_draft6_draft7, - "enum": _keywords.enum, - "exclusiveMaximum": _keywords.exclusiveMaximum, - "exclusiveMinimum": _keywords.exclusiveMinimum, - "format": _keywords.format, - "if": _keywords.if_, - "items": _legacy_keywords.items_draft6_draft7_draft201909, - "maxItems": _keywords.maxItems, - "maxLength": _keywords.maxLength, - "maxProperties": _keywords.maxProperties, - "maximum": _keywords.maximum, - "minItems": _keywords.minItems, - "minLength": _keywords.minLength, - "minProperties": _keywords.minProperties, - "minimum": _keywords.minimum, - "multipleOf": _keywords.multipleOf, - "not": _keywords.not_, - "oneOf": _keywords.oneOf, - "pattern": _keywords.pattern, - "patternProperties": _keywords.patternProperties, - "properties": _keywords.properties, - "propertyNames": _keywords.propertyNames, - "required": _keywords.required, - "type": _keywords.type, - "uniqueItems": _keywords.uniqueItems, - }, - type_checker=_types.draft7_type_checker, - format_checker=_format.draft7_format_checker, - version="draft7", - id_of=referencing.jsonschema.DRAFT7.id_of, - applicable_validators=_legacy_keywords.ignore_ref_siblings, -) - -Draft201909Validator = create( - meta_schema=SPECIFICATIONS.contents( - "https://json-schema.org/draft/2019-09/schema", - ), - validators={ - "$recursiveRef": _legacy_keywords.recursiveRef, - "$ref": _keywords.ref, - "additionalItems": _legacy_keywords.additionalItems, - "additionalProperties": _keywords.additionalProperties, - "allOf": _keywords.allOf, - "anyOf": _keywords.anyOf, - "const": _keywords.const, - "contains": _keywords.contains, - "dependentRequired": _keywords.dependentRequired, - "dependentSchemas": _keywords.dependentSchemas, - "enum": _keywords.enum, - "exclusiveMaximum": _keywords.exclusiveMaximum, - "exclusiveMinimum": _keywords.exclusiveMinimum, - "format": _keywords.format, - "if": _keywords.if_, - "items": _legacy_keywords.items_draft6_draft7_draft201909, - "maxItems": 
_keywords.maxItems, - "maxLength": _keywords.maxLength, - "maxProperties": _keywords.maxProperties, - "maximum": _keywords.maximum, - "minItems": _keywords.minItems, - "minLength": _keywords.minLength, - "minProperties": _keywords.minProperties, - "minimum": _keywords.minimum, - "multipleOf": _keywords.multipleOf, - "not": _keywords.not_, - "oneOf": _keywords.oneOf, - "pattern": _keywords.pattern, - "patternProperties": _keywords.patternProperties, - "properties": _keywords.properties, - "propertyNames": _keywords.propertyNames, - "required": _keywords.required, - "type": _keywords.type, - "unevaluatedItems": _legacy_keywords.unevaluatedItems_draft2019, - "unevaluatedProperties": _keywords.unevaluatedProperties, - "uniqueItems": _keywords.uniqueItems, - }, - type_checker=_types.draft201909_type_checker, - format_checker=_format.draft201909_format_checker, - version="draft2019-09", -) - -Draft202012Validator = create( - meta_schema=SPECIFICATIONS.contents( - "https://json-schema.org/draft/2020-12/schema", - ), - validators={ - "$dynamicRef": _keywords.dynamicRef, - "$ref": _keywords.ref, - "additionalProperties": _keywords.additionalProperties, - "allOf": _keywords.allOf, - "anyOf": _keywords.anyOf, - "const": _keywords.const, - "contains": _keywords.contains, - "dependentRequired": _keywords.dependentRequired, - "dependentSchemas": _keywords.dependentSchemas, - "enum": _keywords.enum, - "exclusiveMaximum": _keywords.exclusiveMaximum, - "exclusiveMinimum": _keywords.exclusiveMinimum, - "format": _keywords.format, - "if": _keywords.if_, - "items": _keywords.items, - "maxItems": _keywords.maxItems, - "maxLength": _keywords.maxLength, - "maxProperties": _keywords.maxProperties, - "maximum": _keywords.maximum, - "minItems": _keywords.minItems, - "minLength": _keywords.minLength, - "minProperties": _keywords.minProperties, - "minimum": _keywords.minimum, - "multipleOf": _keywords.multipleOf, - "not": _keywords.not_, - "oneOf": _keywords.oneOf, - "pattern": _keywords.pattern, - "patternProperties": _keywords.patternProperties, - "prefixItems": _keywords.prefixItems, - "properties": _keywords.properties, - "propertyNames": _keywords.propertyNames, - "required": _keywords.required, - "type": _keywords.type, - "unevaluatedItems": _keywords.unevaluatedItems, - "unevaluatedProperties": _keywords.unevaluatedProperties, - "uniqueItems": _keywords.uniqueItems, - }, - type_checker=_types.draft202012_type_checker, - format_checker=_format.draft202012_format_checker, - version="draft2020-12", -) - -_LATEST_VERSION = Draft202012Validator - - -class _RefResolver: - """ - Resolve JSON References. - - Arguments: - - base_uri (str): - - The URI of the referring document - - referrer: - - The actual referring document - - store (dict): - - A mapping from URIs to documents to cache - - cache_remote (bool): - - Whether remote refs should be cached after first resolution - - handlers (dict): - - A mapping from URI schemes to functions that should be used - to retrieve them - - urljoin_cache (:func:`functools.lru_cache`): - - A cache that will be used for caching the results of joining - the resolution scope to subscopes. - - remote_cache (:func:`functools.lru_cache`): - - A cache that will be used for caching the results of - resolved remote URLs. - - Attributes: - - cache_remote (bool): - - Whether remote refs should be cached after first resolution - - .. deprecated:: v4.18.0 - - ``RefResolver`` has been deprecated in favor of `referencing`. 
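A minimal sketch of the replacement pattern that deprecation points to: registering schemas in a `referencing.Registry` and handing it to a validator, rather than constructing a resolver. The URN and schema contents here are invented:

```python
# Resolve $ref through a referencing.Registry instead of a RefResolver.
from referencing import Registry, Resource
from jsonschema import Draft202012Validator

address = Resource.from_contents({
    "$schema": "https://json-schema.org/draft/2020-12/schema",
    "$id": "urn:example:address",
    "type": "object",
    "properties": {"city": {"type": "string"}},
})
registry = Registry().with_resource("urn:example:address", address)

validator = Draft202012Validator({"$ref": "urn:example:address"}, registry=registry)
print(validator.is_valid({"city": "Berlin"}))  # True
print(validator.is_valid({"city": 42}))        # False
```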
- """ - - _DEPRECATION_MESSAGE = ( - "jsonschema.RefResolver is deprecated as of v4.18.0, in favor of the " - "https://github.com/python-jsonschema/referencing library, which " - "provides more compliant referencing behavior as well as more " - "flexible APIs for customization. A future release will remove " - "RefResolver. Please file a feature request (on referencing) if you " - "are missing an API for the kind of customization you need." - ) - - def __init__( - self, - base_uri, - referrer, - store=HashTrieMap(), - cache_remote=True, - handlers=(), - urljoin_cache=None, - remote_cache=None, - ): - if urljoin_cache is None: - urljoin_cache = lru_cache(1024)(urljoin) - if remote_cache is None: - remote_cache = lru_cache(1024)(self.resolve_from_url) - - self.referrer = referrer - self.cache_remote = cache_remote - self.handlers = dict(handlers) - - self._scopes_stack = [base_uri] - - self.store = _utils.URIDict( - (uri, each.contents) for uri, each in SPECIFICATIONS.items() - ) - self.store.update( - (id, each.META_SCHEMA) for id, each in _META_SCHEMAS.items() - ) - self.store.update(store) - self.store.update( - (schema["$id"], schema) - for schema in store.values() - if isinstance(schema, Mapping) and "$id" in schema - ) - self.store[base_uri] = referrer - - self._urljoin_cache = urljoin_cache - self._remote_cache = remote_cache - - @classmethod - def from_schema( # noqa: D417 - cls, - schema, - id_of=referencing.jsonschema.DRAFT202012.id_of, - *args, - **kwargs, - ): - """ - Construct a resolver from a JSON schema object. - - Arguments: - - schema: - - the referring schema - - Returns: - - `_RefResolver` - """ - return cls(base_uri=id_of(schema) or "", referrer=schema, *args, **kwargs) # noqa: B026, E501 - - def push_scope(self, scope): - """ - Enter a given sub-scope. - - Treats further dereferences as being performed underneath the - given scope. - """ - self._scopes_stack.append( - self._urljoin_cache(self.resolution_scope, scope), - ) - - def pop_scope(self): - """ - Exit the most recent entered scope. - - Treats further dereferences as being performed underneath the - original scope. - - Don't call this method more times than `push_scope` has been - called. - """ - try: - self._scopes_stack.pop() - except IndexError: - raise exceptions._RefResolutionError( - "Failed to pop the scope from an empty stack. " - "`pop_scope()` should only be called once for every " - "`push_scope()`", - ) - - @property - def resolution_scope(self): - """ - Retrieve the current resolution scope. - """ - return self._scopes_stack[-1] - - @property - def base_uri(self): - """ - Retrieve the current base URI, not including any fragment. - """ - uri, _ = urldefrag(self.resolution_scope) - return uri - - @contextlib.contextmanager - def in_scope(self, scope): - """ - Temporarily enter the given scope for the duration of the context. - - .. deprecated:: v4.0.0 - """ - warnings.warn( - "jsonschema.RefResolver.in_scope is deprecated and will be " - "removed in a future release.", - DeprecationWarning, - stacklevel=3, - ) - self.push_scope(scope) - try: - yield - finally: - self.pop_scope() - - @contextlib.contextmanager - def resolving(self, ref): - """ - Resolve the given ``ref`` and enter its resolution scope. - - Exits the scope on exit of this context manager. 
- - Arguments: - - ref (str): - - The reference to resolve - """ - url, resolved = self.resolve(ref) - self.push_scope(url) - try: - yield resolved - finally: - self.pop_scope() - - def _find_in_referrer(self, key): - return self._get_subschemas_cache()[key] - - @lru_cache # noqa: B019 - def _get_subschemas_cache(self): - cache = {key: [] for key in _SUBSCHEMAS_KEYWORDS} - for keyword, subschema in _search_schema( - self.referrer, _match_subschema_keywords, - ): - cache[keyword].append(subschema) - return cache - - @lru_cache # noqa: B019 - def _find_in_subschemas(self, url): - subschemas = self._get_subschemas_cache()["$id"] - if not subschemas: - return None - uri, fragment = urldefrag(url) - for subschema in subschemas: - id = subschema["$id"] - if not isinstance(id, str): - continue - target_uri = self._urljoin_cache(self.resolution_scope, id) - if target_uri.rstrip("/") == uri.rstrip("/"): - if fragment: - subschema = self.resolve_fragment(subschema, fragment) - self.store[url] = subschema - return url, subschema - return None - - def resolve(self, ref): - """ - Resolve the given reference. - """ - url = self._urljoin_cache(self.resolution_scope, ref).rstrip("/") - - match = self._find_in_subschemas(url) - if match is not None: - return match - - return url, self._remote_cache(url) - - def resolve_from_url(self, url): - """ - Resolve the given URL. - """ - url, fragment = urldefrag(url) - if not url: - url = self.base_uri - - try: - document = self.store[url] - except KeyError: - try: - document = self.resolve_remote(url) - except Exception as exc: - raise exceptions._RefResolutionError(exc) - - return self.resolve_fragment(document, fragment) - - def resolve_fragment(self, document, fragment): - """ - Resolve a ``fragment`` within the referenced ``document``. - - Arguments: - - document: - - The referent document - - fragment (str): - - a URI fragment to resolve within it - """ - fragment = fragment.lstrip("/") - - if not fragment: - return document - - if document is self.referrer: - find = self._find_in_referrer - else: - - def find(key): - yield from _search_schema(document, _match_keyword(key)) - - for keyword in ["$anchor", "$dynamicAnchor"]: - for subschema in find(keyword): - if fragment == subschema[keyword]: - return subschema - for keyword in ["id", "$id"]: - for subschema in find(keyword): - if "#" + fragment == subschema[keyword]: - return subschema - - # Resolve via path - parts = unquote(fragment).split("/") if fragment else [] - for part in parts: - part = part.replace("~1", "/").replace("~0", "~") - - if isinstance(document, Sequence): - try: # noqa: SIM105 - part = int(part) - except ValueError: - pass - try: - document = document[part] - except (TypeError, LookupError): - raise exceptions._RefResolutionError( - f"Unresolvable JSON pointer: {fragment!r}", - ) - - return document - - def resolve_remote(self, uri): - """ - Resolve a remote ``uri``. - - If called directly, does not check the store first, but after - retrieving the document at the specified URI it will be saved in - the store if :attr:`cache_remote` is True. - - .. note:: - - If the requests_ library is present, ``jsonschema`` will use it to - request the remote ``uri``, so that the correct encoding is - detected and used. - - If it isn't, or if the scheme of the ``uri`` is not ``http`` or - ``https``, UTF-8 is assumed. - - Arguments: - - uri (str): - - The URI to resolve - - Returns: - - The retrieved document - - .. 
_requests: https://pypi.org/project/requests/ - """ - try: - import requests - except ImportError: - requests = None - - scheme = urlsplit(uri).scheme - - if scheme in self.handlers: - result = self.handlers[scheme](uri) - elif scheme in ["http", "https"] and requests: - # Requests has support for detecting the correct encoding of - # json over http - result = requests.get(uri).json() - else: - # Otherwise, pass off to urllib and assume utf-8 - with urlopen(uri) as url: - result = json.loads(url.read().decode("utf-8")) - - if self.cache_remote: - self.store[uri] = result - return result - - -_SUBSCHEMAS_KEYWORDS = ("$id", "id", "$anchor", "$dynamicAnchor") - - -def _match_keyword(keyword): - - def matcher(value): - if keyword in value: - yield value - - return matcher - - -def _match_subschema_keywords(value): - for keyword in _SUBSCHEMAS_KEYWORDS: - if keyword in value: - yield keyword, value - - -def _search_schema(schema, matcher): - """Breadth-first search routine.""" - values = deque([schema]) - while values: - value = values.pop() - if not isinstance(value, dict): - continue - yield from matcher(value) - values.extendleft(value.values()) - - -def validate(instance, schema, cls=None, *args, **kwargs): # noqa: D417 - """ - Validate an instance under the given schema. - - >>> validate([2, 3, 4], {"maxItems": 2}) - Traceback (most recent call last): - ... - ValidationError: [2, 3, 4] is too long - - :func:`~jsonschema.validators.validate` will first verify that the - provided schema is itself valid, since not doing so can lead to less - obvious error messages and fail in less obvious or consistent ways. - - If you know you have a valid schema already, especially - if you intend to validate multiple instances with - the same schema, you likely would prefer using the - `jsonschema.protocols.Validator.validate` method directly on a - specific validator (e.g. ``Draft202012Validator.validate``). - - - Arguments: - - instance: - - The instance to validate - - schema: - - The schema to validate with - - cls (jsonschema.protocols.Validator): - - The class that will be used to validate the instance. - - If the ``cls`` argument is not provided, two things will happen - in accordance with the specification. First, if the schema has a - :kw:`$schema` keyword containing a known meta-schema [#]_ then the - proper validator will be used. The specification recommends that - all schemas contain :kw:`$schema` properties for this reason. If no - :kw:`$schema` property is found, the default validator class is the - latest released draft. - - Any other provided positional and keyword arguments will be passed - on when instantiating the ``cls``. - - Raises: - - `jsonschema.exceptions.ValidationError`: - - if the instance is invalid - - `jsonschema.exceptions.SchemaError`: - - if the schema itself is invalid - - .. rubric:: Footnotes - .. [#] known by a validator registered with - `jsonschema.validators.validates` - """ - if cls is None: - cls = validator_for(schema) - - cls.check_schema(schema) - validator = cls(schema, *args, **kwargs) - error = exceptions.best_match(validator.iter_errors(instance)) - if error is not None: - raise error - - -def validator_for(schema, default=_UNSET): - """ - Retrieve the validator class appropriate for validating the given schema. - - Uses the :kw:`$schema` keyword that should be present in the given - schema to look up the appropriate validator class. 
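# [editor's sketch, not part of the deleted file] The $schema dispatch that
# validator_for() performs, reduced to a toy registry; the real lookup table
# (_META_SCHEMAS) and default (_LATEST_VERSION) live inside jsonschema, so the
# names below are stand-ins.
TOY_META_SCHEMAS = {
    "https://json-schema.org/draft/2020-12/schema": "Draft202012Validator",
    "http://json-schema.org/draft-07/schema#": "Draft7Validator",
}


def pick_validator(schema, default="Draft202012Validator"):
    # Boolean schemas and schemas without $schema fall back to the default.
    if isinstance(schema, bool) or "$schema" not in schema:
        return default
    return TOY_META_SCHEMAS.get(schema["$schema"], default)


print(pick_validator({"$schema": "http://json-schema.org/draft-07/schema#"}))
print(pick_validator(True))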
- 
 - Arguments: 
 - 
 - schema (collections.abc.Mapping or bool): 
 - 
 - the schema to look at 
 - 
 - default: 
 - 
 - the default to return if the appropriate validator class 
 - cannot be determined. 
 - 
 - If unprovided, the default is to return the latest supported 
 - draft. 
 - 
 - Examples: 
 - 
 - The :kw:`$schema` JSON Schema keyword will control which validator 
 - class is returned: 
 - 
 - >>> schema = { 
 - ... "$schema": "https://json-schema.org/draft/2020-12/schema", 
 - ... "type": "integer", 
 - ... } 
 - >>> jsonschema.validators.validator_for(schema) 
 - <class 'jsonschema.validators.Draft202012Validator'> 
 - 
 - Here, a draft 7 schema instead will return the draft 7 validator: 
 - 
 - >>> schema = { 
 - ... "$schema": "http://json-schema.org/draft-07/schema#", 
 - ... "type": "integer", 
 - ... } 
 - >>> jsonschema.validators.validator_for(schema) 
 - <class 'jsonschema.validators.Draft7Validator'> 
 - 
 - Schemas with no ``$schema`` keyword will fall back to the default 
 - argument: 
 - 
 - >>> schema = {"type": "integer"} 
 - >>> jsonschema.validators.validator_for( 
 - ... schema, default=Draft7Validator, 
 - ... ) 
 - <class 'jsonschema.validators.Draft7Validator'> 
 - 
 - or if none is provided, to the latest version supported. 
 - Always including the keyword when authoring schemas is highly 
 - recommended. 
 - 
 - """ 
 - DefaultValidator = _LATEST_VERSION if default is _UNSET else default 
 - 
 - if schema is True or schema is False or "$schema" not in schema: 
 - return DefaultValidator 
 - if schema["$schema"] not in _META_SCHEMAS and default is _UNSET: 
 - warn( 
 - ( 
 - "The metaschema specified by $schema was not found. " 
 - "Using the latest draft to validate, but this will raise " 
 - "an error in the future." 
 - ), 
 - DeprecationWarning, 
 - stacklevel=2, 
 - ) 
 - return _META_SCHEMAS.get(schema["$schema"], DefaultValidator) 
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py 
deleted file mode 100644 
index 18747dc80896c087f37a878674e7c3c34bbd1e3f..0000000000000000000000000000000000000000 
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py 
+++ /dev/null 
@@ -1,22 +0,0 @@ 
-""" 
-Build an example package using the limited Python C API. 
-""" - -import numpy as np -from setuptools import setup, Extension -import os - -macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")] - -limited_api = Extension( - "limited_api", - sources=[os.path.join('.', "limited_api.c")], - include_dirs=[np.get_include()], - define_macros=macros, -) - -extensions = [limited_api] - -setup( - ext_modules=extensions -) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f deleted file mode 100644 index 9dc1cfa4446d8c05c0fc0bb1c69360a687d003c3..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f +++ /dev/null @@ -1,14 +0,0 @@ - SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) - CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR - PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", - 1 OPENPAR="(", CLOSEPAR=")") - CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 -Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 - OUT1 = SINGLE - OUT2 = DOUBLE - OUT3 = SEMICOL - OUT4 = EXCLA - OUT5 = OPENPAR - OUT6 = CLOSEPAR - RETURN - END diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/plotting/_misc.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/plotting/_misc.py deleted file mode 100644 index 625780ac9fc670b9cbc215ec5ffda61a05196eba..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/plotting/_misc.py +++ /dev/null @@ -1,688 +0,0 @@ -from __future__ import annotations - -from contextlib import contextmanager -from typing import ( - TYPE_CHECKING, - Any, -) - -from pandas.plotting._core import _get_plot_backend - -if TYPE_CHECKING: - from collections.abc import ( - Generator, - Mapping, - ) - - from matplotlib.axes import Axes - from matplotlib.colors import Colormap - from matplotlib.figure import Figure - from matplotlib.table import Table - import numpy as np - - from pandas import ( - DataFrame, - Series, - ) - - -def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table: - """ - Helper function to convert DataFrame and Series to matplotlib.table. - - Parameters - ---------- - ax : Matplotlib axes object - data : DataFrame or Series - Data for table contents. - **kwargs - Keyword arguments to be passed to matplotlib.table.table. - If `rowLabels` or `colLabels` is not specified, data index or column - name will be used. - - Returns - ------- - matplotlib table object - - Examples - -------- - - .. plot:: - :context: close-figs - - >>> import matplotlib.pyplot as plt - >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) - >>> fix, ax = plt.subplots() - >>> ax.axis('off') - (0.0, 1.0, 0.0, 1.0) - >>> table = pd.plotting.table(ax, df, loc='center', - ... cellLoc='center', colWidths=list([.2, .2])) - """ - plot_backend = _get_plot_backend("matplotlib") - return plot_backend.table( - ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs - ) - - -def register() -> None: - """ - Register pandas formatters and converters with matplotlib. - - This function modifies the global ``matplotlib.units.registry`` - dictionary. 
pandas adds custom converters for 
 - 
 - * pd.Timestamp 
 - * pd.Period 
 - * np.datetime64 
 - * datetime.datetime 
 - * datetime.date 
 - * datetime.time 
 - 
 - See Also 
 - -------- 
 - deregister_matplotlib_converters : Remove pandas formatters and converters. 
 - 
 - Examples 
 - -------- 
 - .. plot:: 
 - :context: close-figs 
 - 
 - The following line is done automatically by pandas so 
 - the plot can be rendered: 
 - 
 - >>> pd.plotting.register_matplotlib_converters() 
 - 
 - >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'), 
 - ... 'y': [1, 2] 
 - ... }) 
 - >>> plot = df.plot.line(x='ts', y='y') 
 - 
 - If the converter registration is manually unset, an error will be raised: 
 - 
 - >>> pd.set_option("plotting.matplotlib.register_converters", 
 - ... False) # doctest: +SKIP 
 - >>> df.plot.line(x='ts', y='y') # doctest: +SKIP 
 - Traceback (most recent call last): 
 - TypeError: float() argument must be a string or a real number, not 'Period' 
 - """ 
 - plot_backend = _get_plot_backend("matplotlib") 
 - plot_backend.register() 
 - 
 - 
 -def deregister() -> None: 
 - """ 
 - Remove pandas formatters and converters. 
 - 
 - Removes the custom converters added by :func:`register`. This 
 - attempts to set the state of the registry back to the state before 
 - pandas registered its own units. Converters for pandas' own types like 
 - Timestamp and Period are removed completely. Converters for types 
 - pandas overwrites, like ``datetime.datetime``, are restored to their 
 - original value. 
 - 
 - See Also 
 - -------- 
 - register_matplotlib_converters : Register pandas formatters and converters 
 - with matplotlib. 
 - 
 - Examples 
 - -------- 
 - .. plot:: 
 - :context: close-figs 
 - 
 - The following line is done automatically by pandas so 
 - the plot can be rendered: 
 - 
 - >>> pd.plotting.register_matplotlib_converters() 
 - 
 - >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'), 
 - ... 'y': [1, 2] 
 - ... }) 
 - >>> plot = df.plot.line(x='ts', y='y') 
 - 
 - If the converter registration is manually unset, an error will be raised: 
 - 
 - >>> pd.set_option("plotting.matplotlib.register_converters", 
 - ... False) # doctest: +SKIP 
 - >>> df.plot.line(x='ts', y='y') # doctest: +SKIP 
 - Traceback (most recent call last): 
 - TypeError: float() argument must be a string or a real number, not 'Period' 
 - """ 
 - plot_backend = _get_plot_backend("matplotlib") 
 - plot_backend.deregister() 
 - 
 - 
 -def scatter_matrix( 
 - frame: DataFrame, 
 - alpha: float = 0.5, 
 - figsize: tuple[float, float] | None = None, 
 - ax: Axes | None = None, 
 - grid: bool = False, 
 - diagonal: str = "hist", 
 - marker: str = ".", 
 - density_kwds: Mapping[str, Any] | None = None, 
 - hist_kwds: Mapping[str, Any] | None = None, 
 - range_padding: float = 0.05, 
 - **kwargs, 
 -) -> np.ndarray: 
 - """ 
 - Draw a matrix of scatter plots. 
 - 
 - Parameters 
 - ---------- 
 - frame : DataFrame 
 - alpha : float, optional 
 - Amount of transparency applied. 
 - figsize : (float, float), optional 
 - A tuple (width, height) in inches. 
 - ax : Matplotlib axis object, optional 
 - grid : bool, optional 
 - Setting this to True will show the grid. 
 - diagonal : {'hist', 'kde'} 
 - Pick between 'kde' and 'hist' for either Kernel Density Estimation or 
 - Histogram plot in the diagonal. 
 - marker : str, optional 
 - Matplotlib marker type, default '.'. 
 - density_kwds : keywords 
 - Keyword arguments to be passed to kernel density estimate plot. 
 - hist_kwds : keywords 
 - Keyword arguments to be passed to hist function. 
 - range_padding : float, default 0.05 
 - Relative extension of axis range in x and y with respect to 
 - (x_max - x_min) or (y_max - y_min). 
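# [editor's sketch, not part of the deleted file] The parameters documented
# above in use; numbers are arbitrary demo values, and the remaining
# parameters continue in the docstring below.
import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.random.default_rng(0).normal(size=(200, 3)), columns=["A", "B", "C"]
)
axes = pd.plotting.scatter_matrix(
    df,
    alpha=0.4,          # transparency of the scatter points
    diagonal="kde",     # kernel density estimate on the diagonal
    range_padding=0.1,  # pad each axis range by 10% of its span
)
print(axes.shape)  # (3, 3) grid of Axes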
- **kwargs - Keyword arguments to be passed to scatter function. - - Returns - ------- - numpy.ndarray - A matrix of scatter plots. - - Examples - -------- - - .. plot:: - :context: close-figs - - >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D']) - >>> pd.plotting.scatter_matrix(df, alpha=0.2) - array([[, , - , ], - [, , - , ], - [, , - , ], - [, , - , ]], - dtype=object) - """ - plot_backend = _get_plot_backend("matplotlib") - return plot_backend.scatter_matrix( - frame=frame, - alpha=alpha, - figsize=figsize, - ax=ax, - grid=grid, - diagonal=diagonal, - marker=marker, - density_kwds=density_kwds, - hist_kwds=hist_kwds, - range_padding=range_padding, - **kwargs, - ) - - -def radviz( - frame: DataFrame, - class_column: str, - ax: Axes | None = None, - color: list[str] | tuple[str, ...] | None = None, - colormap: Colormap | str | None = None, - **kwds, -) -> Axes: - """ - Plot a multidimensional dataset in 2D. - - Each Series in the DataFrame is represented as a evenly distributed - slice on a circle. Each data point is rendered in the circle according to - the value on each Series. Highly correlated `Series` in the `DataFrame` - are placed closer on the unit circle. - - RadViz allow to project a N-dimensional data set into a 2D space where the - influence of each dimension can be interpreted as a balance between the - influence of all dimensions. - - More info available at the `original article - `_ - describing RadViz. - - Parameters - ---------- - frame : `DataFrame` - Object holding the data. - class_column : str - Column name containing the name of the data point category. - ax : :class:`matplotlib.axes.Axes`, optional - A plot instance to which to add the information. - color : list[str] or tuple[str], optional - Assign a color to each category. Example: ['blue', 'green']. - colormap : str or :class:`matplotlib.colors.Colormap`, default None - Colormap to select colors from. If string, load colormap with that - name from matplotlib. - **kwds - Options to pass to matplotlib scatter plotting method. - - Returns - ------- - :class:`matplotlib.axes.Axes` - - See Also - -------- - pandas.plotting.andrews_curves : Plot clustering visualization. - - Examples - -------- - - .. plot:: - :context: close-figs - - >>> df = pd.DataFrame( - ... { - ... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6, 6.7, 4.6], - ... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2, 3.3, 3.6], - ... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4, 5.7, 1.0], - ... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2, 2.1, 0.2], - ... 'Category': [ - ... 'virginica', - ... 'virginica', - ... 'setosa', - ... 'virginica', - ... 'virginica', - ... 'versicolor', - ... 'versicolor', - ... 'setosa', - ... 'virginica', - ... 'setosa' - ... ] - ... } - ... ) - >>> pd.plotting.radviz(df, 'Category') # doctest: +SKIP - """ - plot_backend = _get_plot_backend("matplotlib") - return plot_backend.radviz( - frame=frame, - class_column=class_column, - ax=ax, - color=color, - colormap=colormap, - **kwds, - ) - - -def andrews_curves( - frame: DataFrame, - class_column: str, - ax: Axes | None = None, - samples: int = 200, - color: list[str] | tuple[str, ...] | None = None, - colormap: Colormap | str | None = None, - **kwargs, -) -> Axes: - """ - Generate a matplotlib plot for visualising clusters of multivariate data. - - Andrews curves have the functional form: - - .. 
math:: - f(t) = \\frac{x_1}{\\sqrt{2}} + x_2 \\sin(t) + x_3 \\cos(t) + - x_4 \\sin(2t) + x_5 \\cos(2t) + \\cdots - - Where :math:`x` coefficients correspond to the values of each dimension - and :math:`t` is linearly spaced between :math:`-\\pi` and :math:`+\\pi`. - Each row of frame then corresponds to a single curve. - - Parameters - ---------- - frame : DataFrame - Data to be plotted, preferably normalized to (0.0, 1.0). - class_column : label - Name of the column containing class names. - ax : axes object, default None - Axes to use. - samples : int - Number of points to plot in each curve. - color : str, list[str] or tuple[str], optional - Colors to use for the different classes. Colors can be strings - or 3-element floating point RGB values. - colormap : str or matplotlib colormap object, default None - Colormap to select colors from. If a string, load colormap with that - name from matplotlib. - **kwargs - Options to pass to matplotlib plotting method. - - Returns - ------- - :class:`matplotlib.axes.Axes` - - Examples - -------- - - .. plot:: - :context: close-figs - - >>> df = pd.read_csv( - ... 'https://raw.githubusercontent.com/pandas-dev/' - ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' - ... ) - >>> pd.plotting.andrews_curves(df, 'Name') # doctest: +SKIP - """ - plot_backend = _get_plot_backend("matplotlib") - return plot_backend.andrews_curves( - frame=frame, - class_column=class_column, - ax=ax, - samples=samples, - color=color, - colormap=colormap, - **kwargs, - ) - - -def bootstrap_plot( - series: Series, - fig: Figure | None = None, - size: int = 50, - samples: int = 500, - **kwds, -) -> Figure: - """ - Bootstrap plot on mean, median and mid-range statistics. - - The bootstrap plot is used to estimate the uncertainty of a statistic - by relying on random sampling with replacement [1]_. This function will - generate bootstrapping plots for mean, median and mid-range statistics - for the given number of samples of the given size. - - .. [1] "Bootstrapping (statistics)" in \ - https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 - - Parameters - ---------- - series : pandas.Series - Series from where to get the samplings for the bootstrapping. - fig : matplotlib.figure.Figure, default None - If given, it will use the `fig` reference for plotting instead of - creating a new one with default parameters. - size : int, default 50 - Number of data points to consider during each sampling. It must be - less than or equal to the length of the `series`. - samples : int, default 500 - Number of times the bootstrap procedure is performed. - **kwds - Options to pass to matplotlib plotting method. - - Returns - ------- - matplotlib.figure.Figure - Matplotlib figure. - - See Also - -------- - pandas.DataFrame.plot : Basic plotting for DataFrame objects. - pandas.Series.plot : Basic plotting for Series objects. - - Examples - -------- - This example draws a basic bootstrap plot for a Series. - - .. plot:: - :context: close-figs - - >>> s = pd.Series(np.random.uniform(size=100)) - >>> pd.plotting.bootstrap_plot(s) -
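# [editor's sketch, not part of the deleted file] The resampling-with-
# replacement idea behind bootstrap_plot(), without the plotting: draw
# `samples` subsets of length `size` and collect the three statistics.
# This is an illustration of the procedure, not the pandas implementation.
import numpy as np

rng = np.random.default_rng(0)
series = rng.uniform(size=100)
size, samples = 50, 500

draws = rng.choice(series, size=(samples, size), replace=True)
means = draws.mean(axis=1)
medians = np.median(draws, axis=1)
midranges = (draws.min(axis=1) + draws.max(axis=1)) / 2
# The spread of each sampling distribution estimates the statistic's uncertainty.
print(means.std(), medians.std(), midranges.std())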
- """ - plot_backend = _get_plot_backend("matplotlib") - return plot_backend.bootstrap_plot( - series=series, fig=fig, size=size, samples=samples, **kwds - ) - - -def parallel_coordinates( - frame: DataFrame, - class_column: str, - cols: list[str] | None = None, - ax: Axes | None = None, - color: list[str] | tuple[str, ...] | None = None, - use_columns: bool = False, - xticks: list | tuple | None = None, - colormap: Colormap | str | None = None, - axvlines: bool = True, - axvlines_kwds: Mapping[str, Any] | None = None, - sort_labels: bool = False, - **kwargs, -) -> Axes: - """ - Parallel coordinates plotting. - - Parameters - ---------- - frame : DataFrame - class_column : str - Column name containing class names. - cols : list, optional - A list of column names to use. - ax : matplotlib.axis, optional - Matplotlib axis object. - color : list or tuple, optional - Colors to use for the different classes. - use_columns : bool, optional - If true, columns will be used as xticks. - xticks : list or tuple, optional - A list of values to use for xticks. - colormap : str or matplotlib colormap, default None - Colormap to use for line colors. - axvlines : bool, optional - If true, vertical lines will be added at each xtick. - axvlines_kwds : keywords, optional - Options to be passed to axvline method for vertical lines. - sort_labels : bool, default False - Sort class_column labels, useful when assigning colors. - **kwargs - Options to pass to matplotlib plotting method. - - Returns - ------- - matplotlib.axes.Axes - - Examples - -------- - - .. plot:: - :context: close-figs - - >>> df = pd.read_csv( - ... 'https://raw.githubusercontent.com/pandas-dev/' - ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' - ... ) - >>> pd.plotting.parallel_coordinates( - ... df, 'Name', color=('#556270', '#4ECDC4', '#C7F464') - ... ) # doctest: +SKIP - """ - plot_backend = _get_plot_backend("matplotlib") - return plot_backend.parallel_coordinates( - frame=frame, - class_column=class_column, - cols=cols, - ax=ax, - color=color, - use_columns=use_columns, - xticks=xticks, - colormap=colormap, - axvlines=axvlines, - axvlines_kwds=axvlines_kwds, - sort_labels=sort_labels, - **kwargs, - ) - - -def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes: - """ - Lag plot for time series. - - Parameters - ---------- - series : Series - The time series to visualize. - lag : int, default 1 - Lag length of the scatter plot. - ax : Matplotlib axis object, optional - The matplotlib axis object to use. - **kwds - Matplotlib scatter method keyword arguments. - - Returns - ------- - matplotlib.axes.Axes - - Examples - -------- - Lag plots are most commonly used to look for patterns in time series data. - - Given the following time series - - .. plot:: - :context: close-figs - - >>> np.random.seed(5) - >>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50)) - >>> s = pd.Series(x) - >>> s.plot() # doctest: +SKIP - - A lag plot with ``lag=1`` returns - - .. plot:: - :context: close-figs - - >>> pd.plotting.lag_plot(s, lag=1) - - """ - plot_backend = _get_plot_backend("matplotlib") - return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds) - - -def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwargs) -> Axes: - """ - Autocorrelation plot for time series. - - Parameters - ---------- - series : Series - The time series to visualize. - ax : Matplotlib axis object, optional - The matplotlib axis object to use. - **kwargs - Options to pass to matplotlib plotting method. 
- - Returns - ------- - matplotlib.axes.Axes - - Examples - -------- - The horizontal lines in the plot correspond to 95% and 99% confidence bands. - - The dashed line is 99% confidence band. - - .. plot:: - :context: close-figs - - >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000) - >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing)) - >>> pd.plotting.autocorrelation_plot(s) # doctest: +SKIP - """ - plot_backend = _get_plot_backend("matplotlib") - return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs) - - -class _Options(dict): - """ - Stores pandas plotting options. - - Allows for parameter aliasing so you can just use parameter names that are - the same as the plot function parameters, but is stored in a canonical - format that makes it easy to breakdown into groups later. - - Examples - -------- - - .. plot:: - :context: close-figs - - >>> np.random.seed(42) - >>> df = pd.DataFrame({'A': np.random.randn(10), - ... 'B': np.random.randn(10)}, - ... index=pd.date_range("1/1/2000", - ... freq='4MS', periods=10)) - >>> with pd.plotting.plot_params.use("x_compat", True): - ... _ = df["A"].plot(color="r") - ... _ = df["B"].plot(color="g") - """ - - # alias so the names are same as plotting method parameter names - _ALIASES = {"x_compat": "xaxis.compat"} - _DEFAULT_KEYS = ["xaxis.compat"] - - def __init__(self, deprecated: bool = False) -> None: - self._deprecated = deprecated - super().__setitem__("xaxis.compat", False) - - def __getitem__(self, key): - key = self._get_canonical_key(key) - if key not in self: - raise ValueError(f"{key} is not a valid pandas plotting option") - return super().__getitem__(key) - - def __setitem__(self, key, value) -> None: - key = self._get_canonical_key(key) - super().__setitem__(key, value) - - def __delitem__(self, key) -> None: - key = self._get_canonical_key(key) - if key in self._DEFAULT_KEYS: - raise ValueError(f"Cannot remove default parameter {key}") - super().__delitem__(key) - - def __contains__(self, key) -> bool: - key = self._get_canonical_key(key) - return super().__contains__(key) - - def reset(self) -> None: - """ - Reset the option store to its initial state - - Returns - ------- - None - """ - # error: Cannot access "__init__" directly - self.__init__() # type: ignore[misc] - - def _get_canonical_key(self, key): - return self._ALIASES.get(key, key) - - @contextmanager - def use(self, key, value) -> Generator[_Options, None, None]: - """ - Temporarily set a parameter value using the with statement. - Aliasing allowed. 
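# [editor's sketch, not part of the deleted file] The save/apply/restore
# pattern that use() implements just below, as a standalone context manager:
# the old value is kept, the new one applied for the with-block, and the
# original restored in a finally clause even if the body raises.
from contextlib import contextmanager

options = {"xaxis.compat": False}


@contextmanager
def use(key, value):
    old = options[key]
    options[key] = value
    try:
        yield options
    finally:
        options[key] = old


with use("xaxis.compat", True):
    assert options["xaxis.compat"] is True
assert options["xaxis.compat"] is False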
- """ - old_value = self[key] - try: - self[key] = value - yield self - finally: - self[key] = old_value - - -plot_params = _Options() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_datetime.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_datetime.py deleted file mode 100644 index d325971e7baf69fb3119afc018c6f90da93e0d3b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_datetime.py +++ /dev/null @@ -1,50 +0,0 @@ -from datetime import datetime - -import numpy as np - -from pandas import ( - DataFrame, - Index, - MultiIndex, - Period, - Series, - period_range, - to_datetime, -) -import pandas._testing as tm - - -def test_multiindex_period_datetime(): - # GH4861, using datetime in period of multiindex raises exception - - idx1 = Index(["a", "a", "a", "b", "b"]) - idx2 = period_range("2012-01", periods=len(idx1), freq="M") - s = Series(np.random.default_rng(2).standard_normal(len(idx1)), [idx1, idx2]) - - # try Period as index - expected = s.iloc[0] - result = s.loc["a", Period("2012-01")] - assert result == expected - - # try datetime as index - result = s.loc["a", datetime(2012, 1, 1)] - assert result == expected - - -def test_multiindex_datetime_columns(): - # GH35015, using datetime as column indices raises exception - - mi = MultiIndex.from_tuples( - [(to_datetime("02/29/2020"), to_datetime("03/01/2020"))], names=["a", "b"] - ) - - df = DataFrame([], columns=mi) - - expected_df = DataFrame( - [], - columns=MultiIndex.from_arrays( - [[to_datetime("02/29/2020")], [to_datetime("03/01/2020")]], names=["a", "b"] - ), - ) - - tm.assert_frame_equal(df, expected_df) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/test_upcast.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/test_upcast.py deleted file mode 100644 index bc4c4c2e24e9caf8d4ac118b5053fe03d97aafb0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/test_upcast.py +++ /dev/null @@ -1,102 +0,0 @@ -import numpy as np -import pytest - -from pandas._libs.parsers import ( - _maybe_upcast, - na_values, -) - -import pandas as pd -from pandas import NA -import pandas._testing as tm -from pandas.core.arrays import ( - ArrowStringArray, - BooleanArray, - FloatingArray, - IntegerArray, - StringArray, -) - - -def test_maybe_upcast(any_real_numpy_dtype): - # GH#36712 - - dtype = np.dtype(any_real_numpy_dtype) - na_value = na_values[dtype] - arr = np.array([1, 2, na_value], dtype=dtype) - result = _maybe_upcast(arr, use_dtype_backend=True) - - expected_mask = np.array([False, False, True]) - if issubclass(dtype.type, np.integer): - expected = IntegerArray(arr, mask=expected_mask) - else: - expected = FloatingArray(arr, mask=expected_mask) - - tm.assert_extension_array_equal(result, expected) - - -def test_maybe_upcast_no_na(any_real_numpy_dtype): - # GH#36712 - arr = np.array([1, 2, 3], dtype=any_real_numpy_dtype) - result = _maybe_upcast(arr, use_dtype_backend=True) - - expected_mask = np.array([False, False, False]) - if issubclass(np.dtype(any_real_numpy_dtype).type, np.integer): - expected = IntegerArray(arr, mask=expected_mask) - else: - expected = FloatingArray(arr, mask=expected_mask) - - tm.assert_extension_array_equal(result, expected) - - -def 
test_maybe_upcaste_bool(): - # GH#36712 - dtype = np.bool_ - na_value = na_values[dtype] - arr = np.array([True, False, na_value], dtype="uint8").view(dtype) - result = _maybe_upcast(arr, use_dtype_backend=True) - - expected_mask = np.array([False, False, True]) - expected = BooleanArray(arr, mask=expected_mask) - tm.assert_extension_array_equal(result, expected) - - -def test_maybe_upcaste_bool_no_nan(): - # GH#36712 - dtype = np.bool_ - arr = np.array([True, False, False], dtype="uint8").view(dtype) - result = _maybe_upcast(arr, use_dtype_backend=True) - - expected_mask = np.array([False, False, False]) - expected = BooleanArray(arr, mask=expected_mask) - tm.assert_extension_array_equal(result, expected) - - -def test_maybe_upcaste_all_nan(): - # GH#36712 - dtype = np.int64 - na_value = na_values[dtype] - arr = np.array([na_value, na_value], dtype=dtype) - result = _maybe_upcast(arr, use_dtype_backend=True) - - expected_mask = np.array([True, True]) - expected = IntegerArray(arr, mask=expected_mask) - tm.assert_extension_array_equal(result, expected) - - -@pytest.mark.parametrize("val", [na_values[np.object_], "c"]) -def test_maybe_upcast_object(val, string_storage): - # GH#36712 - pa = pytest.importorskip("pyarrow") - - with pd.option_context("mode.string_storage", string_storage): - arr = np.array(["a", "b", val], dtype=np.object_) - result = _maybe_upcast(arr, use_dtype_backend=True) - - if string_storage == "python": - exp_val = "c" if val == "c" else NA - expected = StringArray(np.array(["a", "b", exp_val], dtype=np.object_)) - else: - exp_val = "c" if val == "c" else None - expected = ArrowStringArray(pa.array(["a", "b", exp_val])) - tm.assert_extension_array_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/test_fsspec.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/test_fsspec.py deleted file mode 100644 index 030505f617b972a380d6bb4cde2dab45dc9d8918..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/test_fsspec.py +++ /dev/null @@ -1,319 +0,0 @@ -import io - -import numpy as np -import pytest - -from pandas import ( - DataFrame, - date_range, - read_csv, - read_excel, - read_feather, - read_json, - read_parquet, - read_pickle, - read_stata, - read_table, -) -import pandas._testing as tm -from pandas.util import _test_decorators as td - - -@pytest.fixture -def df1(): - return DataFrame( - { - "int": [1, 3], - "float": [2.0, np.nan], - "str": ["t", "s"], - "dt": date_range("2018-06-18", periods=2), - } - ) - - -@pytest.fixture -def cleared_fs(): - fsspec = pytest.importorskip("fsspec") - - memfs = fsspec.filesystem("memory") - yield memfs - memfs.store.clear() - - -def test_read_csv(cleared_fs, df1): - text = str(df1.to_csv(index=False)).encode() - with cleared_fs.open("test/test.csv", "wb") as w: - w.write(text) - df2 = read_csv("memory://test/test.csv", parse_dates=["dt"]) - - tm.assert_frame_equal(df1, df2) - - -def test_reasonable_error(monkeypatch, cleared_fs): - from fsspec.registry import known_implementations - - with pytest.raises(ValueError, match="nosuchprotocol"): - read_csv("nosuchprotocol://test/test.csv") - err_msg = "test error message" - monkeypatch.setitem( - known_implementations, - "couldexist", - {"class": "unimportable.CouldExist", "err": err_msg}, - ) - with pytest.raises(ImportError, match=err_msg): - read_csv("couldexist://test/test.csv") - - -def test_to_csv(cleared_fs, df1): - 
df1.to_csv("memory://test/test.csv", index=True) - - df2 = read_csv("memory://test/test.csv", parse_dates=["dt"], index_col=0) - - tm.assert_frame_equal(df1, df2) - - -def test_to_excel(cleared_fs, df1): - pytest.importorskip("openpyxl") - ext = "xlsx" - path = f"memory://test/test.{ext}" - df1.to_excel(path, index=True) - - df2 = read_excel(path, parse_dates=["dt"], index_col=0) - - tm.assert_frame_equal(df1, df2) - - -@pytest.mark.parametrize("binary_mode", [False, True]) -def test_to_csv_fsspec_object(cleared_fs, binary_mode, df1): - fsspec = pytest.importorskip("fsspec") - - path = "memory://test/test.csv" - mode = "wb" if binary_mode else "w" - with fsspec.open(path, mode=mode).open() as fsspec_object: - df1.to_csv(fsspec_object, index=True) - assert not fsspec_object.closed - - mode = mode.replace("w", "r") - with fsspec.open(path, mode=mode) as fsspec_object: - df2 = read_csv( - fsspec_object, - parse_dates=["dt"], - index_col=0, - ) - assert not fsspec_object.closed - - tm.assert_frame_equal(df1, df2) - - -def test_csv_options(fsspectest): - df = DataFrame({"a": [0]}) - df.to_csv( - "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False - ) - assert fsspectest.test[0] == "csv_write" - read_csv("testmem://test/test.csv", storage_options={"test": "csv_read"}) - assert fsspectest.test[0] == "csv_read" - - -def test_read_table_options(fsspectest): - # GH #39167 - df = DataFrame({"a": [0]}) - df.to_csv( - "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False - ) - assert fsspectest.test[0] == "csv_write" - read_table("testmem://test/test.csv", storage_options={"test": "csv_read"}) - assert fsspectest.test[0] == "csv_read" - - -def test_excel_options(fsspectest): - pytest.importorskip("openpyxl") - extension = "xlsx" - - df = DataFrame({"a": [0]}) - - path = f"testmem://test/test.{extension}" - - df.to_excel(path, storage_options={"test": "write"}, index=False) - assert fsspectest.test[0] == "write" - read_excel(path, storage_options={"test": "read"}) - assert fsspectest.test[0] == "read" - - -def test_to_parquet_new_file(cleared_fs, df1): - """Regression test for writing to a not-yet-existent GCS Parquet file.""" - pytest.importorskip("fastparquet") - - df1.to_parquet( - "memory://test/test.csv", index=True, engine="fastparquet", compression=None - ) - - -def test_arrowparquet_options(fsspectest): - """Regression test for writing to a not-yet-existent GCS Parquet file.""" - pytest.importorskip("pyarrow") - df = DataFrame({"a": [0]}) - df.to_parquet( - "testmem://test/test.csv", - engine="pyarrow", - compression=None, - storage_options={"test": "parquet_write"}, - ) - assert fsspectest.test[0] == "parquet_write" - read_parquet( - "testmem://test/test.csv", - engine="pyarrow", - storage_options={"test": "parquet_read"}, - ) - assert fsspectest.test[0] == "parquet_read" - - -@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet -def test_fastparquet_options(fsspectest): - """Regression test for writing to a not-yet-existent GCS Parquet file.""" - pytest.importorskip("fastparquet") - - df = DataFrame({"a": [0]}) - df.to_parquet( - "testmem://test/test.csv", - engine="fastparquet", - compression=None, - storage_options={"test": "parquet_write"}, - ) - assert fsspectest.test[0] == "parquet_write" - read_parquet( - "testmem://test/test.csv", - engine="fastparquet", - storage_options={"test": "parquet_read"}, - ) - assert fsspectest.test[0] == "parquet_read" - - -@pytest.mark.single_cpu -def 
test_from_s3_csv(s3_public_bucket_with_data, tips_file, s3so): - pytest.importorskip("s3fs") - tm.assert_equal( - read_csv( - f"s3://{s3_public_bucket_with_data.name}/tips.csv", storage_options=s3so - ), - read_csv(tips_file), - ) - # the following are decompressed by pandas, not fsspec - tm.assert_equal( - read_csv( - f"s3://{s3_public_bucket_with_data.name}/tips.csv.gz", storage_options=s3so - ), - read_csv(tips_file), - ) - tm.assert_equal( - read_csv( - f"s3://{s3_public_bucket_with_data.name}/tips.csv.bz2", storage_options=s3so - ), - read_csv(tips_file), - ) - - -@pytest.mark.single_cpu -@pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"]) -def test_s3_protocols(s3_public_bucket_with_data, tips_file, protocol, s3so): - pytest.importorskip("s3fs") - tm.assert_equal( - read_csv( - f"{protocol}://{s3_public_bucket_with_data.name}/tips.csv", - storage_options=s3so, - ), - read_csv(tips_file), - ) - - -@pytest.mark.single_cpu -@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet -def test_s3_parquet(s3_public_bucket, s3so, df1): - pytest.importorskip("fastparquet") - pytest.importorskip("s3fs") - - fn = f"s3://{s3_public_bucket.name}/test.parquet" - df1.to_parquet( - fn, index=False, engine="fastparquet", compression=None, storage_options=s3so - ) - df2 = read_parquet(fn, engine="fastparquet", storage_options=s3so) - tm.assert_equal(df1, df2) - - -@td.skip_if_installed("fsspec") -def test_not_present_exception(): - msg = "Missing optional dependency 'fsspec'|fsspec library is required" - with pytest.raises(ImportError, match=msg): - read_csv("memory://test/test.csv") - - -def test_feather_options(fsspectest): - pytest.importorskip("pyarrow") - df = DataFrame({"a": [0]}) - df.to_feather("testmem://mockfile", storage_options={"test": "feather_write"}) - assert fsspectest.test[0] == "feather_write" - out = read_feather("testmem://mockfile", storage_options={"test": "feather_read"}) - assert fsspectest.test[0] == "feather_read" - tm.assert_frame_equal(df, out) - - -def test_pickle_options(fsspectest): - df = DataFrame({"a": [0]}) - df.to_pickle("testmem://mockfile", storage_options={"test": "pickle_write"}) - assert fsspectest.test[0] == "pickle_write" - out = read_pickle("testmem://mockfile", storage_options={"test": "pickle_read"}) - assert fsspectest.test[0] == "pickle_read" - tm.assert_frame_equal(df, out) - - -def test_json_options(fsspectest, compression): - df = DataFrame({"a": [0]}) - df.to_json( - "testmem://mockfile", - compression=compression, - storage_options={"test": "json_write"}, - ) - assert fsspectest.test[0] == "json_write" - out = read_json( - "testmem://mockfile", - compression=compression, - storage_options={"test": "json_read"}, - ) - assert fsspectest.test[0] == "json_read" - tm.assert_frame_equal(df, out) - - -def test_stata_options(fsspectest): - df = DataFrame({"a": [0]}) - df.to_stata( - "testmem://mockfile", storage_options={"test": "stata_write"}, write_index=False - ) - assert fsspectest.test[0] == "stata_write" - out = read_stata("testmem://mockfile", storage_options={"test": "stata_read"}) - assert fsspectest.test[0] == "stata_read" - tm.assert_frame_equal(df, out.astype("int64")) - - -def test_markdown_options(fsspectest): - pytest.importorskip("tabulate") - df = DataFrame({"a": [0]}) - df.to_markdown("testmem://mockfile", storage_options={"test": "md_write"}) - assert fsspectest.test[0] == "md_write" - assert fsspectest.cat("testmem://mockfile") - - -def test_non_fsspec_options(): - pytest.importorskip("pyarrow") - with 
pytest.raises(ValueError, match="storage_options"): - read_csv("localfile", storage_options={"a": True}) - with pytest.raises(ValueError, match="storage_options"): - # separate test for parquet, which has a different code path - read_parquet("localfile", storage_options={"a": True}) - by = io.BytesIO() - - with pytest.raises(ValueError, match="storage_options"): - read_csv(by, storage_options={"a": True}) - - df = DataFrame({"a": [0]}) - with pytest.raises(ValueError, match="storage_options"): - df.to_parquet("nonfsspecpath", storage_options={"a": True}) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/operations/prepare.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/operations/prepare.py deleted file mode 100644 index a726f031a4fd4d695bcba86129ba192ec65528b7..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/operations/prepare.py +++ /dev/null @@ -1,642 +0,0 @@ -"""Prepares a distribution for installation -""" - -# The following comment should be removed at some point in the future. -# mypy: strict-optional=False - -import logging -import mimetypes -import os -import shutil -from typing import Dict, Iterable, List, Optional - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.distributions import make_distribution_for_install_requirement -from pip._internal.distributions.installed import InstalledDistribution -from pip._internal.exceptions import ( - DirectoryUrlHashUnsupported, - HashMismatch, - HashUnpinned, - InstallationError, - NetworkConnectionError, - PreviousBuildDirError, - VcsHashUnsupported, -) -from pip._internal.index.package_finder import PackageFinder -from pip._internal.metadata import BaseDistribution -from pip._internal.models.link import Link -from pip._internal.models.wheel import Wheel -from pip._internal.network.download import BatchDownloader, Downloader -from pip._internal.network.lazy_wheel import ( - HTTPRangeRequestUnsupported, - dist_from_wheel_url, -) -from pip._internal.network.session import PipSession -from pip._internal.req.req_install import InstallRequirement -from pip._internal.req.req_tracker import RequirementTracker -from pip._internal.utils.filesystem import copy2_fixed -from pip._internal.utils.hashes import Hashes, MissingHashes -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import display_path, hide_url, is_installable_dir, rmtree -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.unpacking import unpack_file -from pip._internal.vcs import vcs - -logger = logging.getLogger(__name__) - - -def _get_prepared_distribution( - req: InstallRequirement, - req_tracker: RequirementTracker, - finder: PackageFinder, - build_isolation: bool, -) -> BaseDistribution: - """Prepare a distribution for installation.""" - abstract_dist = make_distribution_for_install_requirement(req) - with req_tracker.track(req): - abstract_dist.prepare_distribution_metadata(finder, build_isolation) - return abstract_dist.get_metadata_distribution() - - -def unpack_vcs_link(link: Link, location: str, verbosity: int) -> None: - vcs_backend = vcs.get_backend_for_scheme(link.scheme) - assert vcs_backend is not None - vcs_backend.unpack(location, url=hide_url(link.url), verbosity=verbosity) - - -class File: - def __init__(self, path: str, content_type: Optional[str]) -> None: - self.path = path - if content_type is None: - self.content_type = 
mimetypes.guess_type(path)[0] - else: - self.content_type = content_type - - -def get_http_url( - link: Link, - download: Downloader, - download_dir: Optional[str] = None, - hashes: Optional[Hashes] = None, -) -> File: - temp_dir = TempDirectory(kind="unpack", globally_managed=True) - # If a download dir is specified, is the file already downloaded there? - already_downloaded_path = None - if download_dir: - already_downloaded_path = _check_download_dir(link, download_dir, hashes) - - if already_downloaded_path: - from_path = already_downloaded_path - content_type = None - else: - # let's download to a tmp dir - from_path, content_type = download(link, temp_dir.path) - if hashes: - hashes.check_against_path(from_path) - - return File(from_path, content_type) - - -def _copy2_ignoring_special_files(src: str, dest: str) -> None: - """Copying special files is not supported, but as a convenience to users - we skip errors copying them. This supports tools that may create e.g. - socket files in the project source directory. - """ - try: - copy2_fixed(src, dest) - except shutil.SpecialFileError as e: - # SpecialFileError may be raised due to either the source or - # destination. If the destination was the cause then we would actually - # care, but since the destination directory is deleted prior to - # copy we ignore all of them assuming it is caused by the source. - logger.warning( - "Ignoring special file error '%s' encountered copying %s to %s.", - str(e), - src, - dest, - ) - - -def _copy_source_tree(source: str, target: str) -> None: - target_abspath = os.path.abspath(target) - target_basename = os.path.basename(target_abspath) - target_dirname = os.path.dirname(target_abspath) - - def ignore(d: str, names: List[str]) -> List[str]: - skipped: List[str] = [] - if d == source: - # Pulling in those directories can potentially be very slow, - # exclude the following directories if they appear in the top - # level dir (and only it). - # See discussion at https://github.com/pypa/pip/pull/6770 - skipped += [".tox", ".nox"] - if os.path.abspath(d) == target_dirname: - # Prevent an infinite recursion if the target is in source. - # This can happen when TMPDIR is set to ${PWD}/... - # and we copy PWD to TMPDIR. - skipped += [target_basename] - return skipped - - shutil.copytree( - source, - target, - ignore=ignore, - symlinks=True, - copy_function=_copy2_ignoring_special_files, - ) - - -def get_file_url( - link: Link, download_dir: Optional[str] = None, hashes: Optional[Hashes] = None -) -> File: - """Get file and optionally check its hash.""" - # If a download dir is specified, is the file already there and valid? - already_downloaded_path = None - if download_dir: - already_downloaded_path = _check_download_dir(link, download_dir, hashes) - - if already_downloaded_path: - from_path = already_downloaded_path - else: - from_path = link.file_path - - # If --require-hashes is off, `hashes` is either empty, the - # link's embedded hash, or MissingHashes; it is required to - # match. If --require-hashes is on, we are satisfied by any - # hash in `hashes` matching: a URL-based or an option-based - # one; no internet-sourced hash will be in `hashes`. - if hashes: - hashes.check_against_path(from_path) - return File(from_path, None) - - -def unpack_url( - link: Link, - location: str, - download: Downloader, - verbosity: int, - download_dir: Optional[str] = None, - hashes: Optional[Hashes] = None, -) -> Optional[File]: - """Unpack link into location, downloading if required. 
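# [editor's sketch, not part of the deleted file] The hash gate described for
# unpack_url(): a file is only accepted when its digest matches one of the
# expected hashes. A stand-in for pip's Hashes.check_against_path; the
# function name and the shape of `expected` here are illustrative only.
import hashlib


def check_against_path(path, expected):
    """Accept `path` only if its digest is in `expected` ({algorithm: [hexdigests]})."""
    for algorithm, digests in expected.items():
        h = hashlib.new(algorithm)
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(8192), b""):
                h.update(chunk)
        if h.hexdigest() in digests:
            return
    raise ValueError(f"hash mismatch for {path!r}")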
- - :param hashes: A Hashes object, one of whose embedded hashes must match, - or HashMismatch will be raised. If the Hashes is empty, no matches are - required, and unhashable types of requirements (like VCS ones, which - would ordinarily raise HashUnsupported) are allowed. - """ - # non-editable vcs urls - if link.is_vcs: - unpack_vcs_link(link, location, verbosity=verbosity) - return None - - # Once out-of-tree-builds are no longer supported, could potentially - # replace the below condition with `assert not link.is_existing_dir` - # - unpack_url does not need to be called for in-tree-builds. - # - # As further cleanup, _copy_source_tree and accompanying tests can - # be removed. - # - # TODO when use-deprecated=out-of-tree-build is removed - if link.is_existing_dir(): - if os.path.isdir(location): - rmtree(location) - _copy_source_tree(link.file_path, location) - return None - - # file urls - if link.is_file: - file = get_file_url(link, download_dir, hashes=hashes) - - # http urls - else: - file = get_http_url( - link, - download, - download_dir, - hashes=hashes, - ) - - # unpack the archive to the build dir location. even when only downloading - # archives, they have to be unpacked to parse dependencies, except wheels - if not link.is_wheel: - unpack_file(file.path, location, file.content_type) - - return file - - -def _check_download_dir( - link: Link, download_dir: str, hashes: Optional[Hashes] -) -> Optional[str]: - """Check download_dir for previously downloaded file with correct hash - If a correct file is found return its path else None - """ - download_path = os.path.join(download_dir, link.filename) - - if not os.path.exists(download_path): - return None - - # If already downloaded, does its hash match? - logger.info("File was already downloaded %s", download_path) - if hashes: - try: - hashes.check_against_path(download_path) - except HashMismatch: - logger.warning( - "Previously-downloaded file %s has bad hash. Re-downloading.", - download_path, - ) - os.unlink(download_path) - return None - return download_path - - -class RequirementPreparer: - """Prepares a Requirement""" - - def __init__( - self, - build_dir: str, - download_dir: Optional[str], - src_dir: str, - build_isolation: bool, - req_tracker: RequirementTracker, - session: PipSession, - progress_bar: str, - finder: PackageFinder, - require_hashes: bool, - use_user_site: bool, - lazy_wheel: bool, - verbosity: int, - in_tree_build: bool, - ) -> None: - super().__init__() - - self.src_dir = src_dir - self.build_dir = build_dir - self.req_tracker = req_tracker - self._session = session - self._download = Downloader(session, progress_bar) - self._batch_download = BatchDownloader(session, progress_bar) - self.finder = finder - - # Where still-packed archives should be written to. If None, they are - # not saved, and are deleted immediately after unpacking. - self.download_dir = download_dir - - # Is build isolation allowed? - self.build_isolation = build_isolation - - # Should hash-checking be required? - self.require_hashes = require_hashes - - # Should install in user site-packages? - self.use_user_site = use_user_site - - # Should wheels be downloaded lazily? - self.use_lazy_wheel = lazy_wheel - - # How verbose should underlying tooling be? - self.verbosity = verbosity - - # Should in-tree builds be used for local paths? - self.in_tree_build = in_tree_build - - # Memoized downloaded files, as mapping of url: path. 
- self._downloaded: Dict[str, str] = {} - - # Previous "header" printed for a link-based InstallRequirement - self._previous_requirement_header = ("", "") - - def _log_preparing_link(self, req: InstallRequirement) -> None: - """Provide context for the requirement being prepared.""" - if req.link.is_file and not req.original_link_is_in_wheel_cache: - message = "Processing %s" - information = str(display_path(req.link.file_path)) - else: - message = "Collecting %s" - information = str(req.req or req) - - if (message, information) != self._previous_requirement_header: - self._previous_requirement_header = (message, information) - logger.info(message, information) - - if req.original_link_is_in_wheel_cache: - with indent_log(): - logger.info("Using cached %s", req.link.filename) - - def _ensure_link_req_src_dir( - self, req: InstallRequirement, parallel_builds: bool - ) -> None: - """Ensure source_dir of a linked InstallRequirement.""" - # Since source_dir is only set for editable requirements. - if req.link.is_wheel: - # We don't need to unpack wheels, so no need for a source - # directory. - return - assert req.source_dir is None - if req.link.is_existing_dir() and self.in_tree_build: - # build local directories in-tree - req.source_dir = req.link.file_path - return - - # We always delete unpacked sdists after pip runs. - req.ensure_has_source_dir( - self.build_dir, - autodelete=True, - parallel_builds=parallel_builds, - ) - - # If a checkout exists, it's unwise to keep going. version - # inconsistencies are logged later, but do not fail the - # installation. - # FIXME: this won't upgrade when there's an existing - # package unpacked in `req.source_dir` - # TODO: this check is now probably dead code - if is_installable_dir(req.source_dir): - raise PreviousBuildDirError( - "pip can't proceed with requirements '{}' due to a" - "pre-existing build directory ({}). This is likely " - "due to a previous installation that failed . pip is " - "being responsible and not assuming it can delete this. " - "Please delete it and try again.".format(req, req.source_dir) - ) - - def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes: - # By the time this is called, the requirement's link should have - # been checked so we can tell what kind of requirements req is - # and raise some more informative errors than otherwise. - # (For example, we can raise VcsHashUnsupported for a VCS URL - # rather than HashMissing.) - if not self.require_hashes: - return req.hashes(trust_internet=True) - - # We could check these first 2 conditions inside unpack_url - # and save repetition of conditions, but then we would - # report less-useful error messages for unhashable - # requirements, complaining that there's no hash provided. - if req.link.is_vcs: - raise VcsHashUnsupported() - if req.link.is_existing_dir(): - raise DirectoryUrlHashUnsupported() - - # Unpinned packages are asking for trouble when a new version - # is uploaded. This isn't a security check, but it saves users - # a surprising hash mismatch in the future. - # file:/// URLs aren't pinnable, so don't complain about them - # not being pinned. - if req.original_link is None and not req.is_pinned: - raise HashUnpinned() - - # If known-good hashes are missing for this requirement, - # shim it with a facade object that will provoke hash - # computation and then raise a HashMissing exception - # showing the user what the hash should be. 
- return req.hashes(trust_internet=False) or MissingHashes() - - def _fetch_metadata_using_lazy_wheel( - self, - link: Link, - ) -> Optional[BaseDistribution]: - """Fetch metadata using lazy wheel, if possible.""" - if not self.use_lazy_wheel: - return None - if self.require_hashes: - logger.debug("Lazy wheel is not used as hash checking is required") - return None - if link.is_file or not link.is_wheel: - logger.debug( - "Lazy wheel is not used as %r does not points to a remote wheel", - link, - ) - return None - - wheel = Wheel(link.filename) - name = canonicalize_name(wheel.name) - logger.info( - "Obtaining dependency information from %s %s", - name, - wheel.version, - ) - url = link.url.split("#", 1)[0] - try: - return dist_from_wheel_url(name, url, self._session) - except HTTPRangeRequestUnsupported: - logger.debug("%s does not support range requests", url) - return None - - def _complete_partial_requirements( - self, - partially_downloaded_reqs: Iterable[InstallRequirement], - parallel_builds: bool = False, - ) -> None: - """Download any requirements which were only fetched by metadata.""" - # Download to a temporary directory. These will be copied over as - # needed for downstream 'download', 'wheel', and 'install' commands. - temp_dir = TempDirectory(kind="unpack", globally_managed=True).path - - # Map each link to the requirement that owns it. This allows us to set - # `req.local_file_path` on the appropriate requirement after passing - # all the links at once into BatchDownloader. - links_to_fully_download: Dict[Link, InstallRequirement] = {} - for req in partially_downloaded_reqs: - assert req.link - links_to_fully_download[req.link] = req - - batch_download = self._batch_download( - links_to_fully_download.keys(), - temp_dir, - ) - for link, (filepath, _) in batch_download: - logger.debug("Downloading link %s to %s", link, filepath) - req = links_to_fully_download[link] - req.local_file_path = filepath - - # This step is necessary to ensure all lazy wheels are processed - # successfully by the 'download', 'wheel', and 'install' commands. - for req in partially_downloaded_reqs: - self._prepare_linked_requirement(req, parallel_builds) - - def prepare_linked_requirement( - self, req: InstallRequirement, parallel_builds: bool = False - ) -> BaseDistribution: - """Prepare a requirement to be obtained from req.link.""" - assert req.link - link = req.link - self._log_preparing_link(req) - with indent_log(): - # Check if the relevant file is already available - # in the download directory - file_path = None - if self.download_dir is not None and link.is_wheel: - hashes = self._get_linked_req_hashes(req) - file_path = _check_download_dir(req.link, self.download_dir, hashes) - - if file_path is not None: - # The file is already available, so mark it as downloaded - self._downloaded[req.link.url] = file_path - else: - # The file is not available, attempt to fetch only metadata - wheel_dist = self._fetch_metadata_using_lazy_wheel(link) - if wheel_dist is not None: - req.needs_more_preparation = True - return wheel_dist - - # None of the optimizations worked, fully prepare the requirement - return self._prepare_linked_requirement(req, parallel_builds) - - def prepare_linked_requirements_more( - self, reqs: Iterable[InstallRequirement], parallel_builds: bool = False - ) -> None: - """Prepare linked requirements more, if needed.""" - reqs = [req for req in reqs if req.needs_more_preparation] - for req in reqs: - # Determine if any of these requirements were already downloaded. 
- if self.download_dir is not None and req.link.is_wheel: - hashes = self._get_linked_req_hashes(req) - file_path = _check_download_dir(req.link, self.download_dir, hashes) - if file_path is not None: - self._downloaded[req.link.url] = file_path - req.needs_more_preparation = False - - # Prepare requirements we found were already downloaded for some - # reason. The other downloads will be completed separately. - partially_downloaded_reqs: List[InstallRequirement] = [] - for req in reqs: - if req.needs_more_preparation: - partially_downloaded_reqs.append(req) - else: - self._prepare_linked_requirement(req, parallel_builds) - - # TODO: separate this part out from RequirementPreparer when the v1 - # resolver can be removed! - self._complete_partial_requirements( - partially_downloaded_reqs, - parallel_builds=parallel_builds, - ) - - def _prepare_linked_requirement( - self, req: InstallRequirement, parallel_builds: bool - ) -> BaseDistribution: - assert req.link - link = req.link - - self._ensure_link_req_src_dir(req, parallel_builds) - hashes = self._get_linked_req_hashes(req) - - if link.is_existing_dir() and self.in_tree_build: - local_file = None - elif link.url not in self._downloaded: - try: - local_file = unpack_url( - link, - req.source_dir, - self._download, - self.verbosity, - self.download_dir, - hashes, - ) - except NetworkConnectionError as exc: - raise InstallationError( - "Could not install requirement {} because of HTTP " - "error {} for URL {}".format(req, exc, link) - ) - else: - file_path = self._downloaded[link.url] - if hashes: - hashes.check_against_path(file_path) - local_file = File(file_path, content_type=None) - - # For use in later processing, - # preserve the file path on the requirement. - if local_file: - req.local_file_path = local_file.path - - dist = _get_prepared_distribution( - req, - self.req_tracker, - self.finder, - self.build_isolation, - ) - return dist - - def save_linked_requirement(self, req: InstallRequirement) -> None: - assert self.download_dir is not None - assert req.link is not None - link = req.link - if link.is_vcs or (link.is_existing_dir() and req.editable): - # Make a .zip of the source_dir we already created. - req.archive(self.download_dir) - return - - if link.is_existing_dir(): - logger.debug( - "Not copying link to destination directory " - "since it is a directory: %s", - link, - ) - return - if req.local_file_path is None: - # No distribution was downloaded for this requirement. 
- return - - download_location = os.path.join(self.download_dir, link.filename) - if not os.path.exists(download_location): - shutil.copy(req.local_file_path, download_location) - download_path = display_path(download_location) - logger.info("Saved %s", download_path) - - def prepare_editable_requirement( - self, - req: InstallRequirement, - ) -> BaseDistribution: - """Prepare an editable requirement.""" - assert req.editable, "cannot prepare a non-editable req as editable" - - logger.info("Obtaining %s", req) - - with indent_log(): - if self.require_hashes: - raise InstallationError( - "The editable requirement {} cannot be installed when " - "requiring hashes, because there is no single file to " - "hash.".format(req) - ) - req.ensure_has_source_dir(self.src_dir) - req.update_editable() - - dist = _get_prepared_distribution( - req, - self.req_tracker, - self.finder, - self.build_isolation, - ) - - req.check_if_exists(self.use_user_site) - - return dist - - def prepare_installed_requirement( - self, - req: InstallRequirement, - skip_reason: str, - ) -> BaseDistribution: - """Prepare an already-installed requirement.""" - assert req.satisfied_by, "req should have been satisfied but isn't" - assert skip_reason is not None, ( - "did not get skip reason skipped but req.satisfied_by " - "is set to {}".format(req.satisfied_by) - ) - logger.info( - "Requirement %s: %s (%s)", skip_reason, req, req.satisfied_by.version - ) - with indent_log(): - if self.require_hashes: - logger.debug( - "Since it is already installed, we are trusting this " - "package without checking its hash. To ensure a " - "completely repeatable environment, install into an " - "empty virtualenv." - ) - return InstalledDistribution(req).get_metadata_distribution() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_compat.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_compat.py deleted file mode 100644 index e54bd4ede8761df5882a3354bc22bdee7a5e8a8b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_compat.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import sys - -from ._typing import TYPE_CHECKING - -if TYPE_CHECKING: # pragma: no cover - from typing import Any, Dict, Tuple, Type - - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - -# flake8: noqa - -if PY3: - string_types = (str,) -else: - string_types = (basestring,) - - -def with_metaclass(meta, *bases): - # type: (Type[Any], Tuple[Type[Any], ...]) -> Any - """ - Create a base class with a metaclass. - """ - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. 
- class metaclass(meta): # type: ignore - def __new__(cls, name, this_bases, d): - # type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any - return meta(name, bases, d) - - return type.__new__(metaclass, "temporary_class", (), {}) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/apl.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/apl.py deleted file mode 100644 index 815184da12b699348b3429cd1f7542979374f680..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/apl.py +++ /dev/null @@ -1,104 +0,0 @@ -""" - pygments.lexers.apl - ~~~~~~~~~~~~~~~~~~~ - - Lexers for APL. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer -from pygments.token import Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Whitespace - -__all__ = ['APLLexer'] - - -class APLLexer(RegexLexer): - """ - A simple APL lexer. - - .. versionadded:: 2.0 - """ - name = 'APL' - url = 'https://en.m.wikipedia.org/wiki/APL_(programming_language)' - aliases = ['apl'] - filenames = [ - '*.apl', '*.aplf', '*.aplo', '*.apln', - '*.aplc', '*.apli', '*.dyalog', - ] - - tokens = { - 'root': [ - # Whitespace - # ========== - (r'\s+', Whitespace), - # - # Comment - # ======= - # '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog) - (r'[⍝#].*$', Comment.Single), - # - # Strings - # ======= - (r'\'((\'\')|[^\'])*\'', String.Single), - (r'"(("")|[^"])*"', String.Double), # supported by NGN APL - # - # Punctuation - # =========== - # This token type is used for diamond and parenthesis - # but not for bracket and ; (see below) - (r'[⋄◇()]', Punctuation), - # - # Array indexing - # ============== - # Since this token type is very important in APL, it is not included in - # the punctuation token type but rather in the following one - (r'[\[\];]', String.Regex), - # - # Distinguished names - # =================== - # following IBM APL2 standard - (r'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function), - # - # Labels - # ====== - # following IBM APL2 standard - # (r'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label), - # - # Variables - # ========= - # following IBM APL2 standard (with a leading _ ok for GNU APL and Dyalog) - (r'[A-Za-zΔ∆⍙_][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable), - # - # Numbers - # ======= - (r'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)' - r'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?', - Number), - # - # Operators - # ========== - (r'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘⌸&⌶@⌺⍥⍛⍢]', Name.Attribute), # closest token type - (r'[+\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗⊆⊇⍸√⌾…⍮]', - Operator), - # - # Constant - # ======== - (r'⍬', Name.Constant), - # - # Quad symbol - # =========== - (r'[⎕⍞]', Name.Variable.Global), - # - # Arrows left/right - # ================= - (r'[←→]', Keyword.Declaration), - # - # D-Fn - # ==== - (r'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo), - (r'[{}]', Keyword.Type), - ], - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/asc.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/asc.py deleted file mode 100644 index e261f41156cf227299f490480e2f6c41536a4494..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/asc.py +++ /dev/null @@ -1,55 +0,0 @@ -""" - 
pygments.lexers.asc - ~~~~~~~~~~~~~~~~~~~ - - Lexer for various ASCII armored files. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" -import re - -from pygments.lexer import RegexLexer, bygroups -from pygments.token import Comment, Generic, Name, Operator, String, Whitespace - -__all__ = ['AscLexer'] - - -class AscLexer(RegexLexer): - """ - Lexer for ASCII armored files, containing `-----BEGIN/END ...-----` wrapped - base64 data. - - .. versionadded:: 2.10 - """ - name = 'ASCII armored' - aliases = ['asc', 'pem'] - filenames = [ - '*.asc', # PGP; *.gpg, *.pgp, and *.sig too, but those can be binary - '*.pem', # X.509; *.cer, *.crt, *.csr, and key etc too, but those can be binary - 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', - 'id_rsa', # SSH private keys - ] - mimetypes = ['application/pgp-keys', 'application/pgp-encrypted', - 'application/pgp-signature', 'application/pem-certificate-chain'] - - flags = re.MULTILINE - - tokens = { - 'root': [ - (r'\s+', Whitespace), - (r'^-----BEGIN [^\n]+-----$', Generic.Heading, 'data'), - (r'\S+', Comment), - ], - 'data': [ - (r'\s+', Whitespace), - (r'^([^:]+)(:)([ \t]+)(.*)', - bygroups(Name.Attribute, Operator, Whitespace, String)), - (r'^-----END [^\n]+-----$', Generic.Heading, 'root'), - (r'\S+', String), - ], - } - - def analyse_text(text): - if re.search(r'^-----BEGIN [^\n]+-----\r?\n', text): - return True diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/pawn.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/pawn.py deleted file mode 100644 index 36b48fcbf29e78b5eec704295f0e2cb6b8c037da..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/pawn.py +++ /dev/null @@ -1,202 +0,0 @@ -""" - pygments.lexers.pawn - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for the Pawn languages. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation -from pygments.util import get_bool_opt - -__all__ = ['SourcePawnLexer', 'PawnLexer'] - - -class SourcePawnLexer(RegexLexer): - """ - For SourcePawn source code with preprocessor directives. - - .. 
versionadded:: 1.6
-    """
-    name = 'SourcePawn'
-    aliases = ['sp']
-    filenames = ['*.sp']
-    mimetypes = ['text/x-sourcepawn']
-
-    #: optional Comment or Whitespace
-    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
-    #: only one /* */ style comment
-    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
-
-    tokens = {
-        'root': [
-            # preprocessor directives: without whitespace
-            (r'^#if\s+0', Comment.Preproc, 'if0'),
-            ('^#', Comment.Preproc, 'macro'),
-            # or with whitespace
-            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
-            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
-            (r'\n', Text),
-            (r'\s+', Text),
-            (r'\\\n', Text),  # line continuation
-            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
-            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
-            (r'[{}]', Punctuation),
-            (r'L?"', String, 'string'),
-            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
-            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
-            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
-            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
-            (r'0[0-7]+[LlUu]*', Number.Oct),
-            (r'\d+[LlUu]*', Number.Integer),
-            (r'[~!%^&*+=|?:<>/-]', Operator),
-            (r'[()\[\],.;]', Punctuation),
-            (r'(case|const|continue|native|'
-             r'default|else|enum|for|if|new|operator|'
-             r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
-            (r'(bool|Float)\b', Keyword.Type),
-            (r'(true|false)\b', Keyword.Constant),
-            (r'[a-zA-Z_]\w*', Name),
-        ],
-        'string': [
-            (r'"', String, '#pop'),
-            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
-            (r'[^\\"\n]+', String),  # all other characters
-            (r'\\\n', String),  # line continuation
-            (r'\\', String),  # stray backslash
-        ],
-        'macro': [
-            (r'[^/\n]+', Comment.Preproc),
-            (r'/\*(.|\n)*?\*/', Comment.Multiline),
-            (r'//.*?\n', Comment.Single, '#pop'),
-            (r'/', Comment.Preproc),
-            (r'(?<=\\)\n', Comment.Preproc),
-            (r'\n', Comment.Preproc, '#pop'),
-        ],
-        'if0': [
-            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
-            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
-            (r'.*?\n', Comment),
-        ],
-    }
-
-    # [The SourceMod-specific members of SourcePawnLexer and the opening rules
-    # of the PawnLexer class were lost when this dump was extracted; only the
-    # class header below could be restored from context.]
-
-
-class PawnLexer(RegexLexer):
-    """
-    For Pawn source code.
-
-    .. versionadded:: 2.0
-    """
-
-    name = 'Pawn'
-    aliases = ['pawn']
-    filenames = ['*.p', '*.pwn', '*.inc']
-    mimetypes = ['text/x-pawn']
-
-    tokens = {
-        'root': [
-            # [earlier root rules lost in extraction]
-            (r'[~!%^&*+=|?:<>/-]', Operator),
-            (r'[()\[\],.;]', Punctuation),
-            (r'(switch|case|default|const|new|static|char|continue|break|'
-             r'if|else|for|while|do|operator|enum|'
-             r'public|return|sizeof|tagof|state|goto)\b', Keyword),
-            (r'(bool|Float)\b', Keyword.Type),
-            (r'(true|false)\b', Keyword.Constant),
-            (r'[a-zA-Z_]\w*', Name),
-        ],
-        'string': [
-            (r'"', String, '#pop'),
-            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
-            (r'[^\\"\n]+', String),  # all other characters
-            (r'\\\n', String),  # line continuation
-            (r'\\', String),  # stray backslash
-        ],
-        'macro': [
-            (r'[^/\n]+', Comment.Preproc),
-            (r'/\*(.|\n)*?\*/', Comment.Multiline),
-            (r'//.*?\n', Comment.Single, '#pop'),
-            (r'/', Comment.Preproc),
-            (r'(?<=\\)\n', Comment.Preproc),
-            (r'\n', Comment.Preproc, '#pop'),
-        ],
-        'if0': [
-            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
-            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
-            (r'.*?\n', Comment),
-        ],
-    }
-
-    def analyse_text(text):
-        """This is basically C. There is a keyword which doesn't exist in C
-        though and is nearly unique to this language."""
-        if 'tagof' in text:
-            return 0.01
diff --git a/spaces/... b/spaces/... [header of the next deleted file, a sniffio test module, lost in extraction]
-import os
-import sys
-
-import pytest
-
-# [further imports, including current_async_library and
-# AsyncLibraryNotFoundError, lost in extraction]
-
-@pytest.mark.skipif(
-    os.name == "nt" and sys.version_info >= (3, 9),
-    reason="Curio breaks on Python 3.9+ on Windows.
Fix was not released yet", -) -def test_curio(): - import curio - - with pytest.raises(AsyncLibraryNotFoundError): - current_async_library() - - ran = [] - - async def this_is_curio(): - assert current_async_library() == "curio" - # Call it a second time to exercise the caching logic - assert current_async_library() == "curio" - ran.append(True) - - curio.run(this_is_curio) - assert ran == [True] - - with pytest.raises(AsyncLibraryNotFoundError): - current_async_library() diff --git a/spaces/qingxu98/academic-chatgpt-beta/main.py b/spaces/qingxu98/academic-chatgpt-beta/main.py deleted file mode 100644 index f7a3a79fede1c36c1151cfd76b02fefd5d278ae0..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/academic-chatgpt-beta/main.py +++ /dev/null @@ -1,190 +0,0 @@ -import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 - -def main(): - import gradio as gr - from request_llm.bridge_all import predict - from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith - # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 - proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS') - - # 如果WEB_PORT是-1, 则随机选取WEB端口 - PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT - if not AUTHENTICATION: AUTHENTICATION = None - - from check_proxy import get_current_version - initial_prompt = "Serve me as a writing and programming assistant." - title_html = f"

<h1 align="center">ChatGPT 学术优化 {get_current_version()}</h1>

" - description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)""" - - # 问询记录, python 版本建议3.9+(越新越好) - import logging - os.makedirs("gpt_log", exist_ok=True) - try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8") - except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO) - print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!") - - # 一些普通功能模块 - from core_functional import get_core_functions - functional = get_core_functions() - - # 高级函数插件 - from crazy_functional import get_crazy_functions - crazy_fns = get_crazy_functions() - - # 处理markdown文本格式的转变 - gr.Chatbot.postprocess = format_io - - # 做一些外观色彩上的调整 - from theme import adjust_theme, advanced_css - set_theme = adjust_theme() - - # 代理与自动更新 - from check_proxy import check_proxy, auto_update, warm_up_modules - proxy_info = check_proxy(proxies) - - gr_L1 = lambda: gr.Row().style() - gr_L2 = lambda scale: gr.Column(scale=scale) - if LAYOUT == "TOP-DOWN": - gr_L1 = lambda: DummyWith() - gr_L2 = lambda scale: gr.Row() - CHATBOT_HEIGHT /= 2 - - cancel_handles = [] - with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo: - gr.HTML(title_html) - gr.HTML('''
Duplicate Space After opening this page, be sure to click the "Duplicate Space" button at the top! To use the app, first enter your API-KEY in the input box and press Enter.
Never fill in your API_KEY or ask questions before duplicating the space, otherwise your API_KEY is very likely to be harvested by the space owner!
Any number of OpenAI keys and API2D keys can coexist; for example, enter "OpenAI-key1,API2D-key2" and submit to use both kinds of model interface at the same time.
''') - cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL}) - with gr_L1(): - with gr_L2(scale=2): - chatbot = gr.Chatbot() - chatbot.style(height=CHATBOT_HEIGHT) - history = gr.State([]) - with gr_L2(scale=1): - with gr.Accordion("输入区", open=True) as area_input_primary: - with gr.Row(): - txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥,输入多个密钥时,用英文逗号间隔。支持OpenAI密钥和API2D密钥共存。").style(container=False) - with gr.Row(): - submitBtn = gr.Button("提交", variant="primary") - with gr.Row(): - resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm") - stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm") - clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm") - with gr.Row(): - status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}") - with gr.Accordion("基础功能区", open=True) as area_basic_fn: - with gr.Row(): - for k in functional: - variant = functional[k]["Color"] if "Color" in functional[k] else "secondary" - functional[k]["Button"] = gr.Button(k, variant=variant) - with gr.Accordion("函数插件区", open=True) as area_crazy_fn: - with gr.Row(): - gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.") - with gr.Row(): - for k in crazy_fns: - if not crazy_fns[k].get("AsButton", True): continue - variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary" - crazy_fns[k]["Button"] = gr.Button(k, variant=variant) - crazy_fns[k]["Button"].style(size="sm") - with gr.Row(): - with gr.Accordion("更多函数插件", open=True): - dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)] - with gr.Column(scale=1): - dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False) - with gr.Column(scale=1): - switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary") - with gr.Row(): - with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up: - file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple") - with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN")): - system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt) - top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",) - max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="Local LLM MaxLength",) - checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区") - md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False) - - gr.Markdown(description) - with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary: - with gr.Row(): - txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False) - with gr.Row(): - submitBtn2 = gr.Button("提交", variant="primary") - with gr.Row(): - resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm") - stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm") - clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm") - # 功能区显示开关与功能区的互动 - def fn_area_visibility(a): - ret = {} - ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))}) - ret.update({area_crazy_fn: 
gr.update(visible=("函数插件区" in a))}) - ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))}) - ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))}) - ret.update({clearBtn: gr.update(visible=("输入清除键" in a))}) - ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))}) - if "底部输入区" in a: ret.update({txt: gr.update(value="")}) - return ret - checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2] ) - # 整理反复出现的控件句柄组合 - input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt] - output_combo = [cookies, chatbot, history, status] - predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo) - # 提交按钮、重置按钮 - cancel_handles.append(txt.submit(**predict_args)) - cancel_handles.append(txt2.submit(**predict_args)) - cancel_handles.append(submitBtn.click(**predict_args)) - cancel_handles.append(submitBtn2.click(**predict_args)) - resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) - resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) - clearBtn.click(lambda: ("",""), None, [txt, txt2]) - clearBtn2.click(lambda: ("",""), None, [txt, txt2]) - # 基础功能区的回调函数注册 - for k in functional: - click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo) - cancel_handles.append(click_handle) - # 文件上传区,接收文件后与chatbot的互动 - file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2]) - # 函数插件-固定按钮区 - for k in crazy_fns: - if not crazy_fns[k].get("AsButton", True): continue - click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo) - click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot]) - cancel_handles.append(click_handle) - # 函数插件-下拉菜单与随变按钮的互动 - def on_dropdown_changed(k): - variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary" - return {switchy_bt: gr.update(value=k, variant=variant)} - dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] ) - # 随变按钮的回调函数注册 - def route(k, *args, **kwargs): - if k in [r"打开插件列表", r"请先从插件列表中选择"]: return - yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs) - click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo) - click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot]) - # def expand_file_area(file_upload, area_file_up): - # if len(file_upload)>0: return {area_file_up: gr.update(open=True)} - # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up]) - cancel_handles.append(click_handle) - # 终止按钮的回调函数注册 - stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) - stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) - - # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 - def auto_opentab_delay(): - import threading, webbrowser, time - print(f"如果浏览器没有自动打开,请复制并转到以下URL:") - print(f"\t(亮色主题): http://localhost:{PORT}") - print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true") - def open(): - time.sleep(2) # 打开浏览器 - webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true") - threading.Thread(target=open, name="open-browser", daemon=True).start() - threading.Thread(target=auto_update, 
name="self-upgrade", daemon=True).start() - threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start() - - auto_opentab_delay() - demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/qingxu98/gpt-academic/request_llm/bridge_claude.py b/spaces/qingxu98/gpt-academic/request_llm/bridge_claude.py deleted file mode 100644 index 6084b1f15c9832fd11a36bb58d8187f4e2a7a931..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/gpt-academic/request_llm/bridge_claude.py +++ /dev/null @@ -1,228 +0,0 @@ -# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目 - -""" - 该文件中主要包含2个函数 - - 不具备多线程能力的函数: - 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 - - 具备多线程调用能力的函数 - 2. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 -""" - -import os -import json -import time -import gradio as gr -import logging -import traceback -import requests -import importlib - -# config_private.py放自己的秘密如API和代理网址 -# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 -from toolbox import get_conf, update_ui, trimmed_format_exc, ProxyNetworkActivate -proxies, TIMEOUT_SECONDS, MAX_RETRY, ANTHROPIC_API_KEY = \ - get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'ANTHROPIC_API_KEY') - -timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \ - '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。' - -def get_full_error(chunk, stream_response): - """ - 获取完整的从Openai返回的报错 - """ - while True: - try: - chunk += next(stream_response) - except: - break - return chunk - - -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): - """ - 发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 - inputs: - 是本次问询的输入 - sys_prompt: - 系统静默prompt - llm_kwargs: - chatGPT的内部调优参数 - history: - 是之前的对话列表 - observe_window = None: - 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 - """ - from anthropic import Anthropic - watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可 - prompt = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True) - retry = 0 - if len(ANTHROPIC_API_KEY) == 0: - raise RuntimeError("没有设置ANTHROPIC_API_KEY选项") - - while True: - try: - # make a POST request to the API endpoint, stream=False - from .bridge_all import model_info - anthropic = Anthropic(api_key=ANTHROPIC_API_KEY) - # endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - # with ProxyNetworkActivate() - stream = anthropic.completions.create( - prompt=prompt, - max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping. 
-                model=llm_kwargs['llm_model'],
-                stream=True,
-                temperature = llm_kwargs['temperature']
-            )
-            break
-        except Exception as e:
-            retry += 1
-            traceback.print_exc()
-            if retry > MAX_RETRY: raise TimeoutError
-            if MAX_RETRY!=0: print(f'Request timed out, retrying ({retry}/{MAX_RETRY}) ...')
-    result = ''
-    try:
-        for completion in stream:
-            result += completion.completion
-            if not console_slience: print(completion.completion, end='')
-            if observe_window is not None:
-                # Observation window: display the data fetched so far
-                if len(observe_window) >= 1: observe_window[0] += completion.completion
-                # Watchdog: terminate if the watchdog has not been fed before the deadline
-                if len(observe_window) >= 2:
-                    if (time.time()-observe_window[1]) > watch_dog_patience:
-                        raise RuntimeError("The user cancelled the program.")
-    except Exception as e:
-        traceback.print_exc()
-
-    return result
-
-
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
-    """
-    Send to chatGPT and fetch the output as a stream.
-    Used for the basic conversation feature.
-    inputs is the input of this query
-    top_p, temperature are internal tuning parameters of chatGPT
-    history is the list of previous conversation turns (note that both inputs and history will trigger a token-overflow error if they get too long)
-    chatbot is the conversation list shown in the WebUI; modify it and then yield it out to update the chat interface directly
-    additional_fn indicates which button was clicked; see functional.py for the buttons
-    """
-    from anthropic import Anthropic
-    if len(ANTHROPIC_API_KEY) == 0:
-        chatbot.append((inputs, "ANTHROPIC_API_KEY is not set"))
-        yield from update_ui(chatbot=chatbot, history=history, msg="Waiting for response") # refresh the UI
-        return
-
-    if additional_fn is not None:
-        from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
-
-    raw_input = inputs
-    logging.info(f'[raw_input] {raw_input}')
-    chatbot.append((inputs, ""))
-    yield from update_ui(chatbot=chatbot, history=history, msg="Waiting for response") # refresh the UI
-
-    try:
-        prompt = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
-    except RuntimeError as e:
-        chatbot[-1] = (inputs, f"The api-key you provided does not meet the requirements and contains no api-key usable for {llm_kwargs['llm_model']}. You may have selected the wrong model or request source.")
-        yield from update_ui(chatbot=chatbot, history=history, msg="api-key does not meet requirements") # refresh the UI
-        return
-
-    history.append(inputs); history.append("")
-
-    retry = 0
-    while True:
-        try:
-            # make a POST request to the API endpoint, stream=True
-            from .bridge_all import model_info
-            anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
-            # endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
-            # with ProxyNetworkActivate()
-            stream = anthropic.completions.create(
-                prompt=prompt,
-                max_tokens_to_sample=4096,  # The maximum number of tokens to generate before stopping.
-                model=llm_kwargs['llm_model'],
-                stream=True,
-                temperature = llm_kwargs['temperature']
-            )
-
-            break
-        except:
-            retry += 1
-            chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
-            retry_msg = f", retrying ({retry}/{MAX_RETRY}) ..." if MAX_RETRY > 0 else ""
-            yield from update_ui(chatbot=chatbot, history=history, msg="Request timed out"+retry_msg) # refresh the UI
-            if retry > MAX_RETRY: raise TimeoutError
-
-    gpt_replying_buffer = ""
-
-    for completion in stream:
-        try:
-            gpt_replying_buffer = gpt_replying_buffer + completion.completion
-            history[-1] = gpt_replying_buffer
-            chatbot[-1] = (history[-2], history[-1])
-            yield from update_ui(chatbot=chatbot, history=history, msg='OK') # refresh the UI
-
-        except Exception as e:
-            from toolbox import regular_txt_to_markdown
-            tb_str = '```\n' + trimmed_format_exc() + '```'
-            chatbot[-1] = (chatbot[-1][0], f"[Local Message] Exception \n\n{tb_str}")
-            yield from update_ui(chatbot=chatbot, history=history, msg="JSON exception" + tb_str) # refresh the UI
-            return
-
-
-
-
-# https://github.com/jtsang4/claude-to-chatgpt/blob/main/claude_to_chatgpt/adapter.py
-def convert_messages_to_prompt(messages):
-    prompt = ""
-    role_map = {
-        "system": "Human",
-        "user": "Human",
-        "assistant": "Assistant",
-    }
-    for message in messages:
-        role = message["role"]
-        content = message["content"]
-        transformed_role = role_map[role]
-        prompt += f"\n\n{transformed_role.capitalize()}: {content}"
-    prompt += "\n\nAssistant: "
-    return prompt
-
-def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
-    """
-    Integrate all the information, select the LLM model, and build the http request in preparation for sending it
-    """
-    from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
-
-    conversation_cnt = len(history) // 2
-
-    messages = [{"role": "system", "content": system_prompt}]
-    if conversation_cnt:
-        for index in range(0, 2*conversation_cnt, 2):
-            what_i_have_asked = {}
-            what_i_have_asked["role"] = "user"
-            what_i_have_asked["content"] = history[index]
-            what_gpt_answer = {}
-            what_gpt_answer["role"] = "assistant"
-            what_gpt_answer["content"] = history[index+1]
-            if what_i_have_asked["content"] != "":
-                if what_gpt_answer["content"] == "": continue
-                if what_gpt_answer["content"] == timeout_bot_msg: continue
-                messages.append(what_i_have_asked)
-                messages.append(what_gpt_answer)
-            else:
-                messages[-1]['content'] = what_gpt_answer['content']
-
-    what_i_ask_now = {}
-    what_i_ask_now["role"] = "user"
-    what_i_ask_now["content"] = inputs
-    messages.append(what_i_ask_now)
-    prompt = convert_messages_to_prompt(messages)
-
-    return prompt
-
-
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/BCL EasyConverter Desktop 3 Word Version Serial Numberl.md b/spaces/quidiaMuxgu/Expedit-SAM/BCL EasyConverter Desktop 3 Word Version Serial Numberl.md
deleted file mode 100644
index c5cbaa49a76145275de55ad87f4639977c8d93f0..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/BCL EasyConverter Desktop 3 Word Version Serial Numberl.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
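A quick usage sketch for the convert_messages_to_prompt adapter defined in bridge_claude.py above; the message list is hypothetical and this illustration is mine, not part of either surrounding file:

# Illustration only: how the adapter renders an OpenAI-style chat for Claude.
msgs = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "What is DSP?"},
]
print(convert_messages_to_prompt(msgs))
# "\n\nHuman: You are helpful.\n\nHuman: Hi\n\nAssistant: Hello!\n\nHuman: What is DSP?\n\nAssistant: "

Every role is mapped onto the two-speaker Human/Assistant transcript that the legacy Anthropic completions API expects, with a trailing "Assistant: " cue for the model to continue.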


-

BCL EasyConverter Desktop 3 Word Version Serial Numberl





-


-

Activation key
Get the activation code for free
Ad-blocked for free
no money to grab or crack
Get the activation key
no ikon
Get the latest serial number
legit
Get the activation key
Offline activation
no offline activation

899543212b
-
-
\ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Fifa15 Data1 Bin.epub.md b/spaces/quidiaMuxgu/Expedit-SAM/Fifa15 Data1 Bin.epub.md deleted file mode 100644 index f3f337a550bd92ac063b30e36fe56fb97a6bef22..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Fifa15 Data1 Bin.epub.md +++ /dev/null @@ -1,6 +0,0 @@ -

Fifa15 Data1 Bin.epub





-
-
-
-

diff --git a/spaces/quidiaMuxgu/Expedit-SAM/HACK Foxit PhantomPDF Business 7.3.6.321 Multilingual Incl Patch- TEA HOT.md b/spaces/quidiaMuxgu/Expedit-SAM/HACK Foxit PhantomPDF Business 7.3.6.321 Multilingual Incl Patch- TEA HOT.md deleted file mode 100644 index 3937e99680655256eda06a141a45958dcd869faa..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/HACK Foxit PhantomPDF Business 7.3.6.321 Multilingual Incl Patch- TEA HOT.md +++ /dev/null @@ -1,6 +0,0 @@ -

HACK Foxit PhantomPDF Business 7.3.6.321 Multilingual Incl Patch- TEA





-
-
-

diff --git a/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (English Vinglish Tamil Movie Downloa).md b/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (English Vinglish Tamil Movie Downloa).md deleted file mode 100644 index d4ce46b613109f8281b4d8afd6cbb99e9905055d..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (English Vinglish Tamil Movie Downloa).md +++ /dev/null @@ -1,12 +0,0 @@ -

HD Online Player (English Vinglish Tamil Movie Downloa)





-
-
-
-

diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/globals/globals.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/globals/globals.py deleted file mode 100644 index d0da59d56e8c2e482bcda5eeae7cf797b830560e..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/globals/globals.py +++ /dev/null @@ -1,5 +0,0 @@ -DoFormant: bool = False -Quefrency: float = 8.0 -Timbre: float = 1.2 - -NotesOrHertz: bool = False \ No newline at end of file diff --git a/spaces/radames/Speech-Recognition-Example/README.md b/spaces/radames/Speech-Recognition-Example/README.md deleted file mode 100644 index 5d53e441581ca1403f5c9e6943c64b25494e0b38..0000000000000000000000000000000000000000 --- a/spaces/radames/Speech-Recognition-Example/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Speech Recognition Example -emoji: 🗣 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/expansion/utils/flowlib.py b/spaces/radames/UserControllableLT-Latent-Transformer/expansion/utils/flowlib.py deleted file mode 100644 index 59096c15da5e529ecb2d85be4881ee467f0c838f..0000000000000000000000000000000000000000 --- a/spaces/radames/UserControllableLT-Latent-Transformer/expansion/utils/flowlib.py +++ /dev/null @@ -1,656 +0,0 @@ -""" -# ============================== -# flowlib.py -# library for optical flow processing -# Author: Ruoteng Li -# Date: 6th Aug 2016 -# ============================== -""" -import png -from . import pfm -import numpy as np -import matplotlib.colors as cl -import matplotlib.pyplot as plt -from PIL import Image -import cv2 -import pdb - - -UNKNOWN_FLOW_THRESH = 1e7 -SMALLFLOW = 0.0 -LARGEFLOW = 1e8 - -""" -============= -Flow Section -============= -""" - -def point_vec(img,flow,skip=16): - #img[:] = 255 - maxsize=256 - extendfac=2. 
-    resize_factor = max(1,int(max(maxsize/img.shape[0], maxsize/img.shape[1])))
-    meshgrid = np.meshgrid(range(img.shape[1]),range(img.shape[0]))
-    dispimg = cv2.resize(img[:,:,::-1].copy(), None,fx=resize_factor,fy=resize_factor)
-    colorflow = flow_to_image(flow).astype(int)
-
-    for i in range(img.shape[1]): # x
-        for j in range(img.shape[0]): # y
-            if flow[j,i,2] != 1: continue
-            if j%skip!=0 or i%skip!=0: continue
-            xend = int((meshgrid[0][j,i]+extendfac*flow[j,i,0])*resize_factor)
-            yend = int((meshgrid[1][j,i]+extendfac*flow[j,i,1])*resize_factor)
-            leng = np.linalg.norm(flow[j,i,:2]*extendfac)
-            if leng<3:continue
-            dispimg = cv2.arrowedLine(dispimg, (meshgrid[0][j,i]*resize_factor,meshgrid[1][j,i]*resize_factor),\
-                      (xend,yend),
-                      (int(colorflow[j,i,2]),int(colorflow[j,i,1]),int(colorflow[j,i,0])),4,tipLength=2/leng,line_type=cv2.LINE_AA)
-    return dispimg
-
-
-def show_flow(filename):
-    """
-    visualize optical flow map using matplotlib
-    :param filename: optical flow file
-    :return: None
-    """
-    flow = read_flow(filename)
-    img = flow_to_image(flow)
-    plt.imshow(img)
-    plt.show()
-
-
-def visualize_flow(flow, mode='Y'):
-    """
-    this function visualizes the input flow
-    :param flow: input flow in array
-    :param mode: choose which color mode to visualize the flow (Y: YCbCr, RGB: RGB color)
-    :return: None
-    """
-    if mode == 'Y':
-        # YCbCr color wheel
-        img = flow_to_image(flow)
-        plt.imshow(img)
-        plt.show()
-    elif mode == 'RGB':
-        (h, w) = flow.shape[0:2]
-        du = flow[:, :, 0]
-        dv = flow[:, :, 1]
-        valid = flow[:, :, 2]
-        max_flow = max(np.max(du), np.max(dv))
-        img = np.zeros((h, w, 3), dtype=np.float64)
-        # angle layer
-        img[:, :, 0] = np.arctan2(dv, du) / (2 * np.pi)
-        # magnitude layer, normalized to 1
-        img[:, :, 1] = np.sqrt(du * du + dv * dv) * 8 / max_flow
-        # phase layer
-        img[:, :, 2] = 8 - img[:, :, 1]
-        # clip to [0,1]
-        small_idx = img[:, :, 0:3] < 0
-        large_idx = img[:, :, 0:3] > 1
-        img[small_idx] = 0
-        img[large_idx] = 1
-        # convert to rgb
-        img = cl.hsv_to_rgb(img)
-        # remove invalid point
-        img[:, :, 0] = img[:, :, 0] * valid
-        img[:, :, 1] = img[:, :, 1] * valid
-        img[:, :, 2] = img[:, :, 2] * valid
-        # show
-        plt.imshow(img)
-        plt.show()
-
-    return None
-
-
-def read_flow(filename):
-    """
-    read optical flow data from flow file
-    :param filename: name of the flow file
-    :return: optical flow data in numpy array
-    """
-    if filename.endswith('.flo'):
-        flow = read_flo_file(filename)
-    elif filename.endswith('.png'):
-        flow = read_png_file(filename)
-    elif filename.endswith('.pfm'):
-        flow = read_pfm_file(filename)
-    else:
-        raise Exception('Invalid flow file format!')
-
-    return flow
-
-
-def write_flow(flow, filename):
-    """
-    write optical flow in Middlebury .flo format
-    :param flow: optical flow map
-    :param filename: optical flow file path to be saved
-    :return: None
-    """
-    f = open(filename, 'wb')
-    magic = np.array([202021.25], dtype=np.float32)
-    (height, width) = flow.shape[0:2]
-    w = np.array([width], dtype=np.int32)
-    h = np.array([height], dtype=np.int32)
-    magic.tofile(f)
-    w.tofile(f)
-    h.tofile(f)
-    flow.tofile(f)
-    f.close()
-
-
-def save_flow_image(flow, image_file):
-    """
-    save flow visualization into image file
-    :param flow: optical flow data
-    :param image_file: name of the image file to save
-    :return: None
-    """
-    flow_img = flow_to_image(flow)
-    img_out = Image.fromarray(flow_img)
-    img_out.save(image_file)
-
-
-def flowfile_to_imagefile(flow_file, image_file):
-    """
-    convert flowfile into image file
-    :param flow: optical flow data
-    :param
flow_fil - :return: None - """ - flow = read_flow(flow_file) - save_flow_image(flow, image_file) - - -def segment_flow(flow): - h = flow.shape[0] - w = flow.shape[1] - u = flow[:, :, 0] - v = flow[:, :, 1] - - idx = ((abs(u) > LARGEFLOW) | (abs(v) > LARGEFLOW)) - idx2 = (abs(u) == SMALLFLOW) - class0 = (v == 0) & (u == 0) - u[idx2] = 0.00001 - tan_value = v / u - - class1 = (tan_value < 1) & (tan_value >= 0) & (u > 0) & (v >= 0) - class2 = (tan_value >= 1) & (u >= 0) & (v >= 0) - class3 = (tan_value < -1) & (u <= 0) & (v >= 0) - class4 = (tan_value < 0) & (tan_value >= -1) & (u < 0) & (v >= 0) - class8 = (tan_value >= -1) & (tan_value < 0) & (u > 0) & (v <= 0) - class7 = (tan_value < -1) & (u >= 0) & (v <= 0) - class6 = (tan_value >= 1) & (u <= 0) & (v <= 0) - class5 = (tan_value >= 0) & (tan_value < 1) & (u < 0) & (v <= 0) - - seg = np.zeros((h, w)) - - seg[class1] = 1 - seg[class2] = 2 - seg[class3] = 3 - seg[class4] = 4 - seg[class5] = 5 - seg[class6] = 6 - seg[class7] = 7 - seg[class8] = 8 - seg[class0] = 0 - seg[idx] = 0 - - return seg - - -def flow_error(tu, tv, u, v): - """ - Calculate average end point error - :param tu: ground-truth horizontal flow map - :param tv: ground-truth vertical flow map - :param u: estimated horizontal flow map - :param v: estimated vertical flow map - :return: End point error of the estimated flow - """ - smallflow = 0.0 - ''' - stu = tu[bord+1:end-bord,bord+1:end-bord] - stv = tv[bord+1:end-bord,bord+1:end-bord] - su = u[bord+1:end-bord,bord+1:end-bord] - sv = v[bord+1:end-bord,bord+1:end-bord] - ''' - stu = tu[:] - stv = tv[:] - su = u[:] - sv = v[:] - - idxUnknow = (abs(stu) > UNKNOWN_FLOW_THRESH) | (abs(stv) > UNKNOWN_FLOW_THRESH) - stu[idxUnknow] = 0 - stv[idxUnknow] = 0 - su[idxUnknow] = 0 - sv[idxUnknow] = 0 - - ind2 = [(np.absolute(stu) > smallflow) | (np.absolute(stv) > smallflow)] - index_su = su[ind2] - index_sv = sv[ind2] - an = 1.0 / np.sqrt(index_su ** 2 + index_sv ** 2 + 1) - un = index_su * an - vn = index_sv * an - - index_stu = stu[ind2] - index_stv = stv[ind2] - tn = 1.0 / np.sqrt(index_stu ** 2 + index_stv ** 2 + 1) - tun = index_stu * tn - tvn = index_stv * tn - - ''' - angle = un * tun + vn * tvn + (an * tn) - index = [angle == 1.0] - angle[index] = 0.999 - ang = np.arccos(angle) - mang = np.mean(ang) - mang = mang * 180 / np.pi - ''' - - epe = np.sqrt((stu - su) ** 2 + (stv - sv) ** 2) - epe = epe[ind2] - mepe = np.mean(epe) - return mepe - - -def flow_to_image(flow): - """ - Convert flow into middlebury color code image - :param flow: optical flow map - :return: optical flow image in middlebury color - """ - u = flow[:, :, 0] - v = flow[:, :, 1] - - maxu = -999. - maxv = -999. - minu = 999. - minv = 999. 
- - idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH) - u[idxUnknow] = 0 - v[idxUnknow] = 0 - - maxu = max(maxu, np.max(u)) - minu = min(minu, np.min(u)) - - maxv = max(maxv, np.max(v)) - minv = min(minv, np.min(v)) - - rad = np.sqrt(u ** 2 + v ** 2) - maxrad = max(-1, np.max(rad)) - - u = u/(maxrad + np.finfo(float).eps) - v = v/(maxrad + np.finfo(float).eps) - - img = compute_color(u, v) - - idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2) - img[idx] = 0 - - return np.uint8(img) - - -def evaluate_flow_file(gt_file, pred_file): - """ - evaluate the estimated optical flow end point error according to ground truth provided - :param gt_file: ground truth file path - :param pred_file: estimated optical flow file path - :return: end point error, float32 - """ - # Read flow files and calculate the errors - gt_flow = read_flow(gt_file) # ground truth flow - eva_flow = read_flow(pred_file) # predicted flow - # Calculate errors - average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1], eva_flow[:, :, 0], eva_flow[:, :, 1]) - return average_pe - - -def evaluate_flow(gt_flow, pred_flow): - """ - gt: ground-truth flow - pred: estimated flow - """ - average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1], pred_flow[:, :, 0], pred_flow[:, :, 1]) - return average_pe - - -""" -============== -Disparity Section -============== -""" - - -def read_disp_png(file_name): - """ - Read optical flow from KITTI .png file - :param file_name: name of the flow file - :return: optical flow data in matrix - """ - image_object = png.Reader(filename=file_name) - image_direct = image_object.asDirect() - image_data = list(image_direct[2]) - (w, h) = image_direct[3]['size'] - channel = len(image_data[0]) / w - flow = np.zeros((h, w, channel), dtype=np.uint16) - for i in range(len(image_data)): - for j in range(channel): - flow[i, :, j] = image_data[i][j::channel] - return flow[:, :, 0] / 256 - - -def disp_to_flowfile(disp, filename): - """ - Read KITTI disparity file in png format - :param disp: disparity matrix - :param filename: the flow file name to save - :return: None - """ - f = open(filename, 'wb') - magic = np.array([202021.25], dtype=np.float32) - (height, width) = disp.shape[0:2] - w = np.array([width], dtype=np.int32) - h = np.array([height], dtype=np.int32) - empty_map = np.zeros((height, width), dtype=np.float32) - data = np.dstack((disp, empty_map)) - magic.tofile(f) - w.tofile(f) - h.tofile(f) - data.tofile(f) - f.close() - - -""" -============== -Image Section -============== -""" - - -def read_image(filename): - """ - Read normal image of any format - :param filename: name of the image file - :return: image data in matrix uint8 type - """ - img = Image.open(filename) - im = np.array(img) - return im - -def warp_flow(img, flow): - h, w = flow.shape[:2] - flow = flow.copy().astype(np.float32) - flow[:,:,0] += np.arange(w) - flow[:,:,1] += np.arange(h)[:,np.newaxis] - res = cv2.remap(img, flow, None, cv2.INTER_LINEAR) - return res - -def warp_image(im, flow): - """ - Use optical flow to warp image to the next - :param im: image to warp - :param flow: optical flow - :return: warped image - """ - from scipy import interpolate - image_height = im.shape[0] - image_width = im.shape[1] - flow_height = flow.shape[0] - flow_width = flow.shape[1] - n = image_height * image_width - (iy, ix) = np.mgrid[0:image_height, 0:image_width] - (fy, fx) = np.mgrid[0:flow_height, 0:flow_width] - fx = fx.astype(np.float64) - fy = fy.astype(np.float64) - fx += flow[:,:,0] - fy += flow[:,:,1] - 
mask = np.logical_or(fx <0 , fx > flow_width) - mask = np.logical_or(mask, fy < 0) - mask = np.logical_or(mask, fy > flow_height) - fx = np.minimum(np.maximum(fx, 0), flow_width) - fy = np.minimum(np.maximum(fy, 0), flow_height) - points = np.concatenate((ix.reshape(n,1), iy.reshape(n,1)), axis=1) - xi = np.concatenate((fx.reshape(n, 1), fy.reshape(n,1)), axis=1) - warp = np.zeros((image_height, image_width, im.shape[2])) - for i in range(im.shape[2]): - channel = im[:, :, i] - plt.imshow(channel, cmap='gray') - values = channel.reshape(n, 1) - new_channel = interpolate.griddata(points, values, xi, method='cubic') - new_channel = np.reshape(new_channel, [flow_height, flow_width]) - new_channel[mask] = 1 - warp[:, :, i] = new_channel.astype(np.uint8) - - return warp.astype(np.uint8) - - -""" -============== -Others -============== -""" - -def pfm_to_flo(pfm_file): - flow_filename = pfm_file[0:pfm_file.find('.pfm')] + '.flo' - (data, scale) = pfm.readPFM(pfm_file) - flow = data[:, :, 0:2] - write_flow(flow, flow_filename) - - -def scale_image(image, new_range): - """ - Linearly scale the image into desired range - :param image: input image - :param new_range: the new range to be aligned - :return: image normalized in new range - """ - min_val = np.min(image).astype(np.float32) - max_val = np.max(image).astype(np.float32) - min_val_new = np.array(min(new_range), dtype=np.float32) - max_val_new = np.array(max(new_range), dtype=np.float32) - scaled_image = (image - min_val) / (max_val - min_val) * (max_val_new - min_val_new) + min_val_new - return scaled_image.astype(np.uint8) - - -def compute_color(u, v): - """ - compute optical flow color map - :param u: optical flow horizontal map - :param v: optical flow vertical map - :return: optical flow in color code - """ - [h, w] = u.shape - img = np.zeros([h, w, 3]) - nanIdx = np.isnan(u) | np.isnan(v) - u[nanIdx] = 0 - v[nanIdx] = 0 - - colorwheel = make_color_wheel() - ncols = np.size(colorwheel, 0) - - rad = np.sqrt(u**2+v**2) - - a = np.arctan2(-v, -u) / np.pi - - fk = (a+1) / 2 * (ncols - 1) + 1 - - k0 = np.floor(fk).astype(int) - - k1 = k0 + 1 - k1[k1 == ncols+1] = 1 - f = fk - k0 - - for i in range(0, np.size(colorwheel,1)): - tmp = colorwheel[:, i] - col0 = tmp[k0-1] / 255 - col1 = tmp[k1-1] / 255 - col = (1-f) * col0 + f * col1 - - idx = rad <= 1 - col[idx] = 1-rad[idx]*(1-col[idx]) - notidx = np.logical_not(idx) - - col[notidx] *= 0.75 - img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx))) - - return img - - -def make_color_wheel(): - """ - Generate color wheel according Middlebury color code - :return: Color wheel - """ - RY = 15 - YG = 6 - GC = 4 - CB = 11 - BM = 13 - MR = 6 - - ncols = RY + YG + GC + CB + BM + MR - - colorwheel = np.zeros([ncols, 3]) - - col = 0 - - # RY - colorwheel[0:RY, 0] = 255 - colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY)) - col += RY - - # YG - colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG)) - colorwheel[col:col+YG, 1] = 255 - col += YG - - # GC - colorwheel[col:col+GC, 1] = 255 - colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC)) - col += GC - - # CB - colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB)) - colorwheel[col:col+CB, 2] = 255 - col += CB - - # BM - colorwheel[col:col+BM, 2] = 255 - colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM)) - col += + BM - - # MR - colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR)) - 
colorwheel[col:col+MR, 0] = 255 - - return colorwheel - - -def read_flo_file(filename): - """ - Read from Middlebury .flo file - :param flow_file: name of the flow file - :return: optical flow data in matrix - """ - f = open(filename, 'rb') - magic = np.fromfile(f, np.float32, count=1) - data2d = None - - if 202021.25 != magic: - print('Magic number incorrect. Invalid .flo file') - else: - w = np.fromfile(f, np.int32, count=1) - h = np.fromfile(f, np.int32, count=1) - #print("Reading %d x %d flow file in .flo format" % (h, w)) - flow = np.ones((h[0],w[0],3)) - data2d = np.fromfile(f, np.float32, count=2 * w[0] * h[0]) - # reshape data into 3D array (columns, rows, channels) - data2d = np.resize(data2d, (h[0], w[0], 2)) - flow[:,:,:2] = data2d - f.close() - return flow - - -def read_png_file(flow_file): - """ - Read from KITTI .png file - :param flow_file: name of the flow file - :return: optical flow data in matrix - """ - flow = cv2.imread(flow_file,-1)[:,:,::-1].astype(np.float64) - # flow_object = png.Reader(filename=flow_file) - # flow_direct = flow_object.asDirect() - # flow_data = list(flow_direct[2]) - # (w, h) = flow_direct[3]['size'] - # #print("Reading %d x %d flow file in .png format" % (h, w)) - # flow = np.zeros((h, w, 3), dtype=np.float64) - # for i in range(len(flow_data)): - # flow[i, :, 0] = flow_data[i][0::3] - # flow[i, :, 1] = flow_data[i][1::3] - # flow[i, :, 2] = flow_data[i][2::3] - - invalid_idx = (flow[:, :, 2] == 0) - flow[:, :, 0:2] = (flow[:, :, 0:2] - 2 ** 15) / 64.0 - flow[invalid_idx, 0] = 0 - flow[invalid_idx, 1] = 0 - return flow - - -def read_pfm_file(flow_file): - """ - Read from .pfm file - :param flow_file: name of the flow file - :return: optical flow data in matrix - """ - (data, scale) = pfm.readPFM(flow_file) - return data - - -# fast resample layer -def resample(img, sz): - """ - img: flow map to be resampled - sz: new flow map size. 
Must be [height, width]
-    """
-    original_image_size = img.shape
-    in_height = img.shape[0]
-    in_width = img.shape[1]
-    out_height = sz[0]
-    out_width = sz[1]
-    out_flow = np.zeros((out_height, out_width, 2))
-    # find scale
-    height_scale = float(in_height) / float(out_height)
-    width_scale = float(in_width) / float(out_width)
-
-    [x,y] = np.meshgrid(range(out_width), range(out_height))
-    xx = x * width_scale
-    yy = y * height_scale
-    x0 = np.floor(xx).astype(np.int32)
-    x1 = x0 + 1
-    y0 = np.floor(yy).astype(np.int32)
-    y1 = y0 + 1
-
-    x0 = np.clip(x0,0,in_width-1)
-    x1 = np.clip(x1,0,in_width-1)
-    y0 = np.clip(y0,0,in_height-1)
-    y1 = np.clip(y1,0,in_height-1)
-
-    Ia = img[y0,x0,:]
-    Ib = img[y1,x0,:]
-    Ic = img[y0,x1,:]
-    Id = img[y1,x1,:]
-
-    wa = (y1-yy) * (x1-xx)
-    wb = (yy-y0) * (x1-xx)
-    wc = (y1-yy) * (xx-x0)
-    wd = (yy-y0) * (xx-x0)
-    out_flow[:,:,0] = (Ia[:,:,0]*wa + Ib[:,:,0]*wb + Ic[:,:,0]*wc + Id[:,:,0]*wd) * out_width / in_width
-    out_flow[:,:,1] = (Ia[:,:,1]*wa + Ib[:,:,1]*wb + Ic[:,:,1]*wc + Id[:,:,1]*wd) * out_height / in_height
-
-    return out_flow
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Digital Signal Processing Sanjit Mitra 4th Edition PDF.58 A Computer-Based Approach to Discrete-Time Systems.md b/spaces/raedeXanto/academic-chatgpt-beta/Digital Signal Processing Sanjit Mitra 4th Edition PDF.58 A Computer-Based Approach to Discrete-Time Systems.md
deleted file mode 100644
index a8fb4998ae7307fd5bfc41c7e84f123d1b941e3a..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Digital Signal Processing Sanjit Mitra 4th Edition PDF.58 A Computer-Based Approach to Discrete-Time Systems.md
+++ /dev/null
@@ -1,128 +0,0 @@
-
-

Digital Signal Processing: A Computer-Based Approach by Sanjit K. Mitra (4th Edition)

-

Digital signal processing (DSP) is a branch of engineering that deals with the analysis, design, and implementation of systems that process signals, such as audio, speech, image, video, radar, and biomedical signals. DSP is essential for many applications in communications, multimedia, entertainment, medicine, security, and science.
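To make that concrete, here is a minimal, self-contained Python sketch (my own illustration, not from the book) of one of the simplest DSP operations, smoothing a noisy signal with a moving-average FIR filter; the sampling rate, tone frequency, and window length are arbitrary illustrative choices.

import numpy as np

# A 50 Hz sine sampled at 1 kHz, buried in white noise (illustrative values).
fs = 1000.0
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 50 * t) + 0.5 * np.random.randn(t.size)

# 5-point moving average: y[n] = (x[n] + x[n-1] + ... + x[n-4]) / 5.
h = np.ones(5) / 5.0
y = np.convolve(x, h, mode="same")  # FIR filtering via convolution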

-




-

One of the most popular and comprehensive books on DSP is Digital Signal Processing: A Computer-Based Approach by Sanjit K. Mitra, a professor emeritus at the University of California, Santa Barbara. Now in its fourth edition, the book introduces the tools used in the analysis and design of discrete-time systems for DSP, and it is intended for a senior- or first-year-graduate-level DSP course whose students have a background in calculus, linear algebra, and basic circuit theory.

-

In this article, we will review the main features of the book by Mitra, summarize its contents, and discuss how it can help readers learn and apply DSP.

-

Features of the book

-

The book by Mitra is organized into 13 chapters that cover topics such as discrete-time signals and systems, frequency analysis, sampling and reconstruction, z-transforms, discrete Fourier transforms (DFTs), fast Fourier transforms (FFTs), digital filter design, finite impulse response (FIR) filters, infinite impulse response (IIR) filters, multirate signal processing, adaptive filters, wavelets, and DSP applications.
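As a taste of the computational side of those chapters, the following short sketch (my own illustration, in Python rather than the book's MATLAB) verifies numerically that the FFT is simply a fast algorithm for the DFT:

import numpy as np

N = 8
x = np.random.randn(N)
n = np.arange(N)

# Direct DFT: X[k] = sum_n x[n] * exp(-2j*pi*k*n/N)
W = np.exp(-2j * np.pi * np.outer(n, n) / N)
X_dft = W @ x

X_fft = np.fft.fft(x)
assert np.allclose(X_dft, X_fft)  # identical results, very different cost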

-

The fourth edition of the book contains several new features that enhance its quality and usefulness. Some of these features are:

-
    -
  • The extensive use of MATLAB-based examples that illustrate how to implement DSP algorithms and techniques using a popular software tool.
  • -
• A major reorganization of material that improves the logical flow and clarity of presentation. Chapters 2, 3, and 4 have been reorganized into three new chapters covering discrete-time signals and systems, linear time-invariant systems, and frequency analysis of continuous-time and discrete-time signals.
  • -
  • Worked-out examples that explain new and difficult concepts and expose readers to real-life signal processing problems.
  • -
• The inclusion of new topics such as compressed sensing, sparse signal processing, empirical mode decomposition (EMD), the Hilbert-Huang transform (HHT), singular value decomposition (SVD), principal component analysis (PCA), independent component analysis (ICA), linear predictive coding (LPC), vector quantization (VQ), speech coding standards, the JPEG2000 image compression standard, biorthogonal wavelets, and the lifting scheme for wavelet construction (a small SVD/PCA sketch follows this list).
  • -
  • A CD that accompanies the textbook includes all of the MATLAB programs used in the book as well as additional material such as solutions to selected problems, lecture slides, laboratory experiments.
  • -
-

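Several of the new topics in the feature list above lend themselves to tiny demonstrations. The sketch below (my own illustration; the data and dimensions are made up) computes a PCA via the SVD, one of the additions mentioned:

import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((100, 3)) @ np.diag([3.0, 1.0, 0.1])  # anisotropic cloud

# PCA via SVD: center the data; the right singular vectors of the
# centered matrix are the principal directions.
centered = data - data.mean(axis=0)
U, s, Vt = np.linalg.svd(centered, full_matrices=False)
explained_variance = s**2 / (len(data) - 1)
components = Vt  # rows are the principal axes, strongest first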
The benefits of using a computer-based approach for DSP are manifold; it allows readers to do all of the following (see the short sketch after this list):

-
    -
  • Visualize signals and systems in different domains (time, frequency, z-plane) using graphical tools.
  • -
  • Verify theoretical results using numerical simulations.
  • -
  • Experiment with different parameters and scenarios using interactive tools.
  • -
  • Design and test practical DSP systems using real-world data.
  • -
  • Learn how to use a widely used software tool for DSP research and development.
  • -
-
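As noted above, here is a minimal Python/NumPy sketch of that workflow, standing in for the book's MATLAB sessions; the sample rate, tone frequency, and plot settings are illustrative assumptions, not values from the book. It generates a sampled sinusoid and inspects it in both the time and frequency domains:

import numpy as np
import matplotlib.pyplot as plt

fs = 8000                        # sample rate in Hz (illustrative assumption)
t = np.arange(0, 0.05, 1 / fs)   # 50 ms of samples
x = np.sin(2 * np.pi * 440 * t)  # a 440 Hz sinusoid

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6))

# Time domain: plot the discrete samples, as with MATLAB's stem()
ax1.stem(t[:80], x[:80])
ax1.set(xlabel="time [s]", ylabel="amplitude", title="Time domain")

# Frequency domain: magnitude spectrum via the FFT
X = np.fft.rfft(x)
f = np.fft.rfftfreq(len(x), d=1 / fs)
ax2.plot(f, np.abs(X) / len(x))
ax2.set(xlabel="frequency [Hz]", ylabel="|X(f)|", title="Magnitude spectrum")

plt.tight_layout()
plt.show()

The same script is easy to rerun with a different sample rate or tone frequency, which is exactly the kind of interactive experimentation a computer-based approach encourages.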

Chapter summaries

-

In this section, we will briefly summarize the main concepts and techniques discussed in each chapter of the book by Mitra. We will also mention some examples of applications and problems solved in each chapter.

-


Chapter 1: Introduction
Summary: Introduces core DSP concepts such as signals, systems, analog-to-digital and digital-to-analog conversion, quantization, aliasing, and the Nyquist sampling theorem.
MATLAB examples: sampling a sinusoidal signal; quantizing a speech signal; demonstrating aliasing with a musical note.

Chapter 2: Discrete-Time Signals and Systems
Summary: Covers discrete-time signals (unit impulse, unit step, unit ramp, exponential, sinusoidal, complex exponential, and periodic signals) and discrete-time systems (linear, time-invariant, causal, stable, memoryless, invertible, and recursive). Introduces the convolution sum, correlation, and difference equations.
MATLAB examples: generating discrete-time signals and plotting them with stem; computing the convolution sum; solving difference equations; finding the impulse response of a system.

Chapter 3: Linear Time-Invariant Systems
Summary: Discusses the properties of LTI systems (linearity, time-invariance, causality, stability, memorylessness, invertibility, and recursiveness), representation methods (block diagrams, signal flow graphs, and state-space models), and analysis methods (impulse response, frequency response, transfer function, and pole-zero plots).
MATLAB examples: finding the impulse response, frequency response, transfer function, pole-zero plot, and state-space model of an LTI system.

Chapter 4: Frequency Analysis: Continuous-Time Signals
Summary: Reviews the Fourier series, Fourier transform, and Laplace transform and their properties, along with magnitude, phase, power, and energy spectra, bandwidth, and filtering.
MATLAB examples: computing Fourier series coefficients, Fourier transforms, and Laplace transforms; plotting magnitude, phase, power, and energy spectra; designing a lowpass filter.

Chapter 5: Frequency Analysis: Discrete-Time Signals
Summary: Covers the discrete-time Fourier series (DTFS), discrete-time Fourier transform (DTFT), and z-transform and their properties, plus the same spectral analysis methods as Chapter 4.
MATLAB examples: computing DTFS coefficients, DTFTs, and z-transforms; plotting spectra; designing a highpass filter.

Chapter 6: Sampling and Reconstruction
Summary: Discusses ideal and practical sampling and reconstruction, the sampling theorem, the aliasing effect, and anti-aliasing filter design.
MATLAB examples: sampling a continuous-time signal and reconstructing it from its samples; demonstrating the aliasing effect; designing anti-aliasing and interpolation filters.

Chapter 7: Z-Transforms
Summary: Covers z-transforms and their properties (linearity, time shifting, scaling, differentiation, convolution, the initial value theorem, the final value theorem, and partial fraction expansion) and inverse z-transform methods (inspection, long division, the residue method, and power series expansion).
MATLAB examples: computing z-transforms and inverse z-transforms; verifying z-transform properties; solving difference equations with the z-transform.

Chapter 8: Discrete Fourier Transforms
Summary: Covers DFTs and their properties (linearity, symmetry, periodicity, conjugation, time reversal, time and frequency shifting, modulation, convolution, correlation, Parseval's theorem, and circular convolution) and computation methods (the direct method, the matrix method, and radix-2 decimation-in-time (DIT) and decimation-in-frequency (DIF) algorithms).
MATLAB examples: computing DFTs and inverse DFTs; verifying DFT properties; computing the DFT with the matrix method and radix-2 algorithms; performing circular convolution with the DFT.

Chapter 9: Fast Fourier Transforms
Summary: Covers FFTs and their advantage over direct DFT evaluation in computational complexity and speed; radix-2 and radix-4 DIT and DIF algorithms and mixed-radix algorithms; and applications such as spectrum analysis, filtering, correlation, and convolution.
MATLAB examples: computing FFTs and inverse FFTs; comparing the FFT and the direct DFT in computation time and accuracy; performing spectrum analysis, filtering, correlation, and convolution with the FFT.

Chapter 10: Digital Filter Design
Summary: Covers design methods including the window method, the frequency sampling method, optimal and least-squares design, the Parks-McClellan algorithm, the Butterworth, Chebyshev, inverse Chebyshev, elliptic, and Bessel prototypes, and the impulse invariant transformation, as well as filter types (lowpass, highpass, bandpass, bandstop, and allpass).
MATLAB examples: designing FIR filters with the window, frequency sampling, optimal, and Parks-McClellan methods; designing IIR filters with the classical prototypes and the impulse invariant transformation; plotting magnitude and phase responses.

Chapter 11: Finite Impulse Response Filters
Summary: Covers FIR filters and their advantages and disadvantages relative to IIR filters; structures (direct form, cascade form, parallel form, linear phase form, and lattice form); and implementation issues (quantization effects, round-off noise, limit cycles, overflow oscillations, and scaling).
MATLAB examples: implementing the FIR structures; analyzing the finite-wordlength effects; comparing FIR and IIR filters in stability, phase distortion, group delay, and computational complexity.

Chapter 12: Infinite Impulse Response Filters
Summary: Covers IIR filters and their trade-offs relative to FIR filters; structures (direct form I, direct form II, cascade form, parallel form, transposed form, and lattice form); and the same implementation issues as Chapter 11.
MATLAB examples: implementing the IIR structures; analyzing the finite-wordlength effects; comparing IIR and FIR filters.

Chapter 13: Multirate Signal Processing
Summary: Covers decimation, interpolation, polyphase decomposition, multistage implementation, sample rate conversion, filter banks, quadrature mirror filters (QMFs), and the wavelet transform, with applications in speech coding, image compression, audio enhancement, digital audio broadcasting (DAB), and software-defined radio (SDR).
MATLAB examples: decimating and interpolating a signal; polyphase decomposition and multistage implementation of a filter; sample rate conversion; implementing filter banks and QMFs; computing the wavelet transform and its inverse; comparing multirate techniques in computational complexity and performance.

Chapter 14: Adaptive Filters
Summary: Covers adaptive filters and their advantages over fixed filters for nonstationary signals and unknown system parameters; the least mean square (LMS), normalized LMS (NLMS), recursive least squares (RLS), and affine projection (APA) algorithms; and applications in system identification, noise cancellation, channel equalization, and echo cancellation.
MATLAB examples: implementing the LMS, NLMS, RLS, and APA algorithms; comparing them in convergence rate, stability, complexity, and robustness; performing each of the four listed applications.

Chapter 15: Wavelets
Summary: Covers wavelets and their advantages over the Fourier transform for signals with time-varying frequency components; the continuous and discrete wavelet transforms (CWT and DWT), wavelet basis functions and families, and multiresolution analysis (MRA); and applications in signal denoising, compression, feature extraction, and classification.
MATLAB examples: computing the CWT and DWT; plotting wavelet coefficients and scalograms; choosing wavelet basis functions and families; performing MRA; performing each of the four listed applications.
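
To give a concrete taste of the filter-design material summarized above (Chapters 10-12), here is a small Python/SciPy sketch standing in for the book's MATLAB examples. It designs a lowpass FIR filter with the window method and plots its magnitude response; the sample rate, cutoff, filter length, and window are illustrative assumptions rather than parameters taken from the book:

import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import firwin, freqz

fs = 8000      # sample rate in Hz (assumed)
cutoff = 1000  # lowpass cutoff in Hz (assumed)
numtaps = 101  # odd length gives a linear-phase type-I FIR filter

# Window-method design (Hamming window), cf. Chapter 10
h = firwin(numtaps, cutoff, window="hamming", fs=fs)

# Magnitude response on a dB scale
w, H = freqz(h, worN=2048, fs=fs)
plt.plot(w, 20 * np.log10(np.maximum(np.abs(H), 1e-12)))
plt.xlabel("frequency [Hz]")
plt.ylabel("magnitude [dB]")
plt.title("Lowpass FIR filter, window method")
plt.grid(True)
plt.show()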
-

Conclusion

-

In this article, we have reviewed the main features of the book Digital Signal Processing: A Computer-Based Approach by Sanjit K. Mitra. The book is a comprehensive and accessible introduction to DSP that covers both theory and practice. The book uses a computer-based approach that emphasizes the use of MATLAB for implementing DSP algorithms and techniques. The book covers topics such as discrete-time signals and systems, frequency analysis, sampling and reconstruction, z-transforms, discrete Fourier transforms, fast Fourier transforms, digital filter design, finite impulse response filters, infinite impulse response filters, multirate signal processing, adaptive filters, and wavelets. The book also provides numerous examples, problems, exercises, projects, and MATLAB programs that illustrate the concepts and applications of DSP.

-

The book is suitable for a course on DSP for seniors or first-year graduate students who have a background in calculus, linear algebra, and basic circuit theory. The book can also be used as a reference for professionals and researchers who want to learn and apply DSP.

-

Frequently Asked Questions

-

Q: What are the prerequisites for reading this book?

-

A: The prerequisites for reading this book are calculus, linear algebra, and basic circuit theory.

-

Q: What are the benefits of using a computer-based approach for DSP?

-

A: The benefits of using a computer-based approach for DSP are that it allows readers to visualize signals and systems in different domains, verify theoretical results using numerical simulations, experiment with different parameters and scenarios using interactive tools, design and test practical DSP systems using real-world data, and learn how to use a widely used software tool for DSP research and development.

-

Q: What are the differences between FIR and IIR filters?

-

A: FIR filters have finite impulse responses, meaning that their output depends only on a finite number of past input samples. IIR filters have infinite impulse responses, meaning that feedback makes their output depend on an unbounded history of past inputs. FIR filters are inherently stable and can be designed to have exactly linear phase. IIR filters can be unstable or have nonlinear phase, but they can achieve higher performance at a lower order than FIR filters. The short sketch below makes the distinction concrete.
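
This sketch assumes NumPy and SciPy are available; the coefficients are illustrative, not taken from the book. A unit impulse is pushed through a 5-tap moving-average FIR filter and through a first-order recursive IIR filter: the FIR output is exactly zero after 5 samples, while the IIR output decays geometrically but never truly ends.

import numpy as np
from scipy.signal import lfilter

impulse = np.zeros(20)
impulse[0] = 1.0

# FIR: 5-tap moving average, y[n] = (x[n] + ... + x[n-4]) / 5
fir_out = lfilter(np.ones(5) / 5, [1.0], impulse)

# IIR: first-order recursion, y[n] = x[n] + 0.8 * y[n-1]
iir_out = lfilter([1.0], [1.0, -0.8], impulse)

print(np.round(fir_out, 3))  # zero from sample 5 onward (finite response)
print(np.round(iir_out, 3))  # 0.8**n: decays forever (infinite response)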

-

Q: What are the advantages of wavelets over Fourier transform?

-

A: Wavelets are basis functions that can represent signals with time-varying frequency components more efficiently than the Fourier transform. They capture both the frequency content and the temporal localization of a signal, and they can adapt to different scales or resolutions of a signal. The sketch below illustrates this localization.
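
Here the third-party PyWavelets package stands in for the book's MATLAB-based wavelet examples; the test signal and wavelet choice are illustrative assumptions:

import numpy as np
import pywt  # PyWavelets; assumed installed via: pip install PyWavelets

fs = 1024
t = np.arange(0, 1, 1 / fs)
# A signal whose frequency jumps from 20 Hz to 100 Hz at t = 0.5 s
x = np.where(t < 0.5, np.sin(2 * np.pi * 20 * t), np.sin(2 * np.pi * 100 * t))

# Single-level DWT with a Daubechies-4 wavelet:
# cA holds the smooth approximation, cD the localized detail
cA, cD = pywt.dwt(x, "db4")

# The detail coefficients are larger where the signal oscillates faster,
# so the change at t = 0.5 s shows up at a specific position in cD --
# something a plain Fourier magnitude spectrum cannot reveal.
half = len(cD) // 2
print(np.abs(cD[:half]).mean(), np.abs(cD[half:]).mean())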

-

Q: How can I get more information and resources on DSP?

-

A: You can get more information and resources on DSP from the following sources:

-
    -
  • The companion website of the book by Mitra: https://www.mheducation.com/highered/product/digital-signal-processing-computer-based-approach-mitra/M9780078028151.html
  • -
  • The official website of MATLAB: https://www.mathworks.com/products/matlab.html
  • -
  • The online courses on DSP by MIT OpenCourseWare: https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-341-discrete-time-signal-processing-fall-2005/
  • -
  • The online tutorials on DSP by TutorialsPoint: https://www.tutorialspoint.com/digital_signal_processing/index.htm
  • -
  • The online book The Scientist and Engineer's Guide to Digital Signal Processing: http://www.dspguide.com/
  • -
  • The same guide as hosted by Analog Devices: https://www.analog.com/en/education/education-library/scientist_engineers_guide.html
  • -
  • The online book Digital Signal Processing - A Practical Approach: https://www.sciencedirect.com/book/9780201596199/digital-signal-processing-a-practical-approach
  • -
  • The online book Digital Signal Processing: Principles, Algorithms, and Applications: https://www.sciencedirect.com/book/9780131873742/digital-signal-processing-principles-algorithms-and-applications
  • -
  • The online book Digital Signal Processing - A Modern Introduction: https://www.sciencedirect.com/book/9780123740908/digital-signal-processing-a-modern-introduction
  • -
  • The online book Digital Signal Processing - A Computer-Based Approach: https://link.springer.com/book/10.1007/978-1-4612-1282-9
  • -
-

-
-
\ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download La Grande Guerra il gioco PC che ti fa comandare eserciti trincee e nuove tecnologie.md b/spaces/raedeXanto/academic-chatgpt-beta/Download La Grande Guerra il gioco PC che ti fa comandare eserciti trincee e nuove tecnologie.md deleted file mode 100644 index f8a00ce7a0bf257ee97d052ce1a64ec935d9fa3f..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download La Grande Guerra il gioco PC che ti fa comandare eserciti trincee e nuove tecnologie.md +++ /dev/null @@ -1,118 +0,0 @@ -
-

Download La Grande Guerra Gioco PC

-

If you are a fan of strategy games and history, you might want to check out La Grande Guerra, a PC game that lets you relive the events of World War I. In this article, we will tell you what La Grande Guerra is, how to download it, and why you should play it.

-

What is La Grande Guerra?

-

La Grande Guerra is a real-time strategy game developed by Bryo Media and published by Black Bean Games in 2004. The game covers the period from 1914 to 1918, and allows you to choose one of the five major powers involved in the conflict: France, Germany, Italy, Austria-Hungary, or Great Britain. As the commander of your army, you can control infantry, cavalry, artillery, tanks, planes, and ships, and use them to fight on various historical battlefields.

-




-

A real-time strategy game set in World War I

-

La Grande Guerra is not a typical strategy game that focuses on resource management and base building. Instead, it is a realistic simulation of the warfare tactics and technologies of the time. You have to deal with the challenges of trench warfare, gas attacks, air raids, naval battles, and more. You also have to consider the morale and fatigue of your troops, as well as the weather and terrain conditions.

-

A historical simulation of the Great War

-

La Grande Guerra is not only a game, but also a learning tool that teaches you about the history and culture of World War I. The game features authentic maps, units, weapons, uniforms, and sounds that recreate the atmosphere of the era. The game also includes historical events and characters that influence the course of the war. For example, you can witness the assassination of Archduke Franz Ferdinand, the sinking of the Lusitania, or the entry of the United States into the war.

-

A challenging and immersive gameplay experience

-

La Grande Guerra is not a game for casual players who want to win easily. It is a game for hardcore gamers who enjoy a high level of difficulty and realism. The game offers three modes of play: campaign, skirmish, and multiplayer. In campaign mode, you can follow the chronological progression of the war through 20 missions that span different fronts and scenarios. In skirmish mode, you can customize your own battles with various options and settings. In multiplayer mode, you can challenge other players online or on a local network.

-

How to download La Grande Guerra?

-

If you are interested in playing La Grande Guerra, you will need to meet some requirements and follow some steps to download it.

-

Requirements and compatibility

-

La Grande Guerra is an old game that does not require a powerful PC to run. However, it may not be compatible with newer operating systems or devices. Here are the minimum system requirements for the game:

-
    -
  • Windows 98/ME/2000/XP
  • -
  • Pentium III 800 MHz or equivalent processor
  • -
  • 256 MB of RAM
  • -
  • 32 MB DirectX 9 compatible video card
  • -
  • DirectX 9 compatible sound card
  • -
  • 1 GB of free hard disk space
  • -
  • CD-ROM drive
  • -
  • Keyboard and mouse
  • -
-

If you have a newer PC or a Mac, you may need to use an emulator or a compatibility mode to run the game.

-

Sources and links

-

La Grande Guerra is not available on any digital distribution platform like Steam or GOG. The only way to get the game is to buy a physical copy from online retailers or second-hand sellers. You can also try to find a torrent or a direct download link from unofficial sources, but be careful of viruses and malware.

-

Installation and activation

-

To install La Grande Guerra on your PC, you will need to insert the CD-ROM into your drive and follow the instructions on screen. You may need to enter a serial number or a CD key that comes with your copy of the game. You may also need to update your DirectX drivers or install some patches to fix some bugs or improve performance.

-

-

Why play La Grande Guerra?

-

Now that you know how to download La Grande Guerra, you may wonder why you should play it. Here are some reasons why La Grande Guerra is worth your time and attention.

-

The features and benefits of the game

-

La Grande Guerra has many features and benefits that make it stand out from other strategy games. Some of them are:

-
    -
  • A realistic and detailed representation of World War I
  • -
  • A variety of units, weapons, and vehicles that reflect the technological innovations of the time
  • -
  • A dynamic weather system that affects visibility, movement, and combat
  • -
  • A morale system that influences the behavior and performance of your troops
  • -
  • A historical mode that follows the actual events and outcomes of the war
  • -
  • A sandbox mode that allows you to change history and create your own scenarios
  • -
  • A multiplayer mode that supports up to 8 players online or on LAN
  • -
  • A map editor that lets you create your own maps and missions
  • -
-

The reviews and ratings of the game

-

La Grande Guerra received mostly positive reviews and ratings from critics and players when it was released in 2004. It was praised for its graphics, sound effects, historical accuracy, gameplay depth, replay value, and educational value, and criticized for its steep learning curve, technical issues, lack of documentation, limited AI, and outdated interface. It holds an average score of 7.4 out of 10 on Multiplayer.it, an Italian gaming website.

-

The tips and tricks for the game

-

If you want to enjoy La Grande Guerra to its fullest potential, you may need some tips and tricks to help you master its mechanics and strategies. Here are some useful ones:

  • Read the manual or watch some tutorials before playing. The game does not have a tutorial mode or an in-game help system.
  • Save often. The game does not have an autosave feature or checkpoints.
  • Use pause frequently. The game allows you to pause at any time and issue orders to your units without losing time.
  • Use cover wisely. The game simulates line-of-sight and ballistics physics. Your units can hide behind trenches, buildings, trees, hills, etc., to avoid enemy fire.
  • Use combined arms tactics. The game encourages you to use different types of units together to achieve synergy effects. For example, use infantry with artillery support, use cavalry for flanking maneuvers, use tanks for breaking through enemy lines, use planes for reconnaissance or bombing, use ships for naval supremacy or landing operations.
  • Use diplomacy wisely. The game allows you to negotiate with other factions for alliances or peace treaties. You can also influence their actions by sending spies or propaganda.
  • Use historical mode wisely. The game allows you to switch between historical mode and sandbox mode at any time. In historical mode, you have to follow the actual events and outcomes of the war. In sandbox mode, you can change history and create your own scenarios. You can also mix and match both modes to create alternative histories.
-

Conclusion

-

In conclusion, La Grande Guerra is a great game for strategy and history fans. It is a realistic and immersive simulation of World War I that offers a lot of challenge and replay value. If you want to download La Grande Guerra, you will need to find a physical copy or an unofficial source, and make sure your PC or Mac meets the system requirements. If you want to play La Grande Guerra well, you will need to learn its mechanics and strategies, and enjoy its features and benefits. We hope this article has helped you to understand what La Grande Guerra is, how to download it, and why you should play it. Have fun and good luck in your battles!

-

FAQs

-

Here are some frequently asked questions about La Grande Guerra:

-
  1. Is La Grande Guerra available in English?

    Yes, La Grande Guerra has an English version, as well as Italian, French, German, and Spanish versions. You can select your preferred language during the installation process.

  2. Is La Grande Guerra compatible with Windows 10?

    La Grande Guerra may not work properly on Windows 10 or other newer operating systems. You may need to use an emulator or a compatibility mode to run the game. You can also try some solutions suggested by other users on online forums or websites.

  3. Is La Grande Guerra based on a true story?

    La Grande Guerra is based on the historical events and characters of World War I, but it is not a documentary or a biography. The game takes some artistic liberties and simplifications to create a more engaging and enjoyable gameplay experience. The game does not intend to offend or glorify any nation, faction, or ideology.

  4. Is La Grande Guerra similar to Valiant Hearts: The Great War?

    La Grande Guerra and Valiant Hearts: The Great War are both games that deal with the theme of World War I, but they are very different in terms of genre, style, and tone. La Grande Guerra is a real-time strategy game that focuses on the military aspects of the war. Valiant Hearts: The Great War is a puzzle-adventure game that focuses on the human aspects of the war. Both games have their own merits and appeal to different audiences.

  5. Is La Grande Guerra still supported by the developers?

    La Grande Guerra is no longer supported by the developers or the publishers. The game was released in 2004 and has not received any updates or patches since then. The official website and forums are no longer active. The game may have some bugs or glitches that cannot be fixed or resolved.
-

-
-
\ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Fate stay night anime torrent How to watch the complete saga for free.md b/spaces/raedeXanto/academic-chatgpt-beta/Fate stay night anime torrent How to watch the complete saga for free.md deleted file mode 100644 index 96749b5c5b6ce594d955d5943602690e178841b2..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Fate stay night anime torrent How to watch the complete saga for free.md +++ /dev/null @@ -1,143 +0,0 @@ -
-

Fate Stay Night Anime Torrent: How to Watch the Epic Fantasy Series Online

-

If you are a fan of anime, you have probably heard of Fate Stay Night, one of the most popular and acclaimed fantasy series in the medium. But if you haven't watched it yet, or if you want to rewatch it, you might be wondering how to find it online. In this article, we will tell you everything you need to know about Fate Stay Night, why you should watch it, and how to watch it online using torrents or other methods.

-




-

What is Fate Stay Night?

-

Fate Stay Night is an anime series based on a visual novel of the same name by Type-Moon, a Japanese game developer. The visual novel was released in 2004 for Windows PCs, and later ported to other platforms such as PlayStation 2, PlayStation Vita, and iOS. The visual novel is divided into three main routes: Fate, Unlimited Blade Works, and Heaven's Feel, each with a different storyline and focus.

-

The Plot of Fate Stay Night

-

The plot of Fate Stay Night revolves around the Holy Grail War, a secret and deadly battle royale that takes place every few decades in Fuyuki City, Japan. The participants of the war are seven magi, or masters, who summon seven heroic spirits, or servants, from various historical and mythical legends. The servants are divided into seven classes: Saber, Archer, Lancer, Rider, Caster, Assassin, and Berserker. The master-servant pairs must fight each other until only one pair remains, who will then be granted the Holy Grail, a powerful artifact that can grant any wish.

-

The main protagonist of the series is Shirou Emiya, a high school student who accidentally witnesses a battle between two servants and gets involved in the war. He is saved by Saber, a servant who claims to be the legendary King Arthur in female form. Shirou becomes Saber's master and vows to protect her and stop the war. Along the way, he meets other masters and servants, some of whom become his allies or enemies. He also learns more about his own past and destiny as a magus.

-

The Characters of Fate Stay Night

-

The characters of Fate Stay Night are one of the main attractions of the series. They are diverse, complex, and memorable, each with their own personality, motivation, and backstory. Some of the most notable characters are:

-


-
    -
  • Saber: The servant of Shirou Emiya. She is a noble and loyal knight who values honor and justice. She has a mysterious past that she cannot remember. She wields Excalibur, the sword of kings.
  • -
  • Archer: The servant of Rin Tohsaka, a talented and wealthy magus who is Shirou's classmate and rival. He is a cynical and sarcastic archer who has no memory of his true identity. He uses various weapons and magic to fight.
  • -
  • Rin Tohsaka: The master of Archer. She is a proud and intelligent magus who comes from a prestigious family of magi. She is also kind and compassionate at heart. She has a crush on Shirou but tries to hide it.
  • -
  • Sakura Matou: Shirou's childhood friend and neighbor. She is a sweet and gentle girl who loves Shirou dearly. She is also a magus who belongs to the Matou family, a clan that practices dark and cruel magic.
  • -
  • Illyasviel von Einzbern: A young girl who is the master of Berserker, a monstrous servant who can destroy anything in his path. She is playful and innocent but also ruthless and sadistic. She has a connection to Shirou's past.
  • -
  • Gilgamesh: The servant of Kirei Kotomine, a priest who acts as the mediator of the Holy Grail War. He is the king of heroes who possesses countless treasures and weapons. He is arrogant and selfish but also charismatic.
  • -
  • Kirei Kotomine: The master of Gilgamesh. He is a cold and manipulative man who enjoys watching others suffer. He has a twisted interest in Shirou and his ideals.
  • -
-

The Different Routes and Adaptations of Fate Stay Night

-

As mentioned earlier, Fate Stay Night has three main routes that diverge depending on the choices made by the player in the visual novel. Each route has its own theme, tone, and focus on different characters and aspects of the story.

-
    -
  • Fate: The first route that introduces the basic premise and characters of the series. It focuses on Shirou's relationship with Saber and his idealism as a hero.
  • -
  • Unlimited Blade Works: The second route that explores Shirou's conflict with Archer and his own identity as a magus. It focuses on Rin's role as Shirou's ally and love interest.
  • -
  • Heaven's Feel: The third route that reveals the dark secrets behind the Holy Grail War and its consequences. It focuses on Sakura's plight as Shirou's friend and lover.
  • -
-

Fate Stay Night has been adapted into various media forms over the years. Some of the most notable adaptations are:

-
    -
  • Fate/stay night (2006): An anime series by Studio Deen that covers the Fate route with some elements from other routes.
  • -
  • Fate/stay night: Unlimited Blade Works (2010): A movie by Studio Deen that covers the Unlimited Blade Works route.
  • -
  • Fate/stay night: Unlimited Blade Works (2014-2015): An anime series by Ufotable that covers the Unlimited Blade Works route with more detail and fidelity.
  • -
  • Fate/stay night: Heaven's Feel (2017-2020): A trilogy of movies by Ufotable that covers the Heaven's Feel route with stunning animation quality.
  • -
-

Why You Should Watch Fate Stay Night

-

If you are still not convinced that Fate Stay Night is worth your time, here are some reasons why you should watch it:

-

The Amazing Animation and Soundtrack

-

One of the biggest draws of Fate Stay Night is its production quality. The Ufotable adaptations in particular are renowned for their stunning animation, and the series is backed by a memorable soundtrack that heightens the drama of its battles.

-

The Complex and Engaging Storyline

-

Fate Stay Night has a rich and intricate storyline that spans multiple timelines, perspectives, and themes. The series explores various philosophical and moral questions, such as the meaning of justice, the value of life, the nature of fate, and the cost of a wish. The series also has many twists and surprises that will keep you on the edge of your seat.

-

The Diverse and Lovable Cast of Characters

-

Fate Stay Night has a large and varied cast of characters that you will grow to love or hate. Each character has their own personality, motivation, and backstory that makes them unique and relatable. You will also see how they develop and change throughout the series, depending on their choices and circumstances. You will also enjoy the interactions and relationships between the characters, whether they are friendly, romantic, or antagonistic.

-

How to Watch Fate Stay Night Online

-

Now that you know what Fate Stay Night is and why you should watch it, you might be wondering how to watch it online. There are several ways to do so, depending on your preference and availability.

-

The Official Streaming Platforms

-

The easiest and safest way to watch Fate Stay Night online is to use the official streaming platforms that have licensed the series. Some of the most popular platforms are:

-
    -
  • Netflix: Netflix has all the Fate Stay Night adaptations available for streaming in most regions. You can watch them with subtitles or dubbing in various languages. You will need a Netflix subscription to access them.
  • -
  • Crunchyroll: Crunchyroll has the Fate/stay night: Unlimited Blade Works (2014-2015) and Fate/stay night: Heaven's Feel (2017-2020) adaptations available for streaming in most regions. You can watch them with subtitles in various languages. You can watch them for free with ads or with a Crunchyroll subscription without ads.
  • -
  • Funimation: Funimation has the Fate/stay night: Unlimited Blade Works (2014-2015) adaptation available for streaming in some regions. You can watch it with subtitles or dubbing in English. You can watch it for free with ads or with a Funimation subscription without ads.
  • -
-

The Alternative Streaming Options

-

If you cannot access the official streaming platforms or if you prefer other options, you can also use some alternative streaming options to watch Fate Stay Night online. Some of the most common options are:

-
    -
  • Unofficial Streaming Sites: There are many unofficial streaming sites that host anime content without permission from the licensors. These sites often have low-quality video and audio, intrusive ads, malware, and pop-ups. They also violate the intellectual property rights of the creators and distributors of the series. We do not recommend using these sites as they are illegal and unsafe.
  • -
  • VPN Services: VPN stands for Virtual Private Network, a service that allows you to change your IP address and location to access geo-restricted content online. You can use a VPN service to access the official streaming platforms that are not available in your region. However, you will still need a subscription to those platforms to watch Fate Stay Night online. You will also need to pay for a reliable and secure VPN service that does not compromise your privacy or speed.
  • -
  • Torrents: Torrents are files that contain data that can be downloaded from peer-to-peer networks using torrent clients. You can use torrents to download Fate Stay Night episodes or movies from other users who have them on their devices. However, you will need a torrent client software and a torrent file or magnet link to do so. You will also need to be careful of fake or malicious torrents that can harm your device or expose your identity. You will also violate the intellectual property rights of the creators and distributors of the series by using torrents.
  • -
-

The Risks and Benefits of Using Torrents

-

As you can see, torrents are one of the most popular and controversial ways to watch Fate Stay Night online. There are some risks and benefits of using torrents that you should be aware of before deciding to use them.

-

Some of the benefits of using torrents are:

-
    -
  • You can watch Fate Stay Night online for free without paying for any subscription or service.
  • -
  • You can watch Fate Stay Night online in high-quality video and audio without any buffering or lagging.
  • -
  • You can watch Fate Stay Night online at any time and place without any restrictions or limitations.
  • -
-

Some of the risks of using torrents are:

-
  • You can violate the intellectual property rights of the creators and distributors of the series by downloading or sharing their content without permission from the licensors. You can face fines, lawsuits, or even jail time depending on the laws of your country and the severity of the infringement.
  • -
  • You can harm your device or compromise your privacy by downloading or sharing fake or malicious torrents that contain viruses, malware, spyware, or trackers. You can lose your data, damage your system, or expose your identity to hackers or authorities.
  • -
  • You can have a poor or inconsistent viewing experience by using torrents that have low quality, incomplete, or corrupted files. You can also face slow download speeds, low seeders, or dead links that prevent you from watching Fate Stay Night online.
  • -
-

Conclusion

-

Summary of the Main Points

-

In conclusion, Fate Stay Night is an epic fantasy anime series that you should watch if you are a fan of anime or not. It has amazing animation and soundtrack, complex and engaging storyline, and diverse and lovable cast of characters. It has three main routes that offer different perspectives and outcomes of the story: Fate, Unlimited Blade Works, and Heaven's Feel. It has been adapted into various media forms over the years, such as anime series, movies, games, and spin-offs.

-

You can watch Fate Stay Night online using various methods, depending on your preference and availability. You can use the official streaming platforms that have licensed the series, such as Netflix, Crunchyroll, and Funimation. You can also use alternative streaming options that are not authorized by the licensors, such as unofficial streaming sites, VPN services, and torrents. However, you should be aware of the risks and benefits of using these options before deciding to use them.

-

Call to Action

-

Now that you have learned everything you need to know about Fate Stay Night and how to watch it online, what are you waiting for? Grab your popcorn and snacks, sit back and relax, and enjoy the show! You will not regret it!

-

Frequently Asked Questions

-

Here are some of the most frequently asked questions about Fate Stay Night and how to watch it online:

-
  1. What is the best order to watch Fate Stay Night?

    There is no definitive answer to this question as different fans have different preferences and opinions. However, a general recommendation is to watch the series in the order of release or adaptation: Fate/stay night (2006), Fate/stay night: Unlimited Blade Works (2010), Fate/stay night: Unlimited Blade Works (2014-2015), Fate/stay night: Heaven's Feel (2017-2020). This way, you can follow the story chronologically and avoid spoilers from later adaptations.

  2. Is Fate Stay Night related to Fate/Zero?

    Fate/Zero is a prequel to Fate Stay Night that tells the story of the previous Holy Grail War, which took place 10 years before the events of Fate Stay Night. It features some of the same characters as well as new ones. It is based on a light novel series by Gen Urobuchi, a different writer from Kinoko Nasu, who wrote Fate Stay Night. It is recommended to watch Fate/Zero after watching Fate Stay Night as it contains spoilers and references to the latter.

  3. Is Fate Stay Night suitable for children?

    Fate Stay Night is not suitable for children as it contains graphic violence, bloodshed, gore, death, sexual content, nudity, profanity, and dark themes. It is rated R-17+ (violence & profanity) by MAL and TV-MA by Netflix. It is intended for mature audiences who can handle its mature content.

  4. How many episodes are there in Fate Stay Night?

    The number of episodes in Fate Stay Night depends on which adaptation you are watching. The 2006 anime series by Studio Deen has 24 episodes. The 2010 Unlimited Blade Works adaptation by Studio Deen is a single film. The 2014-2015 anime series by Ufotable has 26 episodes. The 2017-2020 Heaven's Feel adaptation by Ufotable is a trilogy of films.

  5. Where can I read the visual novel of Fate Stay Night?

    You can read the visual novel on Windows PCs, PlayStation 2, PlayStation Vita, or iOS devices using official ports or fan translations. You can also read it online using unofficial websites or apps. However, you should be careful of the legality and safety of these sources before using them.

    -
    -
    \ No newline at end of file diff --git a/spaces/rahul999r/Rahul_Kannada_TTS/scripts/inference/infer.sh b/spaces/rahul999r/Rahul_Kannada_TTS/scripts/inference/infer.sh deleted file mode 100644 index dec70e1f30fb80f6957f4f3382b4c0963827cf43..0000000000000000000000000000000000000000 --- a/spaces/rahul999r/Rahul_Kannada_TTS/scripts/inference/infer.sh +++ /dev/null @@ -1,15 +0,0 @@ -gender='male' -glowdir='../../checkpoints/glow/'$gender'/' -hifidir='../../checkpoints/hifi/'$gender'/' -device='cpu' -text='testing this one' - - -timestamp=$(date +%s) -wav='../../results/'$gender'/' -wav_file=$wav/$timestamp'.wav' - - -mkdir -p $wav -python ../../utils/inference/tts.py -a $glowdir -v $hifidir -d $device -t "$text" -w $wav_file -echo "File saved at: "$wav_file diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/README.md b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/README.md deleted file mode 100644 index 38c7b0c1de367b7fbe001e13041db0a3b34bb158..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# Installation -> `npm install --save @types/node` - -# Summary -This package contains type definitions for Node.js (https://nodejs.org/). - -# Details -Files were exported from https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/node. - -### Additional Details - * Last updated: Mon, 26 Dec 2022 17:32:37 GMT - * Dependencies: none - * Global values: `AbortController`, `AbortSignal`, `__dirname`, `__filename`, `console`, `exports`, `gc`, `global`, `module`, `process`, `require`, `structuredClone` - -# Credits -These definitions were written by [Microsoft TypeScript](https://github.com/Microsoft), [DefinitelyTyped](https://github.com/DefinitelyTyped), [Alberto Schiabel](https://github.com/jkomyno), [Alvis HT Tang](https://github.com/alvis), [Andrew Makarov](https://github.com/r3nya), [Benjamin Toueg](https://github.com/btoueg), [Chigozirim C.](https://github.com/smac89), [David Junger](https://github.com/touffy), [Deividas Bakanas](https://github.com/DeividasBakanas), [Eugene Y. Q. 
Shen](https://github.com/eyqs), [Hannes Magnusson](https://github.com/Hannes-Magnusson-CK), [Huw](https://github.com/hoo29), [Kelvin Jin](https://github.com/kjin), [Klaus Meinhardt](https://github.com/ajafff), [Lishude](https://github.com/islishude), [Mariusz Wiktorczyk](https://github.com/mwiktorczyk), [Mohsen Azimi](https://github.com/mohsen1), [Nicolas Even](https://github.com/n-e), [Nikita Galkin](https://github.com/galkin), [Parambir Singh](https://github.com/parambirs), [Sebastian Silbermann](https://github.com/eps1lon), [Simon Schick](https://github.com/SimonSchick), [Thomas den Hollander](https://github.com/ThomasdenH), [Wilco Bakker](https://github.com/WilcoBakker), [wwwy3y3](https://github.com/wwwy3y3), [Samuel Ainsworth](https://github.com/samuela), [Kyle Uehlein](https://github.com/kuehlein), [Thanik Bhongbhibhat](https://github.com/bhongy), [Marcin Kopacz](https://github.com/chyzwar), [Trivikram Kamat](https://github.com/trivikr), [Junxiao Shi](https://github.com/yoursunny), [Ilia Baryshnikov](https://github.com/qwelias), [ExE Boss](https://github.com/ExE-Boss), [Piotr Błażejewicz](https://github.com/peterblazejewicz), [Anna Henningsen](https://github.com/addaleax), [Victor Perin](https://github.com/victorperin), [Yongsheng Zhang](https://github.com/ZYSzys), [NodeJS Contributors](https://github.com/NodeJS), [Linus Unnebäck](https://github.com/LinusU), [wafuwafu13](https://github.com/wafuwafu13), [Matteo Collina](https://github.com/mcollina), and [Dmitry Semigradsky](https://github.com/Semigradsky). diff --git a/spaces/realchenyuy/llama2-playground/app.py b/spaces/realchenyuy/llama2-playground/app.py deleted file mode 100644 index 8145f8b77f7bb05d9bcf958da623632e19d29f48..0000000000000000000000000000000000000000 --- a/spaces/realchenyuy/llama2-playground/app.py +++ /dev/null @@ -1,13 +0,0 @@ -import gradio as gr -from transformers import pipeline - -pipeline = pipeline(task="text-generation", model="meta-llama/Llama-2-7b") - -def predict(input): - return pipeline("I can't believe you did such a ", do_sample=False) - -gr.Interface( - fn=predict, - inputs=gr.Textbox(lines=2, placeholder="Text here..."), - outputs="text", -).launch() diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Downloadcrack !EXCLUSIVE!autocad2013mac.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Downloadcrack !EXCLUSIVE!autocad2013mac.md deleted file mode 100644 index abdbe78aa002208f53f916d95994b66081081b98..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Downloadcrack !EXCLUSIVE!autocad2013mac.md +++ /dev/null @@ -1,61 +0,0 @@ -
    -

    How to Download and Install AutoCAD 2013 for Mac

    -

    If you are looking for a way to download and install AutoCAD 2013 for Mac, you have come to the right place. AutoCAD 2013 is a powerful and versatile software for 2D and 3D design, drafting, modeling, and visualization. Whether you are an architect, engineer, designer, or hobbyist, AutoCAD 2013 can help you turn your ideas into reality.

    -




    -

    In this article, we will show you how to download and install AutoCAD 2013 for Mac in a few simple steps. You will need an Autodesk account and a valid serial number and product key to complete the installation. If you don't have an Autodesk account, you can create one for free at https://www.autodesk.com/account.

    -

    Step 1: Download the installer for Mac

    -

    The first step is to download the installer for Mac from the Autodesk website. To do this, follow these steps:

    -
      -
1. Go to https://www.autodesk.com/account and sign in with your Autodesk account credentials.
2. Under "All Products and Services", find AutoCAD 2013 and click "View Downloads".
3. Select the version (Mac OS X) and the language of your choice.
4. Click "Download Now" and save the file (with .dmg extension) to your computer.
    -

    Step 2: Extract and run the installer

    -

    The next step is to extract and run the installer from the downloaded file. To do this, follow these steps:

    -
      -
1. Double-click the .dmg file to open it.
2. The program will extract and the installer will run automatically.
3. Select "Install" and follow the prompts on the screen.
    -

    Step 3: Enter the serial number and product key

    -

    The final step is to enter the serial number and product key for AutoCAD 2013. To do this, follow these steps:

    -
      -
1. When prompted, enter the serial number and product key that you received when you purchased AutoCAD 2013. You can also find them in your Autodesk account under "All Products and Services".
2. Select the license type (network or standalone, depending on the serial number you purchased).
3. Click "Continue" and complete the installation process.
    -

    Congratulations! You have successfully installed AutoCAD 2013 for Mac.

    -

    You can now launch AutoCAD 2013 from your Applications folder or Dock and start creating amazing designs. We hope you enjoy using AutoCAD 2013 for Mac. For more information and support, please visit https://www.autodesk.com/support.

    - -

    What is AutoCAD 2013 for Mac?

    -

    AutoCAD 2013 for Mac is a version of AutoCAD that is specially designed for Mac users. It has the same features and functionality as AutoCAD 2013 for Windows, but with a native Mac interface and compatibility. AutoCAD 2013 for Mac allows you to create and edit 2D drawings and 3D models, add annotations and dimensions, apply materials and textures, render and print your designs, and much more.

    -

    -

    AutoCAD 2013 for Mac also supports the latest Mac OS X features, such as Full Screen mode, Multi-Touch gestures, Mission Control, and Retina display. You can also use AutoCAD 2013 for Mac with other Autodesk products, such as AutoCAD LT, AutoCAD Architecture, AutoCAD Civil 3D, and AutoCAD Map 3D.

    -

    What are the system requirements for AutoCAD 2013 for Mac?

    -

    To run AutoCAD 2013 for Mac smoothly, you will need a Mac computer that meets the following minimum system requirements:

    -
      -
• Operating system: Mac OS X v10.7.4 or later (Lion), Mac OS X v10.8 or later (Mountain Lion)
• Processor: Intel Core 2 Duo or greater
• Memory: 3 GB of RAM (4 GB recommended)
• Hard disk space: 2.5 GB free disk space for download and installation
• Display: 1280 x 800 display with true color (2880 x 1800 with Retina display recommended)
• Graphics card: 256 MB VRAM with OpenGL 2.0 support (512 MB recommended)
• Mouse: Apple Mouse, Apple Magic Mouse, Magic Trackpad, MacBook Pro trackpad, or Microsoft-compliant mouse
• Browser: Apple Safari 5.0 or later
    -

    How to update AutoCAD 2013 for Mac?

    -

    To keep your AutoCAD 2013 for Mac up to date and secure, you should install the latest service packs and hotfixes that are available from Autodesk. These updates can fix bugs, improve performance, and add new features to your software. To download and install the updates, follow these steps:

    -
      -
1. Go to https://www.autodesk.com/support and select "Downloads".
2. Select "AutoCAD" from the product list and choose "Mac OS X" from the platform list.
3. Select "AutoCAD 2013" from the version list and click "View All".
4. Find the latest service pack or hotfix that matches your software version and click "Download".
5. Save the file (with .dmg extension) to your computer.
6. Double-click the .dmg file to open it.
7. The program will extract and the installer will run automatically.
8. Select "Install" and follow the prompts on the screen.
    -

    You have now updated your AutoCAD 2013 for Mac to the latest version.

    -
    -
    \ No newline at end of file diff --git a/spaces/riccorl/relik-entity-linking/relik/inference/serve/backend/retriever.py b/spaces/riccorl/relik-entity-linking/relik/inference/serve/backend/retriever.py deleted file mode 100644 index e796893e76b83377a5f8b2c7afdccce21756dcbd..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/inference/serve/backend/retriever.py +++ /dev/null @@ -1,206 +0,0 @@ -import logging -from pathlib import Path -from typing import List, Optional, Union - -from relik.common.utils import is_package_available - -if not is_package_available("fastapi"): - raise ImportError( - "FastAPI is not installed. Please install FastAPI with `pip install relik[serve]`." - ) -from fastapi import FastAPI, HTTPException - -if not is_package_available("ray"): - raise ImportError( - "Ray is not installed. Please install Ray with `pip install relik[serve]`." - ) -from ray import serve - -from relik.common.log import get_logger -from relik.inference.data.tokenizers import SpacyTokenizer, WhitespaceTokenizer -from relik.inference.data.window.manager import WindowManager -from relik.inference.serve.backend.utils import ( - RayParameterManager, - ServerParameterManager, -) -from relik.retriever.data.utils import batch_generator -from relik.retriever.pytorch_modules import GoldenRetriever - -logger = get_logger(__name__, level=logging.INFO) - -VERSION = {} # type: ignore -with open(Path(__file__).parent.parent.parent / "version.py", "r") as version_file: - exec(version_file.read(), VERSION) - -# Env variables for server -SERVER_MANAGER = ServerParameterManager() -RAY_MANAGER = RayParameterManager() - -app = FastAPI( - title="Golden Retriever", - version=VERSION["VERSION"], - description="Golden Retriever REST API", -) - - -@serve.deployment( - ray_actor_options={ - "num_gpus": RAY_MANAGER.num_gpus if SERVER_MANAGER.device == "cuda" else 0 - }, - autoscaling_config={ - "min_replicas": RAY_MANAGER.min_replicas, - "max_replicas": RAY_MANAGER.max_replicas, - }, -) -@serve.ingress(app) -class GoldenRetrieverServer: - def __init__( - self, - question_encoder: str, - document_index: str, - passage_encoder: Optional[str] = None, - top_k: int = 100, - device: str = "cpu", - index_device: Optional[str] = None, - precision: int = 32, - index_precision: Optional[int] = None, - use_faiss: bool = False, - window_batch_size: int = 32, - window_size: int = 32, - window_stride: int = 16, - split_on_spaces: bool = False, - ): - # parameters - self.question_encoder = question_encoder - self.passage_encoder = passage_encoder - self.document_index = document_index - self.top_k = top_k - self.device = device - self.index_device = index_device or device - self.precision = precision - self.index_precision = index_precision or precision - self.use_faiss = use_faiss - self.window_batch_size = window_batch_size - self.window_size = window_size - self.window_stride = window_stride - self.split_on_spaces = split_on_spaces - - # log stuff for debugging - logger.info("Initializing GoldenRetrieverServer with parameters:") - logger.info(f"QUESTION_ENCODER: {self.question_encoder}") - logger.info(f"PASSAGE_ENCODER: {self.passage_encoder}") - logger.info(f"DOCUMENT_INDEX: {self.document_index}") - logger.info(f"TOP_K: {self.top_k}") - logger.info(f"DEVICE: {self.device}") - logger.info(f"INDEX_DEVICE: {self.index_device}") - logger.info(f"PRECISION: {self.precision}") - logger.info(f"INDEX_PRECISION: {self.index_precision}") - logger.info(f"WINDOW_BATCH_SIZE: {self.window_batch_size}") - 
logger.info(f"SPLIT_ON_SPACES: {self.split_on_spaces}") - - self.retriever = GoldenRetriever( - question_encoder=self.question_encoder, - passage_encoder=self.passage_encoder, - document_index=self.document_index, - device=self.device, - index_device=self.index_device, - index_precision=self.index_precision, - ) - self.retriever.eval() - - if self.split_on_spaces: - logger.info("Using WhitespaceTokenizer") - self.tokenizer = WhitespaceTokenizer() - # logger.info("Using RegexTokenizer") - # self.tokenizer = RegexTokenizer() - else: - logger.info("Using SpacyTokenizer") - self.tokenizer = SpacyTokenizer(language="en") - - self.window_manager = WindowManager(tokenizer=self.tokenizer) - - # @serve.batch() - async def handle_batch( - self, documents: List[str], document_topics: List[str] - ) -> List: - return self.retriever.retrieve( - documents, text_pair=document_topics, k=self.top_k, precision=self.precision - ) - - @app.post("/api/retrieve") - async def retrieve_endpoint( - self, - documents: Union[str, List[str]], - document_topics: Optional[Union[str, List[str]]] = None, - ): - try: - # normalize input - if isinstance(documents, str): - documents = [documents] - if document_topics is not None: - if isinstance(document_topics, str): - document_topics = [document_topics] - assert len(documents) == len(document_topics) - # get predictions - return await self.handle_batch(documents, document_topics) - except Exception as e: - # log the entire stack trace - logger.exception(e) - raise HTTPException(status_code=500, detail=f"Server Error: {e}") - - @app.post("/api/gerbil") - async def gerbil_endpoint(self, documents: Union[str, List[str]]): - try: - # normalize input - if isinstance(documents, str): - documents = [documents] - - # output list - windows_passages = [] - # split documents into windows - document_windows = [ - window - for doc_id, document in enumerate(documents) - for window in self.window_manager( - self.tokenizer, - document, - window_size=self.window_size, - stride=self.window_stride, - doc_id=doc_id, - ) - ] - - # get text and topic from document windows and create new list - model_inputs = [ - (window.text, window.doc_topic) for window in document_windows - ] - - # batch generator - for batch in batch_generator( - model_inputs, batch_size=self.window_batch_size - ): - text, text_pair = zip(*batch) - batch_predictions = await self.handle_batch(text, text_pair) - windows_passages.extend( - [ - [p.label for p in predictions] - for predictions in batch_predictions - ] - ) - - # add passage to document windows - for window, passages in zip(document_windows, windows_passages): - # clean up passages (remove everything after first tag if present) - passages = [c.split(" ", 1)[0] for c in passages] - window.window_candidates = passages - - # return document windows - return document_windows - - except Exception as e: - # log the entire stack trace - logger.exception(e) - raise HTTPException(status_code=500, detail=f"Server Error: {e}") - - -server = GoldenRetrieverServer.bind(**vars(SERVER_MANAGER)) diff --git a/spaces/richardzhangy26/yandian_flow_classification/configs/liteflownet/liteflownet_ft_4x1_500k_kitti_320x896.py b/spaces/richardzhangy26/yandian_flow_classification/configs/liteflownet/liteflownet_ft_4x1_500k_kitti_320x896.py deleted file mode 100644 index e64e87c230150ffa00917d54fc66676ab724fa3a..0000000000000000000000000000000000000000 --- a/spaces/richardzhangy26/yandian_flow_classification/configs/liteflownet/liteflownet_ft_4x1_500k_kitti_320x896.py +++ /dev/null @@ -1,86 
+0,0 @@ -_base_ = [ - '../_base_/datasets/kitti2012_kitti2015_320x896.py', - '../_base_/default_runtime.py' -] - -model = dict( - type='LiteFlowNet', - encoder=dict( - type='NetC', - in_channels=3, - pyramid_levels=[ - 'level1', 'level2', 'level3', 'level4', 'level5', 'level6' - ], - out_channels=(32, 32, 64, 96, 128, 192), - strides=(1, 2, 2, 2, 2, 2), - num_convs=(1, 3, 2, 2, 1, 1), - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - init_cfg=None), - decoder=dict( - type='NetE', - in_channels=dict( - level2=32, level3=64, level4=96, level5=128, level6=192), - corr_channels=dict( - level2=49, level3=49, level4=49, level5=49, level6=49), - sin_channels=dict( - level2=130, level3=130, level4=194, level5=258, level6=386), - rin_channels=dict( - level2=131, level3=131, level4=131, level5=131, level6=195), - feat_channels=64, - mfeat_channels=(128, 64, 32), - sfeat_channels=(128, 64, 32), - rfeat_channels=(128, 128, 64, 64, 32, 32), - patch_size=dict(level2=7, level3=5, level4=5, level5=3, level6=3), - corr_cfg=dict( - level2=dict( - type='Correlation', - max_displacement=3, - stride=2, - dilation_patch=2), - level3=dict( - type='Correlation', - max_displacement=3, - stride=2, - dilation_patch=2), - level4=dict(type='Correlation', max_displacement=3), - level5=dict(type='Correlation', max_displacement=3), - level6=dict(type='Correlation', max_displacement=3)), - warp_cfg=dict(type='Warp', align_corners=True, use_mask=True), - flow_div=20., - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - scaled_corr=False, - regularized_flow=True, - extra_training_loss=False, - flow_loss=dict( - type='MultiLevelCharbonnierLoss', - resize_flow='upsample', - weights=dict( - level6=0.32, - level5=0.08, - level4=0.02, - level3=0.01, - level2=0.005), - q=0.2, - eps=0.01, - reduction='sum'), - init_cfg=None), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(), -) - -optimizer = dict(type='Adam', lr=5e-5, weight_decay=0.0004, betas=(0.9, 0.999)) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', by_epoch=False, gamma=0.5, step=[200000, 300000, 400000]) -runner = dict(type='IterBasedRunner', max_iters=500000) -checkpoint_config = dict(by_epoch=False, interval=50000) -evaluation = dict(interval=50000, metric='EPE') - -# Train on FlyingChairs and FlyingThings3D_subset and finetune on KITTI -load_from = 'https://download.openmmlab.com/mmflow/liteflownet/liteflownet_8x1_500k_flyingthings3d_subset_384x768.pth' # noqa diff --git a/spaces/rishikesh/twitterEngagementPredictor/app.py b/spaces/rishikesh/twitterEngagementPredictor/app.py deleted file mode 100644 index b6e77f9831d70e11bbc76d15caa4248851dcaef8..0000000000000000000000000000000000000000 --- a/spaces/rishikesh/twitterEngagementPredictor/app.py +++ /dev/null @@ -1,67 +0,0 @@ -import streamlit as st -import pickle -from datetime import datetime -from sentence_transformers import SentenceTransformer -import numpy as np -import re - -loaded_scaler = pickle.load(open('minMaxScaler.sav', 'rb')) -loaded_model = pickle.load(open('justin_rf_model.sav', 'rb')) -vectorizer = SentenceTransformer('all-MiniLM-L6-v2') - -def featurize(tweet, tweet_date, tweet_time): - year = float(tweet_date.year) - month = float(tweet_date.month) - day = float(tweet_date.day) - hr = float(tweet_time.hour) - minutes = float(tweet_time.minute) - weekDay = float(tweet_date.weekday()) - - # preprocess tweets - tweet = re.sub(r'http\S+', 'url', tweet) - 
- # count the number of accounts tagged and hashtags mentioned in tweet - tagCount = float(len(re.findall(r"@(\w+)", tweet))) - hashTagsCount = float(len(re.findall(r"#(\w+)", tweet))) - - # vectorize data - x1 = np.array([tagCount, hashTagsCount, minutes, hr, day, weekDay, month, year ]) - x1 = loaded_scaler.transform(x1.reshape(1,-1)) - x2 = vectorizer.encode(tweet) - inp_vec = np.concatenate([x2, x1.flatten()]).reshape(1,-1) - - return inp_vec - -def getOutput(inp_vec): - output = loaded_model.predict(inp_vec) - return output[0] - -def main(): - - st.title("Welcome to tweet engagement predictor") - with st.form("my_form", clear_on_submit=True): - tweet = st.text_input('Enter a tweet') - tweet_date = st.date_input("Enter the date of tweeting", - value = datetime(2018,1,1,0,0), - min_value=datetime(2015,1,1,0,0), - max_value=datetime(2021,12,12,23,59)) - tweet_time = st.time_input('Enter the time of tweeting') - - # Every form must have a submit button. - submitted = st.form_submit_button("Submit") - - if submitted: - inp_vec = featurize(tweet, tweet_date, tweet_time) - output = getOutput(inp_vec) - st.write(tweet) - st.write(tweet_date) - st.write(tweet_time) - if output == 1 : - st.write('Given tweet will get low engagment - less than 8800 ') - elif output == 2 : - st.write('Given tweet will get moderate engagment - retweets in the range of 8800 to 24000') - elif output == 3 : - st.write('Given tweet will get high engagment - more than 24000 retweets ') - -if __name__ == '__main__' : - main() diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/necks/fpn.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/necks/fpn.py deleted file mode 100644 index 4bdb5b22156b579dc262894fd0c4a141f4479854..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/necks/fpn.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, auto_fp16 - -from ..builder import NECKS - - -@NECKS.register_module() -class FPN(BaseModule): - r"""Feature Pyramid Network. - - This is an implementation of paper `Feature Pyramid Networks for Object - Detection `_. - - Args: - in_channels (list[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale). - num_outs (int): Number of output scales. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool | str): If bool, it decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, it is equivalent to `add_extra_convs='on_input'`. - If str, it specifies the source feature map of the extra convs. - Only the following options are allowed - - - 'on_input': Last feat map of neck inputs (i.e. backbone feature). - - 'on_lateral': Last feature map after lateral convs. - - 'on_output': The last output feature map after fpn convs. - relu_before_extra_convs (bool): Whether to apply relu before the extra - conv. Default: False. - no_norm_on_lateral (bool): Whether to apply norm on lateral. - Default: False. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. 
- act_cfg (dict): Config dict for activation layer in ConvModule. - Default: None. - upsample_cfg (dict): Config dict for interpolate layer. - Default: dict(mode='nearest'). - init_cfg (dict or list[dict], optional): Initialization config dict. - - Example: - >>> import torch - >>> in_channels = [2, 3, 5, 7] - >>> scales = [340, 170, 84, 43] - >>> inputs = [torch.rand(1, c, s, s) - ... for c, s in zip(in_channels, scales)] - >>> self = FPN(in_channels, 11, len(in_channels)).eval() - >>> outputs = self.forward(inputs) - >>> for i in range(len(outputs)): - ... print(f'outputs[{i}].shape = {outputs[i].shape}') - outputs[0].shape = torch.Size([1, 11, 340, 340]) - outputs[1].shape = torch.Size([1, 11, 170, 170]) - outputs[2].shape = torch.Size([1, 11, 84, 84]) - outputs[3].shape = torch.Size([1, 11, 43, 43]) - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=0, - end_level=-1, - add_extra_convs=False, - relu_before_extra_convs=False, - no_norm_on_lateral=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None, - upsample_cfg=dict(mode='nearest'), - init_cfg=dict( - type='Xavier', layer='Conv2d', distribution='uniform')): - super(FPN, self).__init__(init_cfg) - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.relu_before_extra_convs = relu_before_extra_convs - self.no_norm_on_lateral = no_norm_on_lateral - self.fp16_enabled = False - self.upsample_cfg = upsample_cfg.copy() - - if end_level == -1 or end_level == self.num_ins - 1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level is not the last level, no extra level is allowed - self.backbone_end_level = end_level + 1 - assert end_level < self.num_ins - assert num_outs == end_level - start_level + 1 - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - assert isinstance(add_extra_convs, (str, bool)) - if isinstance(add_extra_convs, str): - # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' - assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') - elif add_extra_convs: # True - self.add_extra_convs = 'on_input' - - self.lateral_convs = nn.ModuleList() - self.fpn_convs = nn.ModuleList() - - for i in range(self.start_level, self.backbone_end_level): - l_conv = ConvModule( - in_channels[i], - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, - act_cfg=act_cfg, - inplace=False) - fpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - - self.lateral_convs.append(l_conv) - self.fpn_convs.append(fpn_conv) - - # add extra conv layers (e.g., RetinaNet) - extra_levels = num_outs - self.backbone_end_level + self.start_level - if self.add_extra_convs and extra_levels >= 1: - for i in range(extra_levels): - if i == 0 and self.add_extra_convs == 'on_input': - in_channels = self.in_channels[self.backbone_end_level - 1] - else: - in_channels = out_channels - extra_fpn_conv = ConvModule( - in_channels, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - self.fpn_convs.append(extra_fpn_conv) - - @auto_fp16() - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.in_channels) - - # build laterals - laterals = [ - 
lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - - # build top-down path - used_backbone_levels = len(laterals) - for i in range(used_backbone_levels - 1, 0, -1): - # In some cases, fixing `scale factor` (e.g. 2) is preferred, but - # it cannot co-exist with `size` in `F.interpolate`. - if 'scale_factor' in self.upsample_cfg: - # fix runtime error of "+=" inplace operation in PyTorch 1.10 - laterals[i - 1] = laterals[i - 1] + F.interpolate( - laterals[i], **self.upsample_cfg) - else: - prev_shape = laterals[i - 1].shape[2:] - laterals[i - 1] = laterals[i - 1] + F.interpolate( - laterals[i], size=prev_shape, **self.upsample_cfg) - - # build outputs - # part 1: from original levels - outs = [ - self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) - ] - # part 2: add extra levels - if self.num_outs > len(outs): - # use max pool to get more levels on top of outputs - # (e.g., Faster R-CNN, Mask R-CNN) - if not self.add_extra_convs: - for i in range(self.num_outs - used_backbone_levels): - outs.append(F.max_pool2d(outs[-1], 1, stride=2)) - # add conv layers on top of original feature maps (RetinaNet) - else: - if self.add_extra_convs == 'on_input': - extra_source = inputs[self.backbone_end_level - 1] - elif self.add_extra_convs == 'on_lateral': - extra_source = laterals[-1] - elif self.add_extra_convs == 'on_output': - extra_source = outs[-1] - else: - raise NotImplementedError - outs.append(self.fpn_convs[used_backbone_levels](extra_source)) - for i in range(used_backbone_levels + 1, self.num_outs): - if self.relu_before_extra_convs: - outs.append(self.fpn_convs[i](F.relu(outs[-1]))) - else: - outs.append(self.fpn_convs[i](outs[-1])) - return tuple(outs) diff --git a/spaces/rohan13/coursera-qa-bot/docs/01_course-orientation/02_about-your-classmates/02_social-media_instructions.html b/spaces/rohan13/coursera-qa-bot/docs/01_course-orientation/02_about-your-classmates/02_social-media_instructions.html deleted file mode 100644 index 8c0de9a495bbb997877f35e04aa9de6c24a93276..0000000000000000000000000000000000000000 --- a/spaces/rohan13/coursera-qa-bot/docs/01_course-orientation/02_about-your-classmates/02_social-media_instructions.html +++ /dev/null @@ -1,162 +0,0 @@ - - -

    - Social Media -

    -

- Learning takes place not only through course instruction but also through quality interaction with your peers. Research suggests that participating in a learning community around your interests and passions helps you stay motivated to learn. Thus, we encourage you to use our various social media platforms to connect with thousands of your peers from across the world. Learn from others, network, create study groups, discuss interesting course topics, and share your own perspectives about this topic. You can even set up or get notified about physical meetups so that you can enhance your learning by interacting face-to-face with others who share your interests. The more active these communities are, the more value they will bring to all of us. We hope that you will take a leading role in keeping these communities active and valuable, so please join your preferred platform and share your ideas! -

    -

    - - Connect to your classmates via the following social media sites related to our 3D Printing courses. Use the hashtags #ideas2objects and #3dprinting - -

    - -

    - - You may also be interested in the following social media sites: - -

    - -

- If you find another social media page or community related to this course, feel free to share it in our discussion forums. -

    -

    - - NOTE: Please do not post links to copyrighted materials in the Coursera discussion forums or on social networks. Doing so is a violation of the - - Coursera Terms of Service - - . - -

    -

    -

    -
    - - - diff --git a/spaces/rorallitri/biomedical-language-models/logs/Dalal Street Journal PDF Download Get the Latest Issue of Indias No 1 Equity Research and Capital Investment Magazine.md b/spaces/rorallitri/biomedical-language-models/logs/Dalal Street Journal PDF Download Get the Latest Issue of Indias No 1 Equity Research and Capital Investment Magazine.md deleted file mode 100644 index afde7278f6b98c9a7147217af0401f0091da66d2..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Dalal Street Journal PDF Download Get the Latest Issue of Indias No 1 Equity Research and Capital Investment Magazine.md +++ /dev/null @@ -1,6 +0,0 @@ -

    dalal street journal pdf download


    DOWNLOAD 🔗 https://tinurll.com/2uzn1y



    -
-
    -
    -
    -

    diff --git a/spaces/roshithindia/text_summarization/app.py b/spaces/roshithindia/text_summarization/app.py deleted file mode 100644 index cdcea8a9e2ff4548bc9ffcfa6252f71361a53d9f..0000000000000000000000000000000000000000 --- a/spaces/roshithindia/text_summarization/app.py +++ /dev/null @@ -1,9 +0,0 @@ -from transformers import pipeline -import streamlit as st - -st.header('Text Summarizer') - -model = pipeline('summarization') -title = st.text_input('Enter the text to summarize') -if title: - st.write(model(title, max_length=220, min_length=30, do_sample=False)) \ No newline at end of file diff --git a/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/decoders.py b/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/decoders.py deleted file mode 100644 index efdd8fcb624c1b0a6d127fcf0aa8548c4ca1524d..0000000000000000000000000000000000000000 --- a/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/decoders.py +++ /dev/null @@ -1,30 +0,0 @@ -import torch -from torch import nn -import random - - -class ScaledDecoder(nn.Module): - def __init__(self, ninp, nhid, nout): - super().__init__() - self.linear = nn.Linear(ninp, nhid) - self.linear1 = nn.Linear(nhid, nout) - self.linear2 = nn.Linear(nhid, 10) - - def forward(self, x): - #return torch.cat([self.linear1(x), self.linear2(x)], -1) - x = self.linear(x) - x = nn.GELU()(x) - temps = self.linear2(x).softmax(-1) @ torch.tensor([1.,1.4,1.7,2.,5.,10.,20.,40.,80.,160.], device=x.device) - if random.random() > .99: - print(temps.shape,temps[:,:2]) - return self.linear1(x) / temps.unsqueeze(-1) - -class FixedScaledDecoder(nn.Module): - def __init__(self, ninp, nhid, nout): - super().__init__() - self.mapper = nn.Sequential(nn.Linear(ninp, nhid), nn.GELU(), nn.Linear(nhid, nout)) - self.T = nn.Parameter(torch.ones(10000)/10000) - - def forward(self, x): - return self.mapper(x)/self.T.sum() - diff --git a/spaces/samyak152002/Quantumn-Multiplication/README.md b/spaces/samyak152002/Quantumn-Multiplication/README.md deleted file mode 100644 index adfc4ded81ec69f2591c019682d88711e277b834..0000000000000000000000000000000000000000 --- a/spaces/samyak152002/Quantumn-Multiplication/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Quantumn Multiplication -emoji: 💻 -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.45.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/scedlatioru/img-to-music/example/Borland Delphi.md b/spaces/scedlatioru/img-to-music/example/Borland Delphi.md deleted file mode 100644 index cc196aeb06c3345dc2bc3720a3c8b5047feaca83..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Borland Delphi.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Borland Delphi


    Download Zip ……… https://gohhs.com/2uEzG8



    -
-Borland Delphi. Lingobit Localizer is a perfect solution for localization of applications developed with Borland Delphi. Lingobit Localizer uses binary localization ...
    -
    -
    -

    diff --git a/spaces/sdhsdhk/bingosjj/tests/kblob.ts b/spaces/sdhsdhk/bingosjj/tests/kblob.ts deleted file mode 100644 index 9e15b41c1c94a690beb61b23cdb42fc78767ccd2..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingosjj/tests/kblob.ts +++ /dev/null @@ -1,27 +0,0 @@ -import FormData from 'form-data' - -import { fetch } from '@/lib/isomorphic' - -const formData = new FormData() - -const knowledgeRequest = {"imageInfo":{"url":"https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png"},"knowledgeRequest":{"invokedSkills":["ImageById"],"subscriptionId":"Bing.Chat.Multimodal","invokedSkillsRequestData":{"enableFaceBlur":true},"convoData":{"convoid":"51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080","convotone":"Creative"}}} - -formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - - -fetch('https://bing.vcanbb.top/images/kblob', - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": "https://bing.vcanbb.top/web/index.html", - "Referrer-Policy": "origin-when-cross-origin", - ...formData.getHeaders() - } - - } -).then(res => res.text()) -.then(res => console.log('res', res)) diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/lm/seq_rnn.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/lm/seq_rnn.py deleted file mode 100644 index ee5f026e3811c790f283dc9298e1221d783c0e4f..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/lm/seq_rnn.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Sequential implementation of Recurrent Neural Network Language Model.""" - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from espnet.nets.lm_interface import LMInterface - - -class SequentialRNNLM(LMInterface, torch.nn.Module): - """Sequential RNNLM. - - See also: - https://github.com/pytorch/examples/blob/4581968193699de14b56527296262dd76ab43557/word_language_model/model.py - - """ - - @staticmethod - def add_arguments(parser): - """Add arguments to command line argument parser.""" - parser.add_argument( - "--type", - type=str, - default="lstm", - nargs="?", - choices=["lstm", "gru"], - help="Which type of RNN to use", - ) - parser.add_argument( - "--layer", "-l", type=int, default=2, help="Number of hidden layers" - ) - parser.add_argument( - "--unit", "-u", type=int, default=650, help="Number of hidden units" - ) - parser.add_argument( - "--dropout-rate", type=float, default=0.5, help="dropout probability" - ) - return parser - - def __init__(self, n_vocab, args): - """Initialize class. - - Args: - n_vocab (int): The size of the vocabulary - args (argparse.Namespace): configurations. 
see py:method:`add_arguments` - - """ - torch.nn.Module.__init__(self) - self._setup( - rnn_type=args.type.upper(), - ntoken=n_vocab, - ninp=args.unit, - nhid=args.unit, - nlayers=args.layer, - dropout=args.dropout_rate, - ) - - def _setup( - self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False - ): - self.drop = nn.Dropout(dropout) - self.encoder = nn.Embedding(ntoken, ninp) - if rnn_type in ["LSTM", "GRU"]: - self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout) - else: - try: - nonlinearity = {"RNN_TANH": "tanh", "RNN_RELU": "relu"}[rnn_type] - except KeyError: - raise ValueError( - "An invalid option for `--model` was supplied, " - "options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']" - ) - self.rnn = nn.RNN( - ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout - ) - self.decoder = nn.Linear(nhid, ntoken) - - # Optionally tie weights as in: - # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016) - # https://arxiv.org/abs/1608.05859 - # and - # "Tying Word Vectors and Word Classifiers: - # A Loss Framework for Language Modeling" (Inan et al. 2016) - # https://arxiv.org/abs/1611.01462 - if tie_weights: - if nhid != ninp: - raise ValueError( - "When using the tied flag, nhid must be equal to emsize" - ) - self.decoder.weight = self.encoder.weight - - self._init_weights() - - self.rnn_type = rnn_type - self.nhid = nhid - self.nlayers = nlayers - - def _init_weights(self): - # NOTE: original init in pytorch/examples - # initrange = 0.1 - # self.encoder.weight.data.uniform_(-initrange, initrange) - # self.decoder.bias.data.zero_() - # self.decoder.weight.data.uniform_(-initrange, initrange) - # NOTE: our default.py:RNNLM init - for param in self.parameters(): - param.data.uniform_(-0.1, 0.1) - - def forward(self, x, t): - """Compute LM loss value from buffer sequences. - - Args: - x (torch.Tensor): Input ids. (batch, len) - t (torch.Tensor): Target ids. (batch, len) - - Returns: - tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Tuple of - loss to backward (scalar), - negative log-likelihood of t: -log p(t) (scalar) and - the number of elements in x (scalar) - - Notes: - The last two return values are used - in perplexity: p(t)^{-n} = exp(-log p(t) / n) - - """ - y = self._before_loss(x, None)[0] - mask = (x != 0).to(y.dtype) - loss = F.cross_entropy(y.view(-1, y.shape[-1]), t.view(-1), reduction="none") - logp = loss * mask.view(-1) - logp = logp.sum() - count = mask.sum() - return logp / count, logp, count - - def _before_loss(self, input, hidden): - emb = self.drop(self.encoder(input)) - output, hidden = self.rnn(emb, hidden) - output = self.drop(output) - decoded = self.decoder( - output.view(output.size(0) * output.size(1), output.size(2)) - ) - return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden - - def init_state(self, x): - """Get an initial state for decoding. - - Args: - x (torch.Tensor): The encoded feature tensor - - Returns: initial state - - """ - bsz = 1 - weight = next(self.parameters()) - if self.rnn_type == "LSTM": - return ( - weight.new_zeros(self.nlayers, bsz, self.nhid), - weight.new_zeros(self.nlayers, bsz, self.nhid), - ) - else: - return weight.new_zeros(self.nlayers, bsz, self.nhid) - - def score(self, y, state, x): - """Score new token. - - Args: - y (torch.Tensor): 1D torch.int64 prefix tokens. - state: Scorer state for prefix tokens - x (torch.Tensor): 2D encoder feature that generates ys. 
- - Returns: - tuple[torch.Tensor, Any]: Tuple of - torch.float32 scores for next token (n_vocab) - and next state for ys - - """ - y, new_state = self._before_loss(y[-1].view(1, 1), state) - logp = y.log_softmax(dim=-1).view(-1) - return logp, new_state diff --git a/spaces/segments-tobias/conex/espnet2/enh/encoder/stft_encoder.py b/spaces/segments-tobias/conex/espnet2/enh/encoder/stft_encoder.py deleted file mode 100644 index a81f07b2257b395b3a6990d42dd0ec9d0fb6c710..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/enh/encoder/stft_encoder.py +++ /dev/null @@ -1,51 +0,0 @@ -import torch -from torch_complex.tensor import ComplexTensor - -from espnet2.enh.encoder.abs_encoder import AbsEncoder -from espnet2.layers.stft import Stft - - -class STFTEncoder(AbsEncoder): - """STFT encoder for speech enhancement and separation """ - - def __init__( - self, - n_fft: int = 512, - win_length: int = None, - hop_length: int = 128, - window="hann", - center: bool = True, - normalized: bool = False, - onesided: bool = True, - ): - super().__init__() - self.stft = Stft( - n_fft=n_fft, - win_length=win_length, - hop_length=hop_length, - window=window, - center=center, - normalized=normalized, - onesided=onesided, - ) - - self._output_dim = n_fft // 2 + 1 if onesided else n_fft - - @property - def output_dim(self) -> int: - return self._output_dim - - def forward(self, input: torch.Tensor, ilens: torch.Tensor): - """Forward. - - Args: - input (torch.Tensor): mixed speech [Batch, sample] - ilens (torch.Tensor): input lengths [Batch] - Returns: - stft spectrum (torch.ComplexTensor): (Batch, Frames, Freq) - or (Batch, Frames, Channels, Freq) - """ - spectrum, flens = self.stft(input, ilens) - spectrum = ComplexTensor(spectrum[..., 0], spectrum[..., 1]) - - return spectrum, flens diff --git a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/controlnet_annotator/midas/midas/base_model.py b/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/controlnet_annotator/midas/midas/base_model.py deleted file mode 100644 index 5cf430239b47ec5ec07531263f26f5c24a2311cd..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/controlnet_annotator/midas/midas/base_model.py +++ /dev/null @@ -1,16 +0,0 @@ -import torch - - -class BaseModel(torch.nn.Module): - def load(self, path): - """Load model from file. 
- - Args: - path (str): file path - """ - parameters = torch.load(path, map_location=torch.device('cpu')) - - if "optimizer" in parameters: - parameters = parameters["model"] - - self.load_state_dict(parameters) diff --git a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/dist_util.py b/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/dist_util.py deleted file mode 100644 index 0fab887b2cb1ce8533d2e8fdee72ae0c24f68fd0..0000000000000000000000000000000000000000 --- a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/dist_util.py +++ /dev/null @@ -1,82 +0,0 @@ -# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py # noqa: E501 -import functools -import os -import subprocess -import torch -import torch.distributed as dist -import torch.multiprocessing as mp - - -def init_dist(launcher, backend='nccl', **kwargs): - if mp.get_start_method(allow_none=True) is None: - mp.set_start_method('spawn') - if launcher == 'pytorch': - _init_dist_pytorch(backend, **kwargs) - elif launcher == 'slurm': - _init_dist_slurm(backend, **kwargs) - else: - raise ValueError(f'Invalid launcher type: {launcher}') - - -def _init_dist_pytorch(backend, **kwargs): - rank = int(os.environ['RANK']) - num_gpus = torch.cuda.device_count() - torch.cuda.set_device(rank % num_gpus) - dist.init_process_group(backend=backend, **kwargs) - - -def _init_dist_slurm(backend, port=None): - """Initialize slurm distributed training environment. - - If argument ``port`` is not specified, then the master port will be system - environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system - environment variable, then a default port ``29500`` will be used. - - Args: - backend (str): Backend of torch.distributed. - port (int, optional): Master port. Defaults to None. 
- """ - proc_id = int(os.environ['SLURM_PROCID']) - ntasks = int(os.environ['SLURM_NTASKS']) - node_list = os.environ['SLURM_NODELIST'] - num_gpus = torch.cuda.device_count() - torch.cuda.set_device(proc_id % num_gpus) - addr = subprocess.getoutput(f'scontrol show hostname {node_list} | head -n1') - # specify master port - if port is not None: - os.environ['MASTER_PORT'] = str(port) - elif 'MASTER_PORT' in os.environ: - pass # use MASTER_PORT in the environment variable - else: - # 29500 is torch.distributed default port - os.environ['MASTER_PORT'] = '29500' - os.environ['MASTER_ADDR'] = addr - os.environ['WORLD_SIZE'] = str(ntasks) - os.environ['LOCAL_RANK'] = str(proc_id % num_gpus) - os.environ['RANK'] = str(proc_id) - dist.init_process_group(backend=backend) - - -def get_dist_info(): - if dist.is_available(): - initialized = dist.is_initialized() - else: - initialized = False - if initialized: - rank = dist.get_rank() - world_size = dist.get_world_size() - else: - rank = 0 - world_size = 1 - return rank, world_size - - -def master_only(func): - - @functools.wraps(func) - def wrapper(*args, **kwargs): - rank, _ = get_dist_info() - if rank == 0: - return func(*args, **kwargs) - - return wrapper diff --git a/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/parsing/parsenet.py b/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/parsing/parsenet.py deleted file mode 100644 index e178ebe43a1ef666aaea0bc0faf629485c22a24f..0000000000000000000000000000000000000000 --- a/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/parsing/parsenet.py +++ /dev/null @@ -1,194 +0,0 @@ -"""Modified from https://github.com/chaofengc/PSFRGAN -""" -import numpy as np -import torch.nn as nn -from torch.nn import functional as F - - -class NormLayer(nn.Module): - """Normalization Layers. - - Args: - channels: input channels, for batch norm and instance norm. - input_size: input shape without batch size, for layer norm. - """ - - def __init__(self, channels, normalize_shape=None, norm_type='bn'): - super(NormLayer, self).__init__() - norm_type = norm_type.lower() - self.norm_type = norm_type - if norm_type == 'bn': - self.norm = nn.BatchNorm2d(channels, affine=True) - elif norm_type == 'in': - self.norm = nn.InstanceNorm2d(channels, affine=False) - elif norm_type == 'gn': - self.norm = nn.GroupNorm(32, channels, affine=True) - elif norm_type == 'pixel': - self.norm = lambda x: F.normalize(x, p=2, dim=1) - elif norm_type == 'layer': - self.norm = nn.LayerNorm(normalize_shape) - elif norm_type == 'none': - self.norm = lambda x: x * 1.0 - else: - assert 1 == 0, f'Norm type {norm_type} not support.' - - def forward(self, x, ref=None): - if self.norm_type == 'spade': - return self.norm(x, ref) - else: - return self.norm(x) - - -class ReluLayer(nn.Module): - """Relu Layer. - - Args: - relu type: type of relu layer, candidates are - - ReLU - - LeakyReLU: default relu slope 0.2 - - PRelu - - SELU - - none: direct pass - """ - - def __init__(self, channels, relu_type='relu'): - super(ReluLayer, self).__init__() - relu_type = relu_type.lower() - if relu_type == 'relu': - self.func = nn.ReLU(True) - elif relu_type == 'leakyrelu': - self.func = nn.LeakyReLU(0.2, inplace=True) - elif relu_type == 'prelu': - self.func = nn.PReLU(channels) - elif relu_type == 'selu': - self.func = nn.SELU(True) - elif relu_type == 'none': - self.func = lambda x: x * 1.0 - else: - assert 1 == 0, f'Relu type {relu_type} not support.' 
- - def forward(self, x): - return self.func(x) - - -class ConvLayer(nn.Module): - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - scale='none', - norm_type='none', - relu_type='none', - use_pad=True, - bias=True): - super(ConvLayer, self).__init__() - self.use_pad = use_pad - self.norm_type = norm_type - if norm_type in ['bn']: - bias = False - - stride = 2 if scale == 'down' else 1 - - self.scale_func = lambda x: x - if scale == 'up': - self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest') - - self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.) / 2))) - self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias) - - self.relu = ReluLayer(out_channels, relu_type) - self.norm = NormLayer(out_channels, norm_type=norm_type) - - def forward(self, x): - out = self.scale_func(x) - if self.use_pad: - out = self.reflection_pad(out) - out = self.conv2d(out) - out = self.norm(out) - out = self.relu(out) - return out - - -class ResidualBlock(nn.Module): - """ - Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html - """ - - def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'): - super(ResidualBlock, self).__init__() - - if scale == 'none' and c_in == c_out: - self.shortcut_func = lambda x: x - else: - self.shortcut_func = ConvLayer(c_in, c_out, 3, scale) - - scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']} - scale_conf = scale_config_dict[scale] - - self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type) - self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none') - - def forward(self, x): - identity = self.shortcut_func(x) - - res = self.conv1(x) - res = self.conv2(res) - return identity + res - - -class ParseNet(nn.Module): - - def __init__(self, - in_size=128, - out_size=128, - min_feat_size=32, - base_ch=64, - parsing_ch=19, - res_depth=10, - relu_type='LeakyReLU', - norm_type='bn', - ch_range=[32, 256]): - super().__init__() - self.res_depth = res_depth - act_args = {'norm_type': norm_type, 'relu_type': relu_type} - min_ch, max_ch = ch_range - - ch_clip = lambda x: max(min_ch, min(x, max_ch)) # noqa: E731 - min_feat_size = min(in_size, min_feat_size) - - down_steps = int(np.log2(in_size // min_feat_size)) - up_steps = int(np.log2(out_size // min_feat_size)) - - # =============== define encoder-body-decoder ==================== - self.encoder = [] - self.encoder.append(ConvLayer(3, base_ch, 3, 1)) - head_ch = base_ch - for i in range(down_steps): - cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2) - self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args)) - head_ch = head_ch * 2 - - self.body = [] - for i in range(res_depth): - self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args)) - - self.decoder = [] - for i in range(up_steps): - cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2) - self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args)) - head_ch = head_ch // 2 - - self.encoder = nn.Sequential(*self.encoder) - self.body = nn.Sequential(*self.body) - self.decoder = nn.Sequential(*self.decoder) - self.out_img_conv = ConvLayer(ch_clip(head_ch), 3) - self.out_mask_conv = ConvLayer(ch_clip(head_ch), parsing_ch) - - def forward(self, x): - feat = self.encoder(x) - x = feat + self.body(feat) - x = self.decoder(x) - out_img = self.out_img_conv(x) - out_mask = 
self.out_mask_conv(x) - return out_mask, out_img diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download UC for PUBG Mobile and Customize Your Character.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download UC for PUBG Mobile and Customize Your Character.md deleted file mode 100644 index 52fd96b03c936672f734fc3ae119ad349b3aa86c..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download UC for PUBG Mobile and Customize Your Character.md +++ /dev/null @@ -1,142 +0,0 @@ - -

    How to Download UC PUBG Mobile and Why You Should Do It

    -

If you are a fan of PUBG Mobile, the mobile version of the popular battle royale game PlayerUnknown's Battlegrounds, you might have heard of UC PUBG Mobile. It is the game's premium currency, which you can use to buy various items and upgrades that enhance your gameplay and style. But how do you get UC PUBG Mobile? And what are the benefits of having it? In this article, we will answer these questions and more, and show you how to download UC PUBG Mobile using different methods. We will also give you some tips and tricks to get the most out of your UC PUBG Mobile. So, let's get started!

    -

    download uc pubg mobile


    Download File ····· https://ssurll.com/2uO03G



    -

    What is UC PUBG Mobile and What Can You Do With It?

    -

    UC PUBG Mobile is the in-game currency for the popular battle royale game

    -

    UC, which stands for Unknown Cash, is the in-game currency for PUBG Mobile. It can be purchased with real money or earned through events, achievements, and daily login rewards. You can use it to buy various items available in the shop or the royale pass.

    -

    You can use UC PUBG Mobile to buy various items and upgrades in the game

    -

    With UC PUBG Mobile, you can purchase a variety of in-game content for PUBG Mobile, available on the iOS or Android apps. You can get treasures, outfits, companions, battle passes, weapon skins, and more. These items can help you customize your character, improve your performance, or just have fun in the game. Some items are exclusive to UC PUBG Mobile users, while others are available for both UC PUBG Mobile and BP (Battle Points), which is another currency that you can earn by playing the game.

    -

    How to Download UC PUBG Mobile Using Different Methods

    -

    Method 1: Buy UC PUBG Mobile online with real money or gift cards

    -

    The easiest way to get UC PUBG Mobile is to buy it online with real money or gift cards. There are many websites that offer this service, such as Dundle, PUBG MOBILE PAYMENT, G2G.com, or UniPin. These websites accept various payment methods, such as credit cards, debit cards, prepaid cards, PayPal, Razer Gold, and other local options. You can choose from different amounts of UC PUBG Mobile, ranging from 60 to 8100. The prices may vary depending on the website, the region, and the exchange rate. After you make the payment, you will receive a UC PUBG Mobile code that you can redeem in the game.

    -

    Method 2: Earn UC PUBG Mobile by completing missions, events, and surveys

    -

    Another way to get UC PUBG Mobile is to earn it by completing various missions, events, and surveys in the game or on third-party platforms. For example, you can get UC PUBG Mobile by:

      -
• Completing daily or weekly missions in the game and earning RP (Royale Pass) points. You can use these points to level up your royale pass and get UC PUBG Mobile as a reward.
• Participating in special events or challenges in the game and earning event coins or tokens. You can use these coins or tokens to exchange for UC PUBG Mobile in the event shop.
• Joining online surveys or tasks on platforms like Google Opinion Rewards, Swagbucks, or FeaturePoints. You can earn Google Play credits, PayPal cash, or gift cards that you can use to buy UC PUBG Mobile online.
    -These methods may take more time and effort than buying UC PUBG Mobile online, but they are also more rewarding and fun. You can also learn more about the game and its features by completing these missions, events, and surveys.

    -

    Method 3: Get free UC PUBG Mobile by participating in giveaways and promotions

    -

    The last method to get UC PUBG Mobile is to get it for free by participating in various giveaways and promotions on social media, websites, or apps. For example, you can get free UC PUBG Mobile by:

    -These methods may require some luck and patience, but they are also free and easy. You can also discover new content and communities related to PUBG Mobile by participating in these giveaways and promotions.

    -

    How to Redeem UC PUBG Mobile and Use It in the Game

    -

    Redeem your UC PUBG Mobile code in the PUBG Mobile app or website

    -

    Once you have your UC PUBG Mobile code, you need to redeem it in the game. You can do this by following these steps:

      -
1. Open the PUBG Mobile app on your device and log in with your account.
2. Tap on the "UC" icon at the top right corner of the screen.
3. Select the "Redeem Code" option and enter your code.
4. Tap on the "OK" button and confirm your redemption.
5. You will see a message that says "Redeemed Successfully". Your UC PUBG Mobile will be added to your account balance.
    -Alternatively, you can also redeem your code on the PUBG Mobile website. You need to log in with your account and enter your code on the redemption page. You will then receive your UC PUBG Mobile in your account.

    -


    -

    Use your UC PUBG Mobile to buy items and upgrades in the shop or the royale pass

    -

    Now that you have your UC PUBG Mobile, you can use it to buy various items and upgrades in the game. You can do this by following these steps:

      -
    1. Tap on the "Shop" icon at the bottom right corner of the screen.
    2. -
    3. Select the "Treasures", "Outfits", "Companions", or "Others" tab to browse the items available for purchase.
    4. -
    5. Tap on the item that you want to buy and check its price and details.
    6. -
    7. Tap on the "Buy" button and confirm your purchase.
    8. -
    9. You will see a message that says "Purchased Successfully". Your item will be added to your inventory.
    10. -
    -You can also use your UC PUBG Mobile to buy the "Royale Pass", which is a seasonal subscription that gives you access to exclusive rewards and missions. You can do this by following these steps:
      -
    1. Tap on the "RP" icon at the top right corner of the screen.
    2. -
    3. Select the "Upgrade Pass" option and choose between the "Elite Pass" or the "Elite Pass Plus".
    4. -
    5. Check the price and benefits of each pass and tap on the "Buy" button.
    6. -
    7. Confirm your purchase and enjoy your royale pass perks.
    8. -
    -You can check your UC PUBG Mobile balance, items, and royale pass progress at any time by tapping on the corresponding icons on the screen.

    -

    Tips and Tricks to Get the Most Out of Your UC PUBG Mobile

    -

    Tip 1: Compare prices and discounts before buying UC PUBG Mobile or items

    -

    Before you buy UC PUBG Mobile or items, you should always compare the prices and discounts offered by different websites or platforms. You may find some deals that can save you money or give you more value for your UC PUBG Mobile. For example, you can use websites like UC Price Comparison or UC Deals Finder to find the best prices for UC PUBG Mobile. You can also check the official PUBG Mobile website or social media pages for any ongoing promotions or offers that can give you discounts or bonuses for buying UC PUBG Mobile or items.

    -

    Tip 2: Share your UC PUBG Mobile or items with your teammates or friends

    -

    If you have extra UC PUBG Mobile or items that you don't need or want, you can share them with your teammates or friends who play PUBG Mobile. This can help you build a stronger bond with them and make your gameplay more enjoyable. You can share your UC PUBG Mobile or items by following these steps:

    • Tap on the "Friends" icon at the bottom left corner of the screen.
    • Select the friend that you want to share with and tap on their profile.
    • Tap on the "Send Gift" button and choose between sending UC PUBG Mobile or items.
    • Select the amount of UC PUBG Mobile or the item that you want to send and tap on the "Send" button.
    • Your friend will receive a notification and a message from you with your gift.
    -You can also receive gifts from your friends in the same way. You can check your gifts by tapping on the "Mailbox" icon at the top right corner of the screen.

    -

    Tip 3: Save your UC PUBG Mobile for special events or offers

    -

    If you want to get more bang for your buck, you should save your UC PUBG Mobile for special events or offers that happen regularly in PUBG Mobile. These events or offers can give you a chance to get rare or exclusive items, discounts, bonuses, or other benefits for spending your UC PUBG Mobile. For example, some of these events or offers are:

    • Lucky Spin: A roulette-like game where you can spend UC PUBG Mobile to spin a wheel and get a random item. You can also get tokens that you can use to redeem other items in the lucky shop.
    • Lucky Crate: A loot box-like game where you can spend UC PUBG Mobile to open a crate and get a random item. You can also get coupons that you can use to get discounts on other crates.
    • Lucky Treasure: A treasure hunt-like game where you can spend UC PUBG Mobile to dig a spot and get a random item. You can also get keys that you can use to open other treasures.
    • Royale Pass Event: A seasonal event where you can spend UC PUBG Mobile to buy the royale pass and get exclusive rewards and missions. You can also get extra UC PUBG Mobile or items by completing the royale pass missions or reaching certain levels.
    • Anniversary Event: A yearly event where you can spend UC PUBG Mobile to join various activities and get anniversary-themed items, rewards, or surprises.
    -You can check the current or upcoming events or offers by tapping on the "Events" icon at the bottom right corner of the screen. You can also get notifications or messages about them in the game.

    -

    Conclusion

    -

    In conclusion, UC PUBG Mobile is a valuable currency that can help you enjoy PUBG Mobile more. You can download UC PUBG Mobile using different methods, such as buying it online, earning it by completing missions, events, and surveys, or getting it for free by participating in giveaways and promotions. You can then redeem your UC PUBG Mobile and use it to buy items and upgrades in the game. You can also save your UC PUBG Mobile for special events or offers that can give you more benefits. We hope this article has helped you learn how to download UC PUBG Mobile and why you should do it. Happy gaming!

    -

    FAQs

    -

    Q: How much does UC PUBG Mobile cost?

    -

    A: The price of UC PUBG Mobile may vary depending on the website, the region, and the exchange rate. However, here is a table that shows the approximate price of UC PUBG Mobile in US dollars:

    Amount of UC PUBG Mobile    Price in USD
    60                          $0.99
    300 (+25)                   $4.99
    600 (+60)                   $9.99
    1500 (+300)                 $24.99
    3000 (+850)                 $49.99
    6000 (+2100)                $99.99
    8100 (+4200)                $129.99
    -
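    To see what the bundled bonus UC actually does to the price, you can work out the effective rate yourself. Below is a quick sketch of that arithmetic; the amounts and prices come from the table above, and everything else (variable names, output format) is just for illustration:

```python
# Effective cost per UC for each bundle in the table above.
# Each entry is (base_uc, bonus_uc, price_usd).
bundles = [
    (60, 0, 0.99),
    (300, 25, 4.99),
    (600, 60, 9.99),
    (1500, 300, 24.99),
    (3000, 850, 49.99),
    (6000, 2100, 99.99),
    (8100, 4200, 129.99),
]

for base, bonus, price in bundles:
    total = base + bonus
    # Price per 100 UC makes bundles of different sizes easy to compare.
    rate = price / total * 100
    print(f"{base} (+{bonus}) UC: ${price:.2f} -> ${rate:.3f} per 100 UC")
```

    Running this shows that the larger bundles cost noticeably less per UC than the small ones, which is why saving up for a bigger purchase gives you more value.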

    Q: Is UC PUBG Mobile safe to use?

    -

    A: Yes, UC PUBG Mobile is safe to use as long as you buy it from trusted websites or platforms, redeem it in the official PUBG Mobile app or website, and use it in accordance with the game's terms and conditions. You should avoid any websites or apps that claim to give you free or unlimited UC PUBG Mobile, as they may be scams or viruses that can harm your device or account.

    -

    Q: Can I transfer UC PUBG Mobile to another account?

    -

    A: No, you cannot transfer UC PUBG Mobile to another account directly. However, you can share your UC PUBG Mobile or items with your friends by sending them gifts in the game. You can also buy gift cards or codes for UC PUBG Mobile and give them to your friends.

    -

    Q: Can I refund UC PUBG Mobile or items?

    -

    A: No, you cannot refund UC PUBG Mobile or items once you have purchased or redeemed them. However, you can contact the customer service of the website or platform where you bought UC PUBG Mobile if you have any issues or problems with your purchase.

    -

    Q: How can I contact the customer service of PUBG Mobile?

    -

    A: You can contact the customer service of PUBG Mobile by following these steps:

    1. Tap on the "Settings" icon at the top right corner of the screen.
    2. Select the "Customer Service" option and tap on the "Online Customer Service" button.
    3. You will be redirected to a chat window where you can talk to a customer service representative.
    -You can also email them at PUBGMOBILE_CS@tencentgames.com.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Extreme Car Driving Simulator The Best Open World Car Game for Your Laptop.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Extreme Car Driving Simulator The Best Open World Car Game for Your Laptop.md deleted file mode 100644 index b135428e242aa07e1a63d4df950cb53be22179e0..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Extreme Car Driving Simulator The Best Open World Car Game for Your Laptop.md +++ /dev/null @@ -1,94 +0,0 @@ - -

    Extreme Car Driving Simulator Download for Laptop

    -

    Do you love driving fast cars and performing amazing stunts? Do you want to experience the thrill of racing in a realistic open world environment? If you answered yes, then you should try Extreme Car Driving Simulator, one of the most popular car simulator games on Android. And the best part is, you can play it on your laptop too! In this article, we will show you how to download and play Extreme Car Driving Simulator on your laptop, and why it is better than playing on your mobile device. We will also share some tips and tricks to help you master the game and have more fun.

    -

    extreme car driving simulator download for laptop


    Download File: https://ssurll.com/2uNRvA



    -

    What is Extreme Car Driving Simulator?

    -

    Extreme Car Driving Simulator is a casual racing game developed by AxesInMotion Racing. It was released in 2014 and has over 100 million downloads on Google Play Store. The game lets you drive, drift, and feel a racing sports car in an open world city. You can be a furious racer or a casual driver, depending on your mood. You can also choose from different game modes, such as free roam, checkpoint, traffic, or mini-games. The game features realistic physics, stunning graphics, and a variety of cars to choose from.

    -

    Features of Extreme Car Driving Simulator

    -

    Some of the features that make Extreme Car Driving Simulator a great game are:

    -
    • You can drive any car you want, from sports cars to SUVs, without any restrictions or rules.
    • You can explore a huge open world city with different environments, such as highways, airports, off-road areas, and more.
    • You can perform illegal stunts and drifts without worrying about the police or other racers.
    • You can customize your car with different colors, wheels, spoilers, and vinyls.
    • You can adjust the settings of your car, such as the engine power, brake force, steering sensitivity, and more.
    • You can enjoy realistic sound effects and music while driving.
    • You can compete with your friends and other players online through leaderboards and achievements.
    -

    How to download and play Extreme Car Driving Simulator on laptop

    -

    If you want to play Extreme Car Driving Simulator on your laptop, you have three options:

    -

    Option 1: Using BlueStacks emulator

    -

    BlueStacks is an Android emulator that allows you to run Android apps and games on your PC or Mac. It is the best platform to play Extreme Car Driving Simulator on your laptop because it offers many enhancements and features that improve your gaming experience. Here are the steps to download and play Extreme Car Driving Simulator using BlueStacks:

    -
    1. Download and install BlueStacks on your laptop from [this link].
    2. Complete Google sign-in to access the Play Store, or do it later.
    3. Look for Extreme Car Driving Simulator in the search bar at the top right corner.
    4. Click to install Extreme Car Driving Simulator from the search results.
    5. Complete Google sign-in (if you skipped step 2) to install Extreme Car Driving Simulator.
    6. Click the Extreme Car Driving Simulator icon on the home screen to start playing.
    -

    Option 2: Using Microsoft Store app

    -

    If you have a Windows 10 laptop, you can also download Extreme Car Driving Simulator from the Microsoft Store app. This is a convenient way to get the game without using an emulator. Here are the steps to download and play Extreme Car Driving Simulator using the Microsoft Store app.

    The game also offers several modes to choose from:

    • Free Roam: You can drive through checkpoints that give you coins and time bonuses.
    • Traffic: You can drive in a busy city with other cars and avoid crashing into them. You can also overtake them and drive in the opposite lane to earn more coins.
    • Checkpoint: You can race against the clock and reach the checkpoints before the time runs out. You can also collect coins along the way and use nitro to speed up.
    • Mini-Games: You can play different mini-games that test your skills and reflexes, such as parking, slalom, cone smash, jump, and soccer. You can also earn coins and trophies for completing them.

    You can use the coins you earn to buy new cars or upgrade your existing ones. You can also unlock new cars by completing certain achievements or watching ads.

    -

    How to download extreme car driving simulator on laptop
    -Extreme car driving simulator for laptop windows 10
    -Extreme car driving simulator free download for pc
    -Best car driving simulator games for laptop
    -Extreme car driving simulator online play on laptop
    -Extreme car driving simulator mod apk download for laptop
    -Extreme car driving simulator cheats and hacks for laptop
    -Extreme car driving simulator 2023 download for laptop
    -Extreme car driving simulator system requirements for laptop
    -Extreme car driving simulator tips and tricks for laptop
    -Extreme car driving simulator review and rating for laptop
    -Extreme car driving simulator offline mode for laptop
    -Extreme car driving simulator update and new features for laptop
    -Extreme car driving simulator multiplayer mode for laptop
    -Extreme car driving simulator download size and speed for laptop
    -Extreme car driving simulator best cars and customization for laptop
    -Extreme car driving simulator realistic physics and graphics for laptop
    -Extreme car driving simulator controller support for laptop
    -Extreme car driving simulator gameplay and video for laptop
    -Extreme car driving simulator download link and guide for laptop
    -Extreme car driving simulator alternatives and similar games for laptop
    -Extreme car driving simulator troubleshooting and error fix for laptop
    -Extreme car driving simulator keyboard and mouse controls for laptop
    -Extreme car driving simulator rewards and achievements for laptop
    -Extreme car driving simulator maps and locations for laptop
    -Extreme car driving simulator challenges and missions for laptop
    -Extreme car driving simulator fun and addictive game for laptop
    -Extreme car driving simulator comparison and difference with other games for laptop
    -Extreme car driving simulator pros and cons for laptop
    -Extreme car driving simulator feedback and suggestions for laptop
    -Extreme car driving simulator latest version and patch notes for laptop
    -Extreme car driving simulator minimum and recommended specs for laptop
    -Extreme car driving simulator sound effects and music for laptop
    -Extreme car driving simulator screenshots and wallpapers for laptop
    -Extreme car driving simulator developer and publisher info for laptop
    -Extreme car driving simulator genres and categories for laptop
    -Extreme car driving simulator release date and history for laptop
    -Extreme car driving simulator price and discount for laptop
    -Extreme car driving simulator installation and setup for laptop
    -Extreme car driving simulator FAQ and help center for laptop

    -

    Conclusion

    -

    Extreme Car Driving Simulator is a fun and exciting game that lets you drive any car you want in a realistic open world city. You can download and play it on your laptop using different methods, such as BlueStacks emulator, Microsoft Store app, or Windows PC app. Playing on your laptop can give you many advantages, such as a bigger screen, better controls, and a full HUD display. You can also customize your car and settings, perform stunts and drifts, and earn coins and unlock new cars. If you are looking for a casual racing game that offers you freedom and variety, you should try Extreme Car Driving Simulator today!

    -

    FAQs

    -

    Here are some frequently asked questions about Extreme Car Driving Simulator:

    -
    • Q: Is Extreme Car Driving Simulator free to play?
      A: Yes, Extreme Car Driving Simulator is free to play on Android devices. However, it contains ads and in-app purchases that can enhance your gameplay or remove ads.
    • Q: Is Extreme Car Driving Simulator safe to download?
      A: Yes, Extreme Car Driving Simulator is safe to download from Google Play Store, Microsoft Store, or the official website of AxesInMotion Racing. It does not contain any viruses or malware that can harm your device.
    • Q: How can I save my progress in Extreme Car Driving Simulator?
      A: Extreme Car Driving Simulator automatically saves your progress in the cloud when you are connected to the internet. You can also sync your progress across different devices by signing in with your Google Play Games account.
    • Q: How can I contact the developers of Extreme Car Driving Simulator?
      A: You can contact the developers of Extreme Car Driving Simulator by sending an email to support@axesinmotion.com or by visiting their website at [this link]. You can also follow them on Facebook, Twitter, Instagram, or YouTube for updates and news.
    • Q: How can I get more coins in Extreme Car Driving Simulator?
      A: You can get more coins in Extreme Car Driving Simulator by completing different challenges and objectives in the game, such as free roam, traffic, checkpoint, or mini-games. You can also watch ads or buy coins with real money.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Garena Injector Apk Unlock All Premium Features in Free Fire.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Garena Injector Apk Unlock All Premium Features in Free Fire.md deleted file mode 100644 index 2a6f7a639d424f6f957f8cb4094e929c494e3453..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Garena Injector Apk Unlock All Premium Features in Free Fire.md +++ /dev/null @@ -1,158 +0,0 @@ -
    -

    Garena Injector APK: A Guide for Free Fire Players

    -

    If you are a fan of Free Fire, a popular survival shooter game on mobile, you might have heard of Garena Injector APK. This is an app that allows you to unlock premium skins, items, and mods for Free Fire without spending any money. But what exactly is Garena Injector APK, and how can you use it safely and effectively? In this article, we will answer these questions and more. We will also provide you with the download link, installation guide, alternatives, risks, reviews, and FAQs of Garena Injector APK.

    -

    garena injector apk


    Download ---> https://ssurll.com/2uNTHz



    -

    What is Garena Injector APK?

    -

    Garena Injector APK is an app developed by Garena Mod Menu, a team of modders and hackers who create tools for Free Fire players. This app works as an injector that injects skins, items, and mods into the game files of Free Fire. By using this app, you can customize your characters and get other advantages in the game, such as:

    -

    Features of Garena Injector APK

    -
    • Skins: You can unlock and use any skin for your characters, weapons, vehicles, backpacks, and parachutes. There are hundreds of skins available in the app, including rare and exclusive ones.
    • No Recoil: You can reduce or eliminate the recoil of your weapons, making them more accurate and stable.
    • Drone View: You can increase or decrease the camera view distance, giving you a better perspective of the map and your enemies.
    • Maps: You can unlock and use any map in the game, including the classic maps and the new ones.
    • Speed: You can increase or decrease your movement speed, making you faster or slower than other players.
    • Wallhack: You can see through walls and other obstacles, making it easier to spot and shoot your enemies.
    • All ESPs: You can enable various ESPs (extra sensory perception) that show you information about your enemies, such as their name, health, distance, location, weapon, etc.
    • And many more: You can also use other features such as aimbot, telekill, grass removal, fake name, medkit running, unlimited gold, etc.
    -

    Benefits of Garena Injector APK

    -

    By using Garena Injector APK, you can enjoy several benefits such as:

    -
    • Saving money: You don't have to spend any real money to buy diamonds or coins in Free Fire. You can get them for free with Garena Injector APK.
    • Enhancing gaming skills: You can improve your gaming skills by using the features of Garena Injector APK. You can practice with different weapons, maps, modes, and strategies.
    • Having fun: You can have more fun by exploring different skins, items, and mods in the game. You can also prank your friends and enemies with funny features.
    • Competing with others: You can challenge other players and show off your skills and achievements in the game. You can also join tournaments and events and win prizes.
    -

    How to Download and Install Garena Injector APK?

    -

    If you want to download and install Garena Injector APK on your Android device, you need to follow these steps:

    -

    Requirements for Garena Injector APK

    -
    • Android version: You need to have Android 4.4 or higher on your device.
    • Free space: You need to have at least 50 MB of free space on your device.
    • Free Fire version: You need to have the latest version of Free Fire installed on your device.
    • Unknown sources: You need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
    -

    Steps to Download and Install Garena Injector APK

    -
    1. Download the APK file: You can download the APK file of Garena Injector from this link: Garena Injector APK Download. This is a trusted and verified source that provides the latest and updated version of the app.
    2. Locate the APK file: After downloading the APK file, you need to locate it on your device. You can use a file manager app or go to the Downloads folder on your device.
    3. Install the APK file: Once you locate the APK file, you need to tap on it and follow the instructions on the screen. It will take a few seconds to install the app on your device.
    4. Launch the app: After installing the app, you can launch it from your app drawer or home screen. You will see the icon of Garena Injector on your device.
    -

    How to Use Garena Injector APK?

    -

    To use Garena Injector APK, you need to follow these steps:

    -

    How to Inject Skins and Other Items

    -
    1. Open the app: Launch Garena Injector from your device and grant any permissions it asks for.
    2. Select a category: On the main screen of the app, you will see different categories of items, such as Skins, No Recoil, Drone View, etc. Tap on the category that you want to inject into Free Fire.
    3. Select an item: On the next screen, you will see a list of items that belong to that category. For example, if you select Skins, you will see skins for characters, weapons, vehicles, etc. Tap on the item that you want to inject into Free Fire.
    4. Select a mode: On the final screen, you will see two modes: Online and Offline. Online mode means that you can use the item in any mode of Free Fire, such as Classic, Ranked, Clash Squad, etc. Offline mode means that you can use the item only in Training mode or Custom mode. Tap on the mode that you prefer.
    5. Inject the item: After selecting a mode, tap on the Inject button at the bottom of the screen. It will take a few seconds to inject the item into Free Fire. You will see a confirmation message when it is done.
    6. Enjoy the item: Now you can open Free Fire and enjoy using the item that you injected. You will see it in your inventory or loadout. You can also change or remove it anytime from Garena Injector.
    -

    How to Enable and Disable Mods

    -
    1. Open the app: Launch Garena Injector from your device and grant any permissions it asks for.
    2. Select Mods: On the main screen of the app, tap on Mods.

    -

    How to Update Garena Injector APK?

    -

    If you want to update Garena Injector APK, you can follow these steps:

    1. Uninstall the Old Version: First, uninstall the old version of the app from your device. To do this, go to Settings > Apps > Garena Injector and tap on Uninstall.
    2. Download the Latest Version: You can download the latest version of Garena Injector from this link: Garena Injector APK Download. This is a trusted and verified source that provides the latest and updated version of the app.
    3. Install the Latest Version: After downloading the latest version of the app, you need to install it on your device. To do this, follow the same steps as mentioned above in the installation guide.
    4. Enjoy the Latest Version: After installing the latest version of the app, you can enjoy using the new features and improvements of Garena Injector.

    How to Uninstall Garena Injector APK?

    -

    If you want to uninstall Garena Injector APK from your device, you can follow these steps:

    -

    garena injector free fire apk download
    -garena injector mod menu apk
    -garena injector ff skins unlocker apk
    -garena injector no recoil apk
    -garena injector drone view apk
    -garena injector maps hack apk
    -garena injector speed hack apk
    -garena injector wallhack apk
    -garena injector all esps apk
    -garena injector premium items apk
    -garena injector latest version apk
    -garena injector android apk
    -garena injector safe apk
    -garena injector anti-ban apk
    -garena injector online apk
    -garena injector offline apk
    -garena injector update apk
    -garena injector 2023 apk
    -garena injector 1.92 apk
    -garena injector for free fire apk
    -garena injector for ff apk
    -garena injector for android apk
    -garena injector for ios apk
    -garena injector for pc apk
    -garena injector for windows apk
    -garena injector for mac apk
    -garena injector for linux apk
    -garena injector free download apk
    -garena injector direct download apk
    -garena injector fast download apk
    -garena injector easy download apk
    -garena injector secure download apk
    -garena injector virus-free download apk
    -garena injector unlimited download apk
    -garena injector full version download apk
    -how to use garena injector apk
    -how to install garena injector apk
    -how to download garena injector apk
    -how to update garena injector apk
    -how to uninstall garena injector apk
    -how to get free skins with garena injector apk
    -how to get no recoil with garena injector apk
    -how to get drone view with garena injector apk
    -how to get maps hack with garena injector apk
    -how to get speed hack with garena injector apk
    -how to get wallhack with garena injector apk
    -how to get all esps with garena injector apk
    -how to get premium items with garena injector apk

    -

    Uninstall from Settings or Play Store

    -
    1. Go to Settings or Play Store: On your device, go to Settings > Apps or Play Store > My Apps and Games.
    2. Select Garena Injector: On the list of apps, find and select Garena Injector. You will see its details and options.
    3. Select Uninstall: On the details or options screen, tap on Uninstall. You will see a confirmation message asking if you want to uninstall the app.
    4. Confirm Uninstall: Tap on OK or Yes to confirm that you want to uninstall the app. It will take a few seconds to uninstall the app from your device.
    5. Delete the APK File: If you still have the APK file of Garena Injector on your device, you can delete it as well. To do this, go to your file manager app or Downloads folder and find and delete the APK file.
    -

    Uninstall with Android Studio or ADB

    -
    1. Download and Install Android Studio or ADB: On your computer, download and install Android Studio or ADB (Android Debug Bridge). These are tools that allow you to communicate with your Android device via USB cable.
    2. Connect Your Device to Your Computer: On your device, enable USB debugging mode by going to Settings > Developer Options > USB Debugging and toggling it on. Then, connect your device to your computer using a USB cable.
    3. Open Android Studio or ADB: On your computer, open Android Studio or ADB and wait for it to recognize your device.
    4. Type the Command: On Android Studio or ADB, type the following command: adb uninstall com.garenamodmenu.injector. This is the package name of Garena Injector. Press Enter to execute the command.
    5. Confirm Uninstall: You will see a message saying "Success" if the command worked. This means that Garena Injector has been uninstalled from your device.
    -
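    If you prefer to script this step, a minimal sketch in Python is shown below. It assumes adb is installed and on your PATH, and it reuses the package name given in the steps above; it is an illustration of the same command, not part of the app itself:

```python
import subprocess

PACKAGE = "com.garenamodmenu.injector"  # package name from the steps above

# Run `adb uninstall <package>` and capture its output.
result = subprocess.run(
    ["adb", "uninstall", PACKAGE],
    capture_output=True,
    text=True,
)

# adb prints "Success" on stdout when the package was removed.
if "Success" in result.stdout:
    print(f"{PACKAGE} was uninstalled.")
else:
    print(f"Uninstall failed: {result.stdout.strip() or result.stderr.strip()}")
```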

    Reviews of Garena Injector APK

    -

    Garena Injector APK has received mixed reviews from users who have tried it. Here are some examples of positive and negative reviews:

    -

    Positive Reviews

    -
    • "I love this app. It works perfectly on my device. I can use any skin I want in Free Fire. It also has many other features that make the game more fun and easy. I recommend this app to anyone who plays Free Fire."
    • "This is the best injector app for Free Fire. It has everything I need to enjoy the game. It has skins, mods, hacks, and more. It is also very easy to use and update. I have never faced any problem with this app."
    • "This app is amazing. It gives me free diamonds and coins in Free Fire. I can buy anything I want in the game without spending any money. It also has no recoil, wallhack, speed, and other mods that help me win every match."
    -

    Negative Reviews

    -
    • "This app is a scam. It does not work on my device. It always crashes or freezes when I try to inject something into Free Fire. It also has malware and viruses that damage my device and steal my data."
    • "This app is a waste of time. It does not inject anything into Free Fire. It only shows ads and redirects me to fake websites that ask me to download more apps or complete surveys. It also makes my device slow and laggy."
    • "This app is a risk. It got me banned from Free Fire. I lost my account and progress in the game. It also exposed me to other players who reported me for cheating. It is not worth it."
    -

    Conclusion

    -

    Garena Injector APK is an app that allows you to inject skins, items, and mods into Free Fire. It can provide you with many benefits and features, such as free diamonds, coins, skins, no recoil, wallhack, speed, etc. However, it also comes with some risks, such as getting banned from Free Fire, getting malware or viruses on your device, or getting scammed by fake websites. Therefore, you should use Garena Injector APK at your own risk and discretion. You should also follow the download, installation, update, and uninstall guide that we provided in this article.

    -

    FAQs

    -

    Here are some frequently asked questions about Garena Injector APK:

    -
    1. Q: Is Garena Injector APK safe?
       A: Garena Injector APK is not an official app from Garena or Free Fire. It is a modded app that comes from an unknown source. Therefore, it is not safe to use. It might contain malware or viruses that can harm your device or steal your data. It might also get you banned from Free Fire or expose you to other players who report you for cheating.
    2. Q: Is Garena Injector APK legal?
       A: Garena Injector APK is not legal to use. It violates the terms and conditions of Free Fire, which prohibit the use of any third-party apps or tools that modify or interfere with the game. If you use Garena Injector APK, you are breaking the rules and laws of Free Fire and your country.
    3. Q: Does Garena Injector APK work on iOS devices?
       A: Garena Injector APK does not work on iOS devices. It is only compatible with Android devices that have Android 4.4 or higher. If you have an iOS device, you cannot use Garena Injector APK.
    4. Q: How can I contact the developers of Garena Injector APK?
       A: You can contact the developers of Garena Injector APK by visiting their official website: Garena Mod Menu Website. There, you can find their email address, social media accounts, and other information.
    5. Q: Where can I find more information about Garena Injector APK?
       A: You can find more information about Garena Injector APK by visiting their official website: Garena Mod Menu Website. There, you can find their latest news, updates, features, tutorials, and more.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/data/data_util.py b/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/data/data_util.py deleted file mode 100644 index 63b1bce8e089485182c962e830a163d6d0059da8..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/data/data_util.py +++ /dev/null @@ -1,305 +0,0 @@ -import cv2 -import numpy as np -import torch -from os import path as osp -from torch.nn import functional as F - -from basicsr.data.transforms import mod_crop -from basicsr.utils import img2tensor, scandir - - -def read_img_seq(path, require_mod_crop=False, scale=1): - """Read a sequence of images from a given folder path. - - Args: - path (list[str] | str): List of image paths or image folder path. - require_mod_crop (bool): Require mod crop for each image. - Default: False. - scale (int): Scale factor for mod_crop. Default: 1. - - Returns: - Tensor: size (t, c, h, w), RGB, [0, 1]. - """ - if isinstance(path, list): - img_paths = path - else: - img_paths = sorted(list(scandir(path, full_path=True))) - imgs = [cv2.imread(v).astype(np.float32) / 255. for v in img_paths] - if require_mod_crop: - imgs = [mod_crop(img, scale) for img in imgs] - imgs = img2tensor(imgs, bgr2rgb=True, float32=True) - imgs = torch.stack(imgs, dim=0) - return imgs - - -def generate_frame_indices(crt_idx, max_frame_num, num_frames, padding='reflection'): - """Generate an index list for reading `num_frames` frames from a sequence - of images. - - Args: - crt_idx (int): Current center index. - max_frame_num (int): Max number of the sequence of images (from 1). - num_frames (int): Reading num_frames frames. - padding (str): Padding mode, one of - 'replicate' | 'reflection' | 'reflection_circle' | 'circle' - Examples: current_idx = 0, num_frames = 5 - The generated frame indices under different padding mode: - replicate: [0, 0, 0, 1, 2] - reflection: [2, 1, 0, 1, 2] - reflection_circle: [4, 3, 0, 1, 2] - circle: [3, 4, 0, 1, 2] - - Returns: - list[int]: A list of indices. - """ - assert num_frames % 2 == 1, 'num_frames should be an odd number.' - assert padding in ('replicate', 'reflection', 'reflection_circle', 'circle'), f'Wrong padding mode: {padding}.' - - max_frame_num = max_frame_num - 1 # start from 0 - num_pad = num_frames // 2 - - indices = [] - for i in range(crt_idx - num_pad, crt_idx + num_pad + 1): - if i < 0: - if padding == 'replicate': - pad_idx = 0 - elif padding == 'reflection': - pad_idx = -i - elif padding == 'reflection_circle': - pad_idx = crt_idx + num_pad - i - else: - pad_idx = num_frames + i - elif i > max_frame_num: - if padding == 'replicate': - pad_idx = max_frame_num - elif padding == 'reflection': - pad_idx = max_frame_num * 2 - i - elif padding == 'reflection_circle': - pad_idx = (crt_idx - num_pad) - (i - max_frame_num) - else: - pad_idx = i - num_frames - else: - pad_idx = i - indices.append(pad_idx) - return indices - - -def paired_paths_from_lmdb(folders, keys): - """Generate paired paths from lmdb files. - - Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is: - - lq.lmdb - ├── data.mdb - ├── lock.mdb - ├── meta_info.txt - - The data.mdb and lock.mdb are standard lmdb files and you can refer to - https://lmdb.readthedocs.io/en/release/ for more details. - - The meta_info.txt is a specified txt file to record the meta information - of our datasets. It will be automatically created when preparing - datasets by our provided dataset tools. 
- Each line in the txt file records - 1)image name (with extension), - 2)image shape, - 3)compression level, separated by a white space. - Example: `baboon.png (120,125,3) 1` - - We use the image name without extension as the lmdb key. - Note that we use the same key for the corresponding lq and gt images. - - Args: - folders (list[str]): A list of folder path. The order of list should - be [input_folder, gt_folder]. - keys (list[str]): A list of keys identifying folders. The order should - be in consistent with folders, e.g., ['lq', 'gt']. - Note that this key is different from lmdb keys. - - Returns: - list[str]: Returned path list. - """ - assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' - f'But got {len(folders)}') - assert len(keys) == 2, ('The len of keys should be 2 with [input_key, gt_key]. ' f'But got {len(keys)}') - input_folder, gt_folder = folders - input_key, gt_key = keys - - if not (input_folder.endswith('.lmdb') and gt_folder.endswith('.lmdb')): - raise ValueError(f'{input_key} folder and {gt_key} folder should both in lmdb ' - f'formats. But received {input_key}: {input_folder}; ' - f'{gt_key}: {gt_folder}') - # ensure that the two meta_info files are the same - with open(osp.join(input_folder, 'meta_info.txt')) as fin: - input_lmdb_keys = [line.split('.')[0] for line in fin] - with open(osp.join(gt_folder, 'meta_info.txt')) as fin: - gt_lmdb_keys = [line.split('.')[0] for line in fin] - if set(input_lmdb_keys) != set(gt_lmdb_keys): - raise ValueError(f'Keys in {input_key}_folder and {gt_key}_folder are different.') - else: - paths = [] - for lmdb_key in sorted(input_lmdb_keys): - paths.append(dict([(f'{input_key}_path', lmdb_key), (f'{gt_key}_path', lmdb_key)])) - return paths - - -def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl): - """Generate paired paths from an meta information file. - - Each line in the meta information file contains the image names and - image shape (usually for gt), separated by a white space. - - Example of an meta information file: - ``` - 0001_s001.png (480,480,3) - 0001_s002.png (480,480,3) - ``` - - Args: - folders (list[str]): A list of folder path. The order of list should - be [input_folder, gt_folder]. - keys (list[str]): A list of keys identifying folders. The order should - be in consistent with folders, e.g., ['lq', 'gt']. - meta_info_file (str): Path to the meta information file. - filename_tmpl (str): Template for each filename. Note that the - template excludes the file extension. Usually the filename_tmpl is - for files in the input folder. - - Returns: - list[str]: Returned path list. - """ - assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' - f'But got {len(folders)}') - assert len(keys) == 2, ('The len of keys should be 2 with [input_key, gt_key]. ' f'But got {len(keys)}') - input_folder, gt_folder = folders - input_key, gt_key = keys - - with open(meta_info_file, 'r') as fin: - gt_names = [line.split(' ')[0] for line in fin] - - paths = [] - for gt_name in gt_names: - basename, ext = osp.splitext(osp.basename(gt_name)) - input_name = f'{filename_tmpl.format(basename)}{ext}' - input_path = osp.join(input_folder, input_name) - gt_path = osp.join(gt_folder, gt_name) - paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)])) - return paths - - -def paired_paths_from_folder(folders, keys, filename_tmpl): - """Generate paired paths from folders. 
- - Args: - folders (list[str]): A list of folder path. The order of list should - be [input_folder, gt_folder]. - keys (list[str]): A list of keys identifying folders. The order should - be in consistent with folders, e.g., ['lq', 'gt']. - filename_tmpl (str): Template for each filename. Note that the - template excludes the file extension. Usually the filename_tmpl is - for files in the input folder. - - Returns: - list[str]: Returned path list. - """ - assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' - f'But got {len(folders)}') - assert len(keys) == 2, ('The len of keys should be 2 with [input_key, gt_key]. ' f'But got {len(keys)}') - input_folder, gt_folder = folders - input_key, gt_key = keys - - input_paths = list(scandir(input_folder)) - gt_paths = list(scandir(gt_folder)) - assert len(input_paths) == len(gt_paths), (f'{input_key} and {gt_key} datasets have different number of images: ' - f'{len(input_paths)}, {len(gt_paths)}.') - paths = [] - for gt_path in gt_paths: - basename, ext = osp.splitext(osp.basename(gt_path)) - input_name = f'{filename_tmpl.format(basename)}{ext}' - input_path = osp.join(input_folder, input_name) - assert input_name in input_paths, (f'{input_name} is not in ' f'{input_key}_paths.') - gt_path = osp.join(gt_folder, gt_path) - paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)])) - return paths - - -def paths_from_folder(folder): - """Generate paths from folder. - - Args: - folder (str): Folder path. - - Returns: - list[str]: Returned path list. - """ - - paths = list(scandir(folder)) - paths = [osp.join(folder, path) for path in paths] - return paths - - -def paths_from_lmdb(folder): - """Generate paths from lmdb. - - Args: - folder (str): Folder path. - - Returns: - list[str]: Returned path list. - """ - if not folder.endswith('.lmdb'): - raise ValueError(f'Folder {folder}folder should in lmdb format.') - with open(osp.join(folder, 'meta_info.txt')) as fin: - paths = [line.split('.')[0] for line in fin] - return paths - - -def generate_gaussian_kernel(kernel_size=13, sigma=1.6): - """Generate Gaussian kernel used in `duf_downsample`. - - Args: - kernel_size (int): Kernel size. Default: 13. - sigma (float): Sigma of the Gaussian kernel. Default: 1.6. - - Returns: - np.array: The Gaussian kernel. - """ - from scipy.ndimage import filters as filters - kernel = np.zeros((kernel_size, kernel_size)) - # set element at the middle to one, a dirac delta - kernel[kernel_size // 2, kernel_size // 2] = 1 - # gaussian-smooth the dirac, resulting in a gaussian filter - return filters.gaussian_filter(kernel, sigma) - - -def duf_downsample(x, kernel_size=13, scale=4): - """Downsamping with Gaussian kernel used in the DUF official code. - - Args: - x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w). - kernel_size (int): Kernel size. Default: 13. - scale (int): Downsampling factor. Supported scale: (2, 3, 4). - Default: 4. - - Returns: - Tensor: DUF downsampled frames. - """ - assert scale in (2, 3, 4), f'Only support scale (2, 3, 4), but got {scale}.' 
- - squeeze_flag = False - if x.ndim == 4: - squeeze_flag = True - x = x.unsqueeze(0) - b, t, c, h, w = x.size() - x = x.view(-1, 1, h, w) - pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2 - x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), 'reflect') - - gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale) - gaussian_filter = torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(0).unsqueeze(0) - x = F.conv2d(x, gaussian_filter, stride=scale) - x = x[:, :, 2:-2, 2:-2] - x = x.view(b, t, c, x.size(2), x.size(3)) - if squeeze_flag: - x = x.squeeze(0) - return x diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/scripts/crop_align_face.py b/spaces/sklkd93/CodeFormer/CodeFormer/scripts/crop_align_face.py deleted file mode 100644 index 31e66266ac0e5f818fa18b6409993151086bbc8b..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/scripts/crop_align_face.py +++ /dev/null @@ -1,192 +0,0 @@ -""" -brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset) -author: lzhbrian (https://lzhbrian.me) -link: https://gist.github.com/lzhbrian/bde87ab23b499dd02ba4f588258f57d5 -date: 2020.1.5 -note: code is heavily borrowed from - https://github.com/NVlabs/ffhq-dataset - http://dlib.net/face_landmark_detection.py.html -requirements: - conda install Pillow numpy scipy - conda install -c conda-forge dlib - # download face landmark model from: - # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 -""" - -import cv2 -import dlib -import glob -import numpy as np -import os -import PIL -import PIL.Image -import scipy -import scipy.ndimage -import sys -import argparse - -# download model from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 -predictor = dlib.shape_predictor('weights/dlib/shape_predictor_68_face_landmarks-fbdc2cb8.dat') - - -def get_landmark(filepath, only_keep_largest=True): - """get landmark with dlib - :return: np.array shape=(68, 2) - """ - detector = dlib.get_frontal_face_detector() - - img = dlib.load_rgb_image(filepath) - dets = detector(img, 1) - - # Shangchen modified - print("Number of faces detected: {}".format(len(dets))) - if only_keep_largest: - print('Detect several faces and only keep the largest.') - face_areas = [] - for k, d in enumerate(dets): - face_area = (d.right() - d.left()) * (d.bottom() - d.top()) - face_areas.append(face_area) - - largest_idx = face_areas.index(max(face_areas)) - d = dets[largest_idx] - shape = predictor(img, d) - print("Part 0: {}, Part 1: {} ...".format( - shape.part(0), shape.part(1))) - else: - for k, d in enumerate(dets): - print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format( - k, d.left(), d.top(), d.right(), d.bottom())) - # Get the landmarks/parts for the face in box d. 
- shape = predictor(img, d) - print("Part 0: {}, Part 1: {} ...".format( - shape.part(0), shape.part(1))) - - t = list(shape.parts()) - a = [] - for tt in t: - a.append([tt.x, tt.y]) - lm = np.array(a) - # lm is a shape=(68,2) np.array - return lm - -def align_face(filepath, out_path): - """ - :param filepath: str - :return: PIL Image - """ - try: - lm = get_landmark(filepath) - except: - print('No landmark ...') - return - - lm_chin = lm[0:17] # left-right - lm_eyebrow_left = lm[17:22] # left-right - lm_eyebrow_right = lm[22:27] # left-right - lm_nose = lm[27:31] # top-down - lm_nostrils = lm[31:36] # top-down - lm_eye_left = lm[36:42] # left-clockwise - lm_eye_right = lm[42:48] # left-clockwise - lm_mouth_outer = lm[48:60] # left-clockwise - lm_mouth_inner = lm[60:68] # left-clockwise - - # Calculate auxiliary vectors. - eye_left = np.mean(lm_eye_left, axis=0) - eye_right = np.mean(lm_eye_right, axis=0) - eye_avg = (eye_left + eye_right) * 0.5 - eye_to_eye = eye_right - eye_left - mouth_left = lm_mouth_outer[0] - mouth_right = lm_mouth_outer[6] - mouth_avg = (mouth_left + mouth_right) * 0.5 - eye_to_mouth = mouth_avg - eye_avg - - # Choose oriented crop rectangle. - x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] - x /= np.hypot(*x) - x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) - y = np.flipud(x) * [-1, 1] - c = eye_avg + eye_to_mouth * 0.1 - quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) - qsize = np.hypot(*x) * 2 - - # read image - img = PIL.Image.open(filepath) - - output_size = 512 - transform_size = 4096 - enable_padding = False - - # Shrink. - shrink = int(np.floor(qsize / output_size * 0.5)) - if shrink > 1: - rsize = (int(np.rint(float(img.size[0]) / shrink)), - int(np.rint(float(img.size[1]) / shrink))) - img = img.resize(rsize, PIL.Image.ANTIALIAS) - quad /= shrink - qsize /= shrink - - # Crop. - border = max(int(np.rint(qsize * 0.1)), 3) - crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), - int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1])))) - crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), - min(crop[2] + border, - img.size[0]), min(crop[3] + border, img.size[1])) - if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: - img = img.crop(crop) - quad -= crop[0:2] - - # Pad. 
- pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), - int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1])))) - pad = (max(-pad[0] + border, - 0), max(-pad[1] + border, - 0), max(pad[2] - img.size[0] + border, - 0), max(pad[3] - img.size[1] + border, 0)) - if enable_padding and max(pad) > border - 4: - pad = np.maximum(pad, int(np.rint(qsize * 0.3))) - img = np.pad( - np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), - 'reflect') - h, w, _ = img.shape - y, x, _ = np.ogrid[:h, :w, :1] - mask = np.maximum( - 1.0 - - np.minimum(np.float32(x) / pad[0], - np.float32(w - 1 - x) / pad[2]), 1.0 - - np.minimum(np.float32(y) / pad[1], - np.float32(h - 1 - y) / pad[3])) - blur = qsize * 0.02 - img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) - img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0) - img = PIL.Image.fromarray( - np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB') - quad += pad[:2] - - img = img.transform((transform_size, transform_size), PIL.Image.QUAD, - (quad + 0.5).flatten(), PIL.Image.BILINEAR) - - if output_size < transform_size: - img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS) - - # Save aligned image. - print('saveing: ', out_path) - img.save(out_path) - - return img, np.max(quad[:, 0]) - np.min(quad[:, 0]) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--in_dir', type=str, default='./inputs/whole_imgs') - parser.add_argument('--out_dir', type=str, default='./inputs/cropped_faces') - args = parser.parse_args() - - img_list = sorted(glob.glob(f'{args.in_dir}/*.png')) - img_list = sorted(img_list) - - for in_path in img_list: - out_path = os.path.join(args.out_dir, in_path.split("/")[-1]) - out_path = out_path.replace('.jpg', '.png') - size_ = align_face(in_path, out_path) \ No newline at end of file diff --git a/spaces/smjain/smjainvoice/starganv2vc_paddle/Utils/ASR/__init__.py b/spaces/smjain/smjainvoice/starganv2vc_paddle/Utils/ASR/__init__.py deleted file mode 100644 index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000 --- a/spaces/smjain/smjainvoice/starganv2vc_paddle/Utils/ASR/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spaces/society-ethics/StableBiasGen/app.py b/spaces/society-ethics/StableBiasGen/app.py deleted file mode 100644 index aaf70fafbfe12bd4bfb9794587d3199660ecca37..0000000000000000000000000000000000000000 --- a/spaces/society-ethics/StableBiasGen/app.py +++ /dev/null @@ -1,87 +0,0 @@ -import gradio as gr -import random, os, shutil -from PIL import Image -from diffusers import DiffusionPipeline -import pandas as pd -import torch -import tempfile - -from huggingface_hub.utils import build_hf_headers, paginate -from huggingface_hub import upload_folder - -headers = build_hf_headers() -params = {"pipeline_tag": "text-to-image", "sort" : "downloads", "direction": -1} - -#models (can make this list static?) 
-models = list(paginate("https://huggingface.co/api/models", params=params, headers=headers)) -model_ids = [m['id'] for m in models if m['id'] != 'stabilityai/sd-vae-ft-mse'][:20] -short_list = model_ids[:1] -for m in short_list: - DiffusionPipeline.from_pretrained(m, torch_dtype=torch.float16) - -#prompts -prompts = pd.read_csv('promptsadjectives.csv') -m_adjectives = prompts['Masc-adj'].tolist()[:10] -f_adjectives = prompts['Fem-adj'].tolist()[:10] -adjectives = sorted(m_adjectives+f_adjectives) -adjectives.insert(0, '') -professions = sorted([p.lower() for p in prompts['Occupation-Noun'].tolist()]) - -vowels = ["a","e","i","o","u"] - -def query_tti_model1 (modelid, adj, noun): - if adj[0] in vowels: - promptstring = ('Photo portrait of an ' + adj + ' '+ noun).strip() - else: - promptstring = ('Photo portrait of a '+ adj + ' '+ noun).strip() - pipe1 = DiffusionPipeline.from_pretrained(modelid, torch_dtype=torch.float16) - pipe1 = pipe1.to("cuda") - images = pipe1(prompt= promptstring, num_images_per_prompt=9).images - with tempfile.TemporaryDirectory() as tmpdirname: - for i, image in enumerate(images): - image.save(f"{tmpdirname}/{i}.jpg") - upload_folder( - folder_path=tmpdirname, - path_in_repo=f"images/{modelid.replace('/', '-')}/{adj or 'no_adjective'}/{noun}".replace(" ", "_"), - repo_id="society-ethics/stable-bias-generations", - repo_type="dataset", - token=os.getenv("token") - ) - return images - -def query_tti_model2(modelid, adj, noun): - if adj[0] in vowels: - promptstring = ('Photo portrait of an '+ adj + ' '+ noun).strip() - else: - promptstring = ('Photo portrait of a ' + adj + ' '+ noun).strip() - promptstring = (str(adj) + ' '+ noun).strip() - pipe2 = DiffusionPipeline.from_pretrained(modelid, torch_dtype=torch.float16) - pipe2 = pipe2.to("cuda") - images = pipe2(prompt= promptstring, num_images_per_prompt=9) - return images.images - -with gr.Blocks() as demo: - gr.Markdown("# Stable Bias Generator") - gr.Markdown("## Choose from the prompts below to explore how the text-to-image models represent different identity groups.") - gr.Markdown("### See the Stable Bias [Space](https://huggingface.co/spaces/society-ethics/StableBias) for more information about our approach.") - - with gr.Row(): - with gr.Column(): - model1 = gr.Dropdown(model_ids, label = "Choose a model to compare results", value = models[0], interactive=True) - adj1 = gr.Dropdown(adjectives, label = "Choose a first adjective (or leave this blank!)", interactive=True) - choice1 = gr.Dropdown(professions, label = "Choose a first group", interactive=True) - button1 = gr.Button(value="Get my images!") - images1 = gr.Gallery(label="Images").style(grid=[3], height="auto") - with gr.Column(): - model2 = gr.Dropdown(model_ids, label = "Choose a model to compare results", value = models[1], interactive=True) - adj2 = gr.Dropdown(adjectives, label = "Choose a second adjective (or leave this blank!)", interactive=True) - choice2 = gr.Dropdown(professions, label = "Choose a second group", interactive=True) - button2 = gr.Button(value="Get my images!") - images2 = gr.Gallery(label="Images").style(grid=[3], height="auto") - - button1.click(query_tti_model1, [model1, adj1,choice1], [images1]) - button2.click(query_tti_model2, [model2, adj2,choice2], [images2]) - #adj1.change(query_tti_model1, [model1, adj1, choice1], [images1]) - #adj2.change(query_tti_model2, [model2, adj2, choice2], [images2]) - -demo.launch() diff --git a/spaces/sohojoe/project_charles/sample_utils/__init__.py 
b/spaces/sohojoe/project_charles/sample_utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/songwy/VITS-Umamusume-voice-synthesizer/text/shanghainese.py b/spaces/songwy/VITS-Umamusume-voice-synthesizer/text/shanghainese.py deleted file mode 100644 index cb29c24a08d2e406e8399cf7bc9fe5cb43cb9c61..0000000000000000000000000000000000000000 --- a/spaces/songwy/VITS-Umamusume-voice-synthesizer/text/shanghainese.py +++ /dev/null @@ -1,64 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('zaonhe') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ᴇ'), - ('B', 'bi'), - ('C', 'si'), - ('D', 'di'), - ('E', 'i'), - ('F', 'ᴇf'), - ('G', 'dʑi'), - ('H', 'ᴇtɕʰ'), - ('I', 'ᴀi'), - ('J', 'dʑᴇ'), - ('K', 'kʰᴇ'), - ('L', 'ᴇl'), - ('M', 'ᴇm'), - ('N', 'ᴇn'), - ('O', 'o'), - ('P', 'pʰi'), - ('Q', 'kʰiu'), - ('R', 'ᴀl'), - ('S', 'ᴇs'), - ('T', 'tʰi'), - ('U', 'ɦiu'), - ('V', 'vi'), - ('W', 'dᴀbɤliu'), - ('X', 'ᴇks'), - ('Y', 'uᴀi'), - ('Z', 'zᴇ') -]] - - -def _number_to_shanghainese(num): - num = cn2an.an2cn(num).replace('一十','十').replace('二十', '廿').replace('二', '两') - return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num) - - -def number_to_shanghainese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def shanghainese_to_ipa(text): - text = number_to_shanghainese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/criterions/hubert_criterion.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/criterions/hubert_criterion.py deleted file mode 100644 index 68cb24e6f142c46e108c53479fd4027a741f5f92..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/criterions/hubert_criterion.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import math -import re -from dataclasses import dataclass, field -from typing import List, Optional - -import torch -import torch.nn.functional as F -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass - - -@dataclass -class HubertCriterionConfig(FairseqDataclass): - pred_masked_weight: float = field( - default=1.0, - metadata={"help": "weight for predictive loss for masked frames"}, - ) - pred_nomask_weight: float = field( - default=0.0, - metadata={"help": "weight for predictive loss for unmasked frames"}, - ) - loss_weights: Optional[List[float]] = field( - default=None, - metadata={"help": "weights for additional loss terms (not first one)"}, - ) - log_keys: List[str] = field( - default_factory=lambda: [], - metadata={"help": "output keys to log"}, - ) - - -@register_criterion("hubert", dataclass=HubertCriterionConfig) -class HubertCriterion(FairseqCriterion): - def __init__(self, task, pred_masked_weight, pred_nomask_weight, loss_weights=None, log_keys=None): - super().__init__(task) - self.pred_masked_weight = pred_masked_weight - self.pred_nomask_weight = pred_nomask_weight - self.loss_weights = loss_weights - self.log_keys = [] if log_keys is None else log_keys - - def forward(self, model, sample, reduce=True, log_pred=False): - """Compute the loss for the given sample. - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - net_output = model(target_list=sample["target_list"], **sample["net_input"]) - loss = 0. - sample_size = 0 - logging_output = {} - reduction = "sum" if reduce else "none" - - loss_m_list = [] - logp_m_list = model.get_logits(net_output, True) - targ_m_list = model.get_targets(net_output, True) - assert self.pred_masked_weight == 0 or len(logp_m_list) > 0 - for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)): - loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction) - loss_m_list.append(loss_m) - logging_output[f"loss_m_{i}"] = loss_m.detach().item() - if self.pred_masked_weight > 0: - loss += self.pred_masked_weight * sum(loss_m_list) - sample_size += targ_m_list[0].numel() - - loss_u_list = [] - logp_u_list = model.get_logits(net_output, False) - targ_u_list = model.get_targets(net_output, False) - assert self.pred_nomask_weight == 0 or len(logp_u_list) > 0 - for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)): - loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction) - loss_u_list.append(loss_u) - logging_output[f"loss_u_{i}"] = loss_u.detach().item() - if self.pred_nomask_weight > 0: - loss += self.pred_nomask_weight * sum(loss_u_list) - sample_size += targ_u_list[0].numel() - - if self.loss_weights is not None: - assert hasattr(model, "get_extra_losses") - extra_losses, names = model.get_extra_losses(net_output) - if torch.is_tensor(extra_losses): - extra_losses = [extra_losses] - names = [names] - if len(self.loss_weights) == 1 and len(extra_losses) != 1: - self.loss_weights = [self.loss_weights[0]] * len(extra_losses) - assert len(extra_losses) == len(self.loss_weights), f"{len(extra_losses)}, {len(self.loss_weights)}" - for p, n, coef in zip(extra_losses, names, self.loss_weights): - if coef != 0 and p is not None: - p = coef * p.float() * sample_size - loss += p - logging_output[f"loss_{n}"] = p.item() - - logging_output = { - "loss": loss.item() if reduce else loss, - "ntokens": 
sample_size, - "nsentences": sample["id"].numel(), - "sample_size": sample_size, - **logging_output, - } - - for lk in self.log_keys: - if lk in net_output: - logging_output[lk] = float((net_output[lk])) - - def compute_correct(logits): - if logits.numel() == 0: - return 0, 0 - else: - assert logits.dim() > 1, logits.shape - max = logits.argmax(-1) == 0 - min = logits.argmin(-1) == 0 - both = max & min - corr = max.long().sum().item() - both.long().sum().item() - count = max.numel() - return corr, count - - with torch.no_grad(): - for i, logp_m in enumerate(logp_m_list): - corr_m, count_m = compute_correct(logp_m) - logging_output[f"correct_m_{i}"] = corr_m - logging_output[f"count_m_{i}"] = count_m - - for i, logp_u in enumerate(logp_u_list): - corr_u, count_u = compute_correct(logp_u) - logging_output[f"correct_u_{i}"] = corr_u - logging_output[f"count_u_{i}"] = count_u - - return loss, sample_size, logging_output - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training (copied from normal cross entropy).""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - - metrics.log_scalar("loss", loss_sum / sample_size / math.log(2), sample_size, round=3) - if sample_size != ntokens: - metrics.log_scalar("nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3) - metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)) - else: - metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)) - - counts = {} - for lk in logging_outputs[0].keys(): - if lk.startswith("count_"): - val = sum(log[lk] for log in logging_outputs) - metrics.log_scalar(lk, val) - counts[lk] = val - - for lk in logging_outputs[0].keys(): - if lk.startswith("loss_"): - val = sum(log[lk] for log in logging_outputs) - metrics.log_scalar(lk, val / sample_size / math.log(2), round=3) - elif lk.startswith("correct_"): - val = sum(log[lk] for log in logging_outputs) - metrics.log_scalar(lk, val / counts[re.sub("correct", "count", lk)]) - - @staticmethod - def aggregate_logging_outputs(logging_outputs): - """Aggregate logging outputs from data parallel training.""" - raise NotImplementedError() - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - return False diff --git a/spaces/stomexserde/gpt4-ui/Examples/EaseUS Partition Master 13.8 WinPE Edition !LINK!.md b/spaces/stomexserde/gpt4-ui/Examples/EaseUS Partition Master 13.8 WinPE Edition !LINK!.md deleted file mode 100644 index f0eda892e92ca2d82f50aad4ac79b952ffbff107..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/EaseUS Partition Master 13.8 WinPE Edition !LINK!.md +++ /dev/null @@ -1,32 +0,0 @@ - -

    How to Use EaseUS Partition Master 13.8 WinPE Edition to Manage Your Disk Partitions

    -

EaseUS Partition Master is powerful, easy-to-use disk partitioning software that allows you to create, resize, merge, split, clone, format, delete, wipe, and convert partitions on your hard drive or SSD. It also supports dynamic disk management, RAID recovery, Windows Storage Spaces, and more.

    -

    One of the features of EaseUS Partition Master is the WinPE bootable disk creator, which enables you to boot your computer from a CD/DVD or USB drive and access the partition tool without loading Windows. This is useful when you encounter system failure, virus infection, or other problems that prevent you from booting into Windows normally.

    -

    EaseUS Partition Master 13.8 WinPE Edition


    Download Zip 🗹 https://urlgoal.com/2uI9sA



    -

    In this article, we will show you how to use EaseUS Partition Master 13.8 WinPE Edition to create a WinPE bootable disk and perform some common partition operations on your disk.

    -

    How to Create a WinPE Bootable Disk with EaseUS Partition Master 13.8

    -

To create a WinPE bootable disk with EaseUS Partition Master 13.8, you need a blank CD/DVD or USB drive with at least 256 MB of free space (a quick way to verify this is sketched after the steps below). You also need to download and install EaseUS Partition Master 13.8 on your computer. Then follow these steps:

    -
      -
    1. Launch EaseUS Partition Master 13.8 and click "WinPE Creator" on the toolbar.
    2. -
    3. Select the device type (CD/DVD or USB drive) and the drive letter of your device. Click "Proceed" to start creating the bootable disk.
    4. -
    5. Wait for the process to finish and then eject the device.
    6. -
    -

    You have successfully created a WinPE bootable disk with EaseUS Partition Master 13.8. You can now use it to boot your computer and access the partition tool.
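
    Before starting, it can be worth confirming that the target drive really has the 256 MB the tool needs. Below is a minimal, purely illustrative Python sketch (it is not part of EaseUS Partition Master, and the drive letter is a placeholder) that uses only the standard library:

```python
# Pre-flight check: does the target drive have the ~256 MB of free space
# that the WinPE bootable disk creator needs? (Drive letter is a placeholder.)
import shutil

REQUIRED_BYTES = 256 * 1024 * 1024  # 256 MB, per the requirement above

def has_enough_space(drive: str) -> bool:
    # shutil.disk_usage returns a (total, used, free) named tuple in bytes
    return shutil.disk_usage(drive).free >= REQUIRED_BYTES

if __name__ == "__main__":
    drive = "E:\\"  # hypothetical USB drive letter -- adjust for your system
    if has_enough_space(drive):
        print(f"{drive} has enough free space for the WinPE files.")
    else:
        print(f"{drive} is too small; use a device with at least 256 MB free.")
```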

    -

    How to Boot Your Computer from a WinPE Bootable Disk

    -

To boot your computer from a WinPE bootable disk, you need to change the boot order in your computer's BIOS or UEFI settings. The exact steps vary by model and manufacturer, but in general you can do the following:

    -
      -
    1. Insert the WinPE bootable disk into your CD/DVD drive or USB port.
    2. -
3. Restart your computer and press the key shown on the screen (such as F2, F12, Del, or Esc) to enter the BIOS or UEFI settings.
    4. -
    5. Navigate to the Boot menu and select the CD/DVD or USB device as the first boot option. Save the changes and exit.
    6. -
    7. Your computer will reboot and load the WinPE environment from the bootable disk.
    8. -
    -

    You have successfully booted your computer from a WinPE bootable disk. You can now use EaseUS Partition Master 13.8 to manage your disk partitions.

    -

    How to Use EaseUS Partition Master 13.8 WinPE Edition to Manage Your Disk Partitions

    -

    Once you have booted your computer from a WinPE bootable disk, you will see the main interface of EaseUS Partition Master 13.8. You can use it to perform various partition operations on your disk, such as:

    -
      -
    • Create a new partition: Right-click on an unallocated space on your disk and select "Create". Specify the partition size, drive letter, file system, label, and other options. Click "OK" and then "Execute Operation" to apply the changes.
    • -
    • Resize a partition: Right-click on a partition that you want to resize and select "Resize/Move". Drag the partition handle left or right to adjust its size. Click "OK" and then "Execute Operation" to apply the changes.
    • -
• Merge two partitions: Right-click on a partition that you want to merge and select "Merge". Select the partition you want to merge it with and click "OK". Click "Execute Operation" to apply the changes.
    • -
• Split a partition: Right-click on a partition that you want to split and select "Split". Specify the size of each new partition and click "OK". Click "Execute Operation" to apply the changes. (For a scripted alternative, see the sketch below.)
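
    For readers who prefer the command line, the same kind of resize can also be scripted with Windows' built-in diskpart utility rather than the EaseUS GUI. The sketch below is illustrative only and not part of EaseUS Partition Master: the disk and partition numbers are placeholders, diskpart sizes are in MB, and the script must run from an elevated (administrator) prompt. Double-check the selected disk before running anything, since these operations modify the partition table.

```python
# Illustrative alternative: shrink a partition by scripting Windows'
# built-in diskpart tool. Disk/partition numbers are placeholders.
import os
import subprocess
import tempfile

DISKPART_SCRIPT = """\
select disk 0
select partition 2
shrink desired=10240
"""  # shrink partition 2 on disk 0 by 10 GB (diskpart sizes are in MB)

def run_diskpart(script: str) -> str:
    # diskpart reads its commands from a script file passed with /s
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write(script)
        path = f.name
    try:
        result = subprocess.run(
            ["diskpart", "/s", path],
            capture_output=True, text=True, check=True,
        )
        return result.stdout
    finally:
        os.remove(path)

if __name__ == "__main__":
    print(run_diskpart(DISKPART_SCRIPT))
```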

      -
      -
      \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/metagpt/roles/teacher.py b/spaces/sub314xxl/MetaGPT/metagpt/roles/teacher.py deleted file mode 100644 index 031ce94c99698d7dcdcfae8510b0aac6a207d336..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/roles/teacher.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/7/27 -@Author : mashenquan -@File : teacher.py -@Modified By: mashenquan, 2023/8/22. A definition has been provided for the return value of _think: returning false indicates that further reasoning cannot continue. - -""" - - -import re - -import aiofiles - -from metagpt.actions.write_teaching_plan import ( - TeachingPlanRequirement, - WriteTeachingPlanPart, -) -from metagpt.config import CONFIG -from metagpt.logs import logger -from metagpt.roles import Role -from metagpt.schema import Message - - -class Teacher(Role): - """Support configurable teacher roles, - with native and teaching languages being replaceable through configurations.""" - - def __init__( - self, - name="Lily", - profile="{teaching_language} Teacher", - goal="writing a {language} teaching plan part by part", - constraints="writing in {language}", - desc="", - *args, - **kwargs, - ): - super().__init__(name=name, profile=profile, goal=goal, constraints=constraints, desc=desc, *args, **kwargs) - actions = [] - for topic in WriteTeachingPlanPart.TOPICS: - act = WriteTeachingPlanPart(topic=topic, llm=self._llm) - actions.append(act) - self._init_actions(actions) - self._watch({TeachingPlanRequirement}) - - async def _think(self) -> bool: - """Everything will be done part by part.""" - if self._rc.todo is None: - self._set_state(0) - return True - - if self._rc.state + 1 < len(self._states): - self._set_state(self._rc.state + 1) - return True - - self._rc.todo = None - return False - - async def _react(self) -> Message: - ret = Message(content="") - while True: - await self._think() - if self._rc.todo is None: - break - logger.debug(f"{self._setting}: {self._rc.state=}, will do {self._rc.todo}") - msg = await self._act() - if ret.content != "": - ret.content += "\n\n\n" - ret.content += msg.content - logger.info(ret.content) - await self.save(ret.content) - return ret - - async def save(self, content): - """Save teaching plan""" - filename = Teacher.new_file_name(self.course_title) - pathname = CONFIG.workspace / "teaching_plan" - pathname.mkdir(exist_ok=True) - pathname = pathname / filename - try: - async with aiofiles.open(str(pathname), mode="w", encoding="utf-8") as writer: - await writer.write(content) - except Exception as e: - logger.error(f"Save failed:{e}") - logger.info(f"Save to:{pathname}") - - @staticmethod - def new_file_name(lesson_title, ext=".md"): - """Create a related file name based on `lesson_title` and `ext`.""" - # Define the special characters that need to be replaced. - illegal_chars = r'[#@$%!*&\\/:*?"<>|\n\t \']' - # Replace the special characters with underscores. 
- filename = re.sub(illegal_chars, "_", lesson_title) + ext - return re.sub(r"_+", "_", filename) - - @property - def course_title(self): - """Return course title of teaching plan""" - default_title = "teaching_plan" - for act in self._actions: - if act.topic != WriteTeachingPlanPart.COURSE_TITLE: - continue - if act.rsp is None: - return default_title - title = act.rsp.lstrip("# \n") - if "\n" in title: - ix = title.index("\n") - title = title[0:ix] - return title - - return default_title diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/actions/test_write_prd_review.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/actions/test_write_prd_review.py deleted file mode 100644 index 5077fa4657ee95a5e28d350769de86b4576f1a0a..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/tests/metagpt/actions/test_write_prd_review.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/11 17:45 -@Author : alexanderwu -@File : test_write_prd_review.py -""" -import pytest - -from metagpt.actions.write_prd_review import WritePRDReview - - -@pytest.mark.asyncio -async def test_write_prd_review(): - prd = """ - Introduction: This is a new feature for our product. - Goals: The goal is to improve user engagement. - User Scenarios: The expected user group is millennials who like to use social media. - Requirements: The feature needs to be interactive and user-friendly. - Constraints: The feature needs to be implemented within 2 months. - Mockups: There will be a new button on the homepage that users can click to access the feature. - Metrics: We will measure the success of the feature by user engagement metrics. - Timeline: The feature should be ready for testing in 1.5 months. - """ - - write_prd_review = WritePRDReview("write_prd_review") - - prd_review = await write_prd_review.run(prd) - - # We cannot exactly predict the generated PRD review, but we can check if it is a string and if it is not empty - assert isinstance(prd_review, str) - assert len(prd_review) > 0 diff --git a/spaces/supertori/files/stable-diffusion-webui/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/spaces/supertori/files/stable-diffusion-webui/extensions-builtin/LDSR/sd_hijack_autoencoder.py deleted file mode 100644 index 8e03c7f898988c237c714ed949610f5035b30b50..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/extensions-builtin/LDSR/sd_hijack_autoencoder.py +++ /dev/null @@ -1,286 +0,0 @@ -# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo -# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo -# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder - -import torch -import pytorch_lightning as pl -import torch.nn.functional as F -from contextlib import contextmanager -from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer -from ldm.modules.diffusionmodules.model import Encoder, Decoder -from ldm.util import instantiate_from_config - -import ldm.models.autoencoder - -class VQModel(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - n_embed, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - batch_resize_range=None, - scheduler_config=None, - lr_g_factor=1.0, - remap=None, - sane_index_shape=False, # tell vector 
quantizer to return indices as bhw - use_ema=False - ): - super().__init__() - self.embed_dim = embed_dim - self.n_embed = n_embed - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, - remap=remap, - sane_index_shape=sane_index_shape) - self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - self.batch_resize_range = batch_resize_range - if self.batch_resize_range is not None: - print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") - - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - self.scheduler_config = scheduler_config - self.lr_g_factor = lr_g_factor - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.parameters()) - self.model_ema.copy_to(self) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - print(f"Unexpected Keys: {unexpected}") - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self) - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - quant, emb_loss, info = self.quantize(h) - return quant, emb_loss, info - - def encode_to_prequant(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, quant): - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - def decode_code(self, code_b): - quant_b = self.quantize.embed_code(code_b) - dec = self.decode(quant_b) - return dec - - def forward(self, input, return_pred_indices=False): - quant, diff, (_,_,ind) = self.encode(input) - dec = self.decode(quant) - if return_pred_indices: - return dec, diff, ind - return dec, diff - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - if self.batch_resize_range is not None: - lower_size = self.batch_resize_range[0] - upper_size = self.batch_resize_range[1] - if self.global_step <= 4: - # do the first few batches with max size to avoid later oom - new_resize = upper_size - else: - new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) - if new_resize != x.shape[2]: - x = F.interpolate(x, size=new_resize, mode="bicubic") - x = x.detach() - return x - - def 
training_step(self, batch, batch_idx, optimizer_idx): - # https://github.com/pytorch/pytorch/issues/37142 - # try not to fool the heuristics - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - - if optimizer_idx == 0: - # autoencode - aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train", - predicted_indices=ind) - - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return aeloss - - if optimizer_idx == 1: - # discriminator - discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return discloss - - def validation_step(self, batch, batch_idx): - log_dict = self._validation_step(batch, batch_idx) - with self.ema_scope(): - log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") - return log_dict - - def _validation_step(self, batch, batch_idx, suffix=""): - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - - discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] - self.log(f"val{suffix}/rec_loss", rec_loss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - self.log(f"val{suffix}/aeloss", aeloss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - if version.parse(pl.__version__) >= version.parse('1.4.0'): - del log_dict_ae[f"val{suffix}/rec_loss"] - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr_d = self.learning_rate - lr_g = self.lr_g_factor*self.learning_rate - print("lr_d", lr_d) - print("lr_g", lr_g) - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quantize.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr_g, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr_d, betas=(0.5, 0.9)) - - if self.scheduler_config is not None: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - { - 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - ] - return [opt_ae, opt_disc], scheduler - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if only_inputs: - log["inputs"] = x - return log - xrec, _ = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["inputs"] = x - log["reconstructions"] = xrec - if plot_ema: - with self.ema_scope(): - xrec_ema, _ = self(x) - if x.shape[1] > 3: xrec_ema = 
self.to_rgb(xrec_ema) - log["reconstructions_ema"] = xrec_ema - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. - return x - - -class VQModelInterface(VQModel): - def __init__(self, embed_dim, *args, **kwargs): - super().__init__(embed_dim=embed_dim, *args, **kwargs) - self.embed_dim = embed_dim - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, h, force_not_quantize=False): - # also go through quantization layer - if not force_not_quantize: - quant, emb_loss, info = self.quantize(h) - else: - quant = h - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - -setattr(ldm.models.autoencoder, "VQModel", VQModel) -setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface) diff --git a/spaces/supertori/files/stable-diffusion-webui/extensions/openpose-editor/scripts/openpose/model.py b/spaces/supertori/files/stable-diffusion-webui/extensions/openpose-editor/scripts/openpose/model.py deleted file mode 100644 index 9c3456fcc372de1e0c0cf95497d39a4310e7c87f..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/extensions/openpose-editor/scripts/openpose/model.py +++ /dev/null @@ -1,221 +0,0 @@ -# This code from https://github.com/lllyasviel/ControlNet - -import torch -from collections import OrderedDict - -import torch -import torch.nn as nn - -def make_layers(block, no_relu_layers): - layers = [] - for layer_name, v in block.items(): - if 'pool' in layer_name: - layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], - padding=v[2]) - layers.append((layer_name, layer)) - else: - conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], - kernel_size=v[2], stride=v[3], - padding=v[4]) - layers.append((layer_name, conv2d)) - if layer_name not in no_relu_layers: - layers.append(('relu_'+layer_name, nn.ReLU(inplace=True))) - - return nn.Sequential(OrderedDict(layers)) - -class bodypose_model(nn.Module): - def __init__(self): - super(bodypose_model, self).__init__() - - # these layers have no relu layer - no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\ - 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\ - 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\ - 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1'] - blocks = {} - block0 = OrderedDict([ - ('conv1_1', [3, 64, 3, 1, 1]), - ('conv1_2', [64, 64, 3, 1, 1]), - ('pool1_stage1', [2, 2, 0]), - ('conv2_1', [64, 128, 3, 1, 1]), - ('conv2_2', [128, 128, 3, 1, 1]), - ('pool2_stage1', [2, 2, 0]), - ('conv3_1', [128, 256, 3, 1, 1]), - ('conv3_2', [256, 256, 3, 1, 1]), - ('conv3_3', [256, 256, 3, 1, 1]), - ('conv3_4', [256, 256, 3, 1, 1]), - ('pool3_stage1', [2, 2, 0]), - ('conv4_1', [256, 512, 3, 1, 1]), - ('conv4_2', [512, 512, 3, 1, 1]), - ('conv4_3_CPM', [512, 256, 3, 1, 1]), - ('conv4_4_CPM', [256, 128, 3, 1, 1]) - ]) - - - # Stage 1 - block1_1 = OrderedDict([ - ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), - ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]), - ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]), - ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), - ('conv5_5_CPM_L1', [512, 38, 1, 1, 0]) - ]) - - block1_2 = OrderedDict([ - ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), - ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]), - ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]), - ('conv5_4_CPM_L2', [128, 
512, 1, 1, 0]), - ('conv5_5_CPM_L2', [512, 19, 1, 1, 0]) - ]) - blocks['block1_1'] = block1_1 - blocks['block1_2'] = block1_2 - - self.model0 = make_layers(block0, no_relu_layers) - - # Stages 2 - 6 - for i in range(2, 7): - blocks['block%d_1' % i] = OrderedDict([ - ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]), - ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), - ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0]) - ]) - - blocks['block%d_2' % i] = OrderedDict([ - ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]), - ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), - ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0]) - ]) - - for k in blocks.keys(): - blocks[k] = make_layers(blocks[k], no_relu_layers) - - self.model1_1 = blocks['block1_1'] - self.model2_1 = blocks['block2_1'] - self.model3_1 = blocks['block3_1'] - self.model4_1 = blocks['block4_1'] - self.model5_1 = blocks['block5_1'] - self.model6_1 = blocks['block6_1'] - - self.model1_2 = blocks['block1_2'] - self.model2_2 = blocks['block2_2'] - self.model3_2 = blocks['block3_2'] - self.model4_2 = blocks['block4_2'] - self.model5_2 = blocks['block5_2'] - self.model6_2 = blocks['block6_2'] - - - def forward(self, x): - - out1 = self.model0(x) - - out1_1 = self.model1_1(out1) - out1_2 = self.model1_2(out1) - out2 = torch.cat([out1_1, out1_2, out1], 1) - - out2_1 = self.model2_1(out2) - out2_2 = self.model2_2(out2) - out3 = torch.cat([out2_1, out2_2, out1], 1) - - out3_1 = self.model3_1(out3) - out3_2 = self.model3_2(out3) - out4 = torch.cat([out3_1, out3_2, out1], 1) - - out4_1 = self.model4_1(out4) - out4_2 = self.model4_2(out4) - out5 = torch.cat([out4_1, out4_2, out1], 1) - - out5_1 = self.model5_1(out5) - out5_2 = self.model5_2(out5) - out6 = torch.cat([out5_1, out5_2, out1], 1) - - out6_1 = self.model6_1(out6) - out6_2 = self.model6_2(out6) - - return out6_1, out6_2 - -class handpose_model(nn.Module): - def __init__(self): - super(handpose_model, self).__init__() - - # these layers have no relu layer - no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\ - 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6'] - # stage 1 - block1_0 = OrderedDict([ - ('conv1_1', [3, 64, 3, 1, 1]), - ('conv1_2', [64, 64, 3, 1, 1]), - ('pool1_stage1', [2, 2, 0]), - ('conv2_1', [64, 128, 3, 1, 1]), - ('conv2_2', [128, 128, 3, 1, 1]), - ('pool2_stage1', [2, 2, 0]), - ('conv3_1', [128, 256, 3, 1, 1]), - ('conv3_2', [256, 256, 3, 1, 1]), - ('conv3_3', [256, 256, 3, 1, 1]), - ('conv3_4', [256, 256, 3, 1, 1]), - ('pool3_stage1', [2, 2, 0]), - ('conv4_1', [256, 512, 3, 1, 1]), - ('conv4_2', [512, 512, 3, 1, 1]), - ('conv4_3', [512, 512, 3, 1, 1]), - ('conv4_4', [512, 512, 3, 1, 1]), - ('conv5_1', [512, 512, 3, 1, 1]), - ('conv5_2', [512, 512, 3, 1, 1]), - ('conv5_3_CPM', [512, 128, 3, 1, 1]) - ]) - - block1_1 = OrderedDict([ - ('conv6_1_CPM', [128, 512, 1, 1, 0]), - ('conv6_2_CPM', [512, 22, 1, 1, 0]) - ]) - - blocks = {} - blocks['block1_0'] = block1_0 - blocks['block1_1'] = block1_1 - - # stage 2-6 - for i in range(2, 7): - blocks['block%d' % i] = OrderedDict([ - ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]), - ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]), - 
('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]), - ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0]) - ]) - - for k in blocks.keys(): - blocks[k] = make_layers(blocks[k], no_relu_layers) - - self.model1_0 = blocks['block1_0'] - self.model1_1 = blocks['block1_1'] - self.model2 = blocks['block2'] - self.model3 = blocks['block3'] - self.model4 = blocks['block4'] - self.model5 = blocks['block5'] - self.model6 = blocks['block6'] - - def forward(self, x): - out1_0 = self.model1_0(x) - out1_1 = self.model1_1(out1_0) - concat_stage2 = torch.cat([out1_1, out1_0], 1) - out_stage2 = self.model2(concat_stage2) - concat_stage3 = torch.cat([out_stage2, out1_0], 1) - out_stage3 = self.model3(concat_stage3) - concat_stage4 = torch.cat([out_stage3, out1_0], 1) - out_stage4 = self.model4(concat_stage4) - concat_stage5 = torch.cat([out_stage4, out1_0], 1) - out_stage5 = self.model5(concat_stage5) - concat_stage6 = torch.cat([out_stage5, out1_0], 1) - out_stage6 = self.model6(concat_stage6) - return out_stage6 - - diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (the Tarix Jabrix 3 Full Movie Downlo) PATCHED.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (the Tarix Jabrix 3 Full Movie Downlo) PATCHED.md deleted file mode 100644 index 440406717567b8a5029be895e4af2fecc250b973..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (the Tarix Jabrix 3 Full Movie Downlo) PATCHED.md +++ /dev/null @@ -1,17 +0,0 @@ -
      -

      How to Watch The Tarix Jabrix 3 Online in HD Quality

      -

The Tarix Jabrix 3 is a 2011 Indonesian comedy film directed by Iqbal Rais and starring Tria Changcut, Erick Changcut, Dipa Changcut, and Alda Changcut. It is the third installment of the The Tarix Jabrix film series, following The Tarix Jabrix (2008) and The Tarix Jabrix 2 (2009). The film follows the adventures of a group of friends who call themselves The Tarix Jabrix, as they negotiate with their old enemies, the Road Devils motorcycle gang, to stop them from causing more riots in Bandung City.

      -

      HD Online Player (the tarix jabrix 3 full movie downlo)


      Download File https://cinurl.com/2uEZ5R



      -

      If you are a fan of The Tarix Jabrix series or Indonesian comedy films in general, you might be wondering how to watch The Tarix Jabrix 3 online in HD quality. Well, you are in luck, because there are several options available for you to enjoy this hilarious film from the comfort of your home. Here are some of them:

      -
        -
      • Disney+ Hotstar: This is the official streaming platform for The Tarix Jabrix 3, as well as many other Indonesian films and shows. You can watch The Tarix Jabrix 3 on Disney+ Hotstar with a subscription or a one-time purchase. You can also download the film to watch offline on your device. Disney+ Hotstar offers HD quality and subtitles in various languages.
      • -
      • IMDb: This is the most popular website for movie information and ratings. You can watch The Tarix Jabrix 3 on IMDb with an IMDb TV account, which is free but supported by ads. You can also rent or buy the film on Amazon Prime Video through IMDb. IMDb offers HD quality and subtitles in various languages.
      • -
      • BioskopGaul: This is a website that provides free streaming and downloading of Indonesian films and shows. You can watch The Tarix Jabrix 3 on BioskopGaul without registration or payment. However, the quality and subtitles may vary depending on the source. BioskopGaul also has a mobile app that you can use to watch on your device.
      • -
      -

      These are some of the ways you can watch The Tarix Jabrix 3 online in HD quality. Whichever option you choose, we hope you have a great time watching this fun and entertaining film.

      - -

      If you want to know more about The Tarix Jabrix 3 and the other films in the series, you can visit the IMDb page for the film, where you can find the cast and crew information, trivia, reviews, and more. You can also watch the trailer and clips of the film on YouTube or Vidio. You can also follow the official social media accounts of the film and the actors for updates and behind-the-scenes content.

      -

      -

      The Tarix Jabrix 3 is a film that celebrates friendship, loyalty, and humor. It is a film that will make you laugh and feel nostalgic for your own childhood memories. It is a film that showcases the talent and charm of Indonesian cinema. If you are looking for a good comedy film to watch online, you should definitely check out The Tarix Jabrix 3.

      -
      -
      \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Roms 3ds La Nouvelle Maison Du Style.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Roms 3ds La Nouvelle Maison Du Style.md deleted file mode 100644 index 747d324c473fc7ee175f53851cb66b16f79b3fd7..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Roms 3ds La Nouvelle Maison Du Style.md +++ /dev/null @@ -1,8 +0,0 @@ - -

This project, authorized by the ministries of health, environmental protection and consumer protection, promotes biodynamic methods. These are among the first organic guesthouses in Europe. A distribution centre supplies 2,000 organic houses across 10 European countries. The project is part of the "Madame Nature" campaign, which supports 35,000 homeowners in a European partnership with the environmental and biodiversity protection authorities.

      -

      Roms 3ds La Nouvelle Maison Du Style


      Download File ⚙⚙⚙ https://cinurl.com/2uEY1s



      -


The first film from the potentially expansive, ambitious Army of the Dead cinematic universe will be Army of the Dead itself, the fast-paced thriller that chronicles the aftermath of the initial zombie outbreak, with the undead effectively quarantined in Nevada's infamous desert city. Although the movie promises large-scale zombie action, the plot itself zeroes in on the central heist team's attempts to recover money from a casino under siege by the undead. There will reportedly be alpha zombies and an Area 51 origin for the outbreak that, given Snyder's recurring theme of duplicitous authorities, will see some shadowy Stranger Things-style government conspiracies uncovered.

      -
      -
      \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/S.f.i. Flasher !NEW! Download.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/S.f.i. Flasher !NEW! Download.md deleted file mode 100644 index 77b389c7e7240c22f02a19fc68f814cdfbfea8a5..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/S.f.i. Flasher !NEW! Download.md +++ /dev/null @@ -1,8 +0,0 @@ -

      s.f.i. flasher download


      Download File ☆☆☆☆☆ https://cinurl.com/2uEY6b



- -Nokia N8 N800 - Nokia N8, www.nokia.com. N8 (N800) flasher=0, CD-ROMs=0, USB=0, mem=0, phone memory=0, ... should be used. I got the initial firmware error above in my recovery menu. The phone won't boot - I get messages about SFI. I have the GPEH on the phone, and I can get into recovery. -. I use Qt as my primary system (a user reported a bug in the N8 - that the phone runs...HP-NX2 - -Nokia N8 Power Guide - Guides, Blogs and Discussions about the N8... Nokia N8 N800! This is a guide to the Nokia N8: how to reset the phone and get a clean...Nokia N8 life test - Bluetooth / WiFi problems, can someone help? Also, when I connect to a WiFi network it does not automatically reconnect; I've tried using my N8 online and then toggling. I am having to reset the N8 when I get home from work because it runs out of charge. About Me: my name is Rosalind, I am a geocacher and a mother of one. I have an N8; it just worked perfectly and I didn't have a problem, but... Qik - mobile video calling. Uploading videos is as easy as sending a text message. I have an N8 and have been using build 66 for some time; I have been trying to restart it several times and it keeps freezing on me.
      -
      -
      -

      diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Android Vmware Image Download NEW!.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Android Vmware Image Download NEW!.md deleted file mode 100644 index b990f494440925abeb43645a87b01eb3a6060e0a..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Android Vmware Image Download NEW!.md +++ /dev/null @@ -1,6 +0,0 @@ -

      android vmware image download


      Download File 🌟 https://urluss.com/2uCGU5



      -
-Installation; Host installer for all VDI configurations; Citrix downloads; VMware ... must be installed within the Virtual Desktop, typically within the image on the ...
      -
      -
      -

      diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/segmentors/cascade_encoder_decoder.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/segmentors/cascade_encoder_decoder.py deleted file mode 100644 index 873957d8d6468147c994493d92ff5c1b15bfb703..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/segmentors/cascade_encoder_decoder.py +++ /dev/null @@ -1,98 +0,0 @@ -from torch import nn - -from annotator.uniformer.mmseg.core import add_prefix -from annotator.uniformer.mmseg.ops import resize -from .. import builder -from ..builder import SEGMENTORS -from .encoder_decoder import EncoderDecoder - - -@SEGMENTORS.register_module() -class CascadeEncoderDecoder(EncoderDecoder): - """Cascade Encoder Decoder segmentors. - - CascadeEncoderDecoder almost the same as EncoderDecoder, while decoders of - CascadeEncoderDecoder are cascaded. The output of previous decoder_head - will be the input of next decoder_head. - """ - - def __init__(self, - num_stages, - backbone, - decode_head, - neck=None, - auxiliary_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None): - self.num_stages = num_stages - super(CascadeEncoderDecoder, self).__init__( - backbone=backbone, - decode_head=decode_head, - neck=neck, - auxiliary_head=auxiliary_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained) - - def _init_decode_head(self, decode_head): - """Initialize ``decode_head``""" - assert isinstance(decode_head, list) - assert len(decode_head) == self.num_stages - self.decode_head = nn.ModuleList() - for i in range(self.num_stages): - self.decode_head.append(builder.build_head(decode_head[i])) - self.align_corners = self.decode_head[-1].align_corners - self.num_classes = self.decode_head[-1].num_classes - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone and heads. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - self.backbone.init_weights(pretrained=pretrained) - for i in range(self.num_stages): - self.decode_head[i].init_weights() - if self.with_auxiliary_head: - if isinstance(self.auxiliary_head, nn.ModuleList): - for aux_head in self.auxiliary_head: - aux_head.init_weights() - else: - self.auxiliary_head.init_weights() - - def encode_decode(self, img, img_metas): - """Encode images with backbone and decode into a semantic segmentation - map of the same size as input.""" - x = self.extract_feat(img) - out = self.decode_head[0].forward_test(x, img_metas, self.test_cfg) - for i in range(1, self.num_stages): - out = self.decode_head[i].forward_test(x, out, img_metas, - self.test_cfg) - out = resize( - input=out, - size=img.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - return out - - def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): - """Run forward function and calculate loss for decode head in - training.""" - losses = dict() - - loss_decode = self.decode_head[0].forward_train( - x, img_metas, gt_semantic_seg, self.train_cfg) - - losses.update(add_prefix(loss_decode, 'decode_0')) - - for i in range(1, self.num_stages): - # forward test again, maybe unnecessary for most methods. 
- prev_outputs = self.decode_head[i - 1].forward_test( - x, img_metas, self.test_cfg) - loss_decode = self.decode_head[i].forward_train( - x, prev_outputs, img_metas, gt_semantic_seg, self.train_cfg) - losses.update(add_prefix(loss_decode, f'decode_{i}')) - - return losses diff --git a/spaces/taesiri/DeepSimilarity/app.py b/spaces/taesiri/DeepSimilarity/app.py deleted file mode 100644 index 9c6fce342398397ee8e377145b6713826ad7d375..0000000000000000000000000000000000000000 --- a/spaces/taesiri/DeepSimilarity/app.py +++ /dev/null @@ -1,187 +0,0 @@ -import csv -import sys - -import gradio as gr -import numpy as np -import skimage.transform -import torch -import torchvision.models as models -import torchvision.transforms as transforms -from matplotlib import pyplot as plt -from numpy import matlib as mb -from PIL import Image - -csv.field_size_limit(sys.maxsize) - - -def compute_spatial_similarity(conv1, conv2): - """ - Takes in the last convolutional layer from two images, computes the pooled output - feature, and then generates the spatial similarity map for both images. - """ - conv1 = conv1.reshape(-1, 7 * 7).T - conv2 = conv2.reshape(-1, 7 * 7).T - - pool1 = np.mean(conv1, axis=0) - pool2 = np.mean(conv2, axis=0) - out_sz = (int(np.sqrt(conv1.shape[0])), int(np.sqrt(conv1.shape[0]))) - conv1_normed = conv1 / np.linalg.norm(pool1) / conv1.shape[0] - conv2_normed = conv2 / np.linalg.norm(pool2) / conv2.shape[0] - im_similarity = np.zeros((conv1_normed.shape[0], conv1_normed.shape[0])) - - for zz in range(conv1_normed.shape[0]): - repPx = mb.repmat(conv1_normed[zz, :], conv1_normed.shape[0], 1) - im_similarity[zz, :] = np.multiply(repPx, conv2_normed).sum(axis=1) - similarity1 = np.reshape(np.sum(im_similarity, axis=1), out_sz) - similarity2 = np.reshape(np.sum(im_similarity, axis=0), out_sz) - return similarity1, similarity2 - - -# Get Layer 4 - -display_transform = transforms.Compose( - [transforms.Resize(256), transforms.CenterCrop((224, 224))] -) - -imagenet_transform = transforms.Compose( - [ - transforms.Resize(256), - transforms.CenterCrop((224, 224)), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), - ] -) - - -class Wrapper(torch.nn.Module): - def __init__(self, model): - super(Wrapper, self).__init__() - self.model = model - self.layer4_ouputs = None - - def fw_hook(module, input, output): - self.layer4_ouputs = output - - self.model.layer4.register_forward_hook(fw_hook) - - def forward(self, input): - _ = self.model(input) - return self.layer4_ouputs - - def __repr__(self): - return "Wrapper" - - -def get_layer4(input_image): - l4_model = models.resnet50(pretrained=True) - # l4_model = l4_model.cuda() - l4_model.eval() - wrapped_model = Wrapper(l4_model) - - with torch.no_grad(): - data = imagenet_transform(input_image).unsqueeze(0) - # data = data.cuda() - reference_layer4 = wrapped_model(data) - - return reference_layer4.data.to("cpu").numpy() - - -def NormalizeData(data): - return (data - np.min(data)) / (np.max(data) - np.min(data)) - - -# Visualization -def visualize_similarities(q, n): - image1 = Image.fromarray(q) - image2 = Image.fromarray(n) - - a = get_layer4(image1).squeeze() - b = get_layer4(image2).squeeze() - sim1, sim2 = compute_spatial_similarity(a, b) - - sim1 = NormalizeData(sim1) - sim2 = NormalizeData(sim2) - - fig, axes = plt.subplots(1, 2, figsize=(12, 5)) - axes[0].imshow(display_transform(image1)) - im1 = axes[0].imshow( - skimage.transform.resize(sim1, (224, 224)), - alpha=0.5, - cmap="jet", - vmin=0, - vmax=1, 
- ) - - axes[1].imshow(display_transform(image2)) - im2 = axes[1].imshow( - skimage.transform.resize(sim2, (224, 224)), - alpha=0.5, - cmap="jet", - vmin=0, - vmax=1, - ) - - axes[0].set_axis_off() - axes[1].set_axis_off() - - fig.colorbar(im1, ax=axes[0]) - fig.colorbar(im2, ax=axes[1]) - plt.tight_layout() - - q_image = display_transform(image1) - nearest_image = display_transform(image2) - - # make a binarized veruin of the Q - fig2, ax = plt.subplots(1, figsize=(5, 5)) - ax.imshow(display_transform(image1)) - - # create a binarized version of sim1 , for value below 0.5 set to 0 and above 0.5 set to 1 - sim1_bin = np.where(sim1 > 0.5, 1, 0) - print(sim1_bin) - # create a binarized version of sim2 , for value below 0.5 set to 0 and above 0.5 set to 1 - sim2_bin = np.where(sim2 > 0.5, 1, 0) - - ax.imshow( - skimage.transform.resize(sim1_bin, (224, 224)), - alpha=1, - cmap="binary", - vmin=0, - vmax=1, - ) - - return fig, q_image, nearest_image, fig2 - - -# GRADIO APP -main = gr.Interface( - fn=visualize_similarities, - inputs=["image", "image"], - allow_flagging="never", - outputs=["plot", "image", "image", "plot"], - cache_examples=True, - enable_queue=False, - examples=[ - [ - "./examples/Red_Winged_Blackbird_0012_6015.jpg", - "./examples/Red_Winged_Blackbird_0025_5342.jpg", - ], - ], -) - -# iface.launch() - -blocks = gr.Blocks() -with blocks: - - gr.Markdown( - """ -# Visualizing Deep Similarity Networks -A quick demo to visualize the similarity between two images. -[Original Paper](https://arxiv.org/pdf/1901.00536.pdf) - [Github Page](https://github.com/GWUvision/Similarity-Visualization) - """ - ) - - gr.TabbedInterface([main], ["Main"]) - - -blocks.launch(debug=True) diff --git a/spaces/temp-late/rhyme-ai/rhyme_with_ai/rhyme_generator.py b/spaces/temp-late/rhyme-ai/rhyme_with_ai/rhyme_generator.py deleted file mode 100644 index 9e92f7c60fdb31cdfaf07362521b6dbb818eac85..0000000000000000000000000000000000000000 --- a/spaces/temp-late/rhyme-ai/rhyme_with_ai/rhyme_generator.py +++ /dev/null @@ -1,175 +0,0 @@ -import logging -from typing import List - -import numpy as np -import tensorflow as tf -from transformers import BertTokenizer, TFAutoModelForMaskedLM - -from rhyme_with_ai.token_weighter import TokenWeighter -from rhyme_with_ai.utils import pairwise - - -class RhymeGenerator: - def __init__( - self, - model: TFAutoModelForMaskedLM, - tokenizer: BertTokenizer, - token_weighter: TokenWeighter = None, - ): - """Generate rhymes. - Parameters - ---------- - model : Model for masked language modelling - tokenizer : Tokenizer for model - token_weighter : Class that weighs tokens - """ - - self.model = model - self.tokenizer = tokenizer - if token_weighter is None: - token_weighter = TokenWeighter(tokenizer) - self.token_weighter = token_weighter - self._logger = logging.getLogger(__name__) - - self.tokenized_rhymes_ = None - self.position_probas_ = None - - # Easy access. - self.comma_token_id = self.tokenizer.encode(",", add_special_tokens=False)[0] - self.period_token_id = self.tokenizer.encode(".", add_special_tokens=False)[0] - self.mask_token_id = self.tokenizer.mask_token_id - - def start(self, query: str, rhyme_words: List[str]) -> None: - """Start the sentence generator. - Parameters - ---------- - query : Seed sentence - rhyme_words : Rhyme words for next sentence - """ - # TODO: What if no content? - self._logger.info("Got sentence %s", query) - tokenized_rhymes = [ - self._initialize_rhymes(query, rhyme_word) for rhyme_word in rhyme_words - ] - # Make same length. 
- self.tokenized_rhymes_ = tf.keras.preprocessing.sequence.pad_sequences( - tokenized_rhymes, padding="post", value=self.tokenizer.pad_token_id - ) - p = self.tokenized_rhymes_ == self.tokenizer.mask_token_id - self.position_probas_ = p / p.sum(1).reshape(-1, 1) - - def _initialize_rhymes(self, query: str, rhyme_word: str) -> List[int]: - """Initialize the rhymes. - * Tokenize input - * Append a comma if the sentence does not end in it (might add better predictions as it - shows the two sentence parts are related) - * Make second line as long as the original - * Add a period - Parameters - ---------- - query : First line - rhyme_word : Last word for second line - Returns - ------- - Tokenized rhyme lines - """ - - query_token_ids = self.tokenizer.encode(query, add_special_tokens=False) - rhyme_word_token_ids = self.tokenizer.encode( - rhyme_word, add_special_tokens=False - ) - - if query_token_ids[-1] != self.comma_token_id: - query_token_ids.append(self.comma_token_id) - - magic_correction = len(rhyme_word_token_ids) + 1 # 1 for comma - return ( - query_token_ids - + [self.tokenizer.mask_token_id] * (len(query_token_ids) - magic_correction) - + rhyme_word_token_ids - + [self.period_token_id] - ) - - def mutate(self): - """Mutate the current rhymes. - Returns - ------- - Mutated rhymes - """ - self.tokenized_rhymes_ = self._mutate( - self.tokenized_rhymes_, self.position_probas_, self.token_weighter.proba - ) - - rhymes = [] - for i in range(len(self.tokenized_rhymes_)): - rhymes.append( - self.tokenizer.convert_tokens_to_string( - self.tokenizer.convert_ids_to_tokens( - self.tokenized_rhymes_[i], skip_special_tokens=True - ) - ) - ) - return rhymes - - def _mutate( - self, - tokenized_rhymes: np.ndarray, - position_probas: np.ndarray, - token_id_probas: np.ndarray, - ) -> np.ndarray: - - replacements = [] - for i in range(tokenized_rhymes.shape[0]): - mask_idx, masked_token_ids = self._mask_token( - tokenized_rhymes[i], position_probas[i] - ) - tokenized_rhymes[i] = masked_token_ids - replacements.append(mask_idx) - - predictions = self._predict_masked_tokens(tokenized_rhymes) - - for i, token_ids in enumerate(tokenized_rhymes): - replace_ix = replacements[i] - token_ids[replace_ix] = self._draw_replacement( - predictions[i], token_id_probas, replace_ix - ) - tokenized_rhymes[i] = token_ids - - return tokenized_rhymes - - def _mask_token(self, token_ids, position_probas): - """Mask line and return index to update.""" - token_ids = self._mask_repeats(token_ids, position_probas) - ix = self._locate_mask(token_ids, position_probas) - token_ids[ix] = self.mask_token_id - return ix, token_ids - - def _locate_mask(self, token_ids, position_probas): - """Update masks or a random token.""" - if self.mask_token_id in token_ids: - # Already masks present, just return the last. - # We used to return thee first but this returns worse predictions. 
- return np.where(token_ids == self.tokenizer.mask_token_id)[0][-1] - return np.random.choice(range(len(position_probas)), p=position_probas) - - def _mask_repeats(self, token_ids, position_probas): - """Repeated tokens are generally of less quality.""" - repeats = [ - ii for ii, ids in enumerate(pairwise(token_ids[:-2])) if ids[0] == ids[1] - ] - for ii in repeats: - if position_probas[ii] > 0: - token_ids[ii] = self.mask_token_id - if position_probas[ii + 1] > 0: - token_ids[ii + 1] = self.mask_token_id - return token_ids - - def _predict_masked_tokens(self, tokenized_rhymes): - return self.model(tf.constant(tokenized_rhymes))[0] - - def _draw_replacement(self, predictions, token_probas, replace_ix): - """Get probability, weigh and draw.""" - # TODO (HG): Can't we softmax when calling the model? - probas = tf.nn.softmax(predictions[replace_ix]).numpy() * token_probas - probas /= probas.sum() - return np.random.choice(range(len(probas)), p=probas) \ No newline at end of file diff --git a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/serve/cli.py b/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/serve/cli.py deleted file mode 100644 index 6c1f210a9af206a21bf4ab1e7a6411f0c96a280f..0000000000000000000000000000000000000000 --- a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/serve/cli.py +++ /dev/null @@ -1,120 +0,0 @@ -import argparse -import torch - -from mplug_owl2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN -from mplug_owl2.conversation import conv_templates, SeparatorStyle -from mplug_owl2.model.builder import load_pretrained_model -from mplug_owl2.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria - -from PIL import Image - -import requests -from PIL import Image -from io import BytesIO -from transformers import TextStreamer - - -def disable_torch_init(): - """ - Disable the redundant torch default initialization to accelerate model creation. 
- """ - import torch - setattr(torch.nn.Linear, "reset_parameters", lambda self: None) - setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None) - - -def load_image(image_file): - if image_file.startswith('http://') or image_file.startswith('https://'): - response = requests.get(image_file) - image = Image.open(BytesIO(response.content)).convert('RGB') - else: - image = Image.open(image_file).convert('RGB') - return image - - -def main(args): - # Model - disable_torch_init() - - model_name = get_model_name_from_path(args.model_path) - tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device) - - conv_mode = "mplug_owl2" - - if args.conv_mode is not None and conv_mode != args.conv_mode: - print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode)) - else: - args.conv_mode = conv_mode - - conv = conv_templates[args.conv_mode].copy() - roles = conv.roles - - image = load_image(args.image_file) - # Similar operation in model_worker.py - image_tensor = process_images([image], image_processor, args) - if type(image_tensor) is list: - image_tensor = [image.to(model.device, dtype=torch.float16) for image in image_tensor] - else: - image_tensor = image_tensor.to(model.device, dtype=torch.float16) - - while True: - try: - inp = input(f"{roles[0]}: ") - except EOFError: - inp = "" - if not inp: - print("exit...") - break - - print(f"{roles[1]}: ", end="") - - if image is not None: - # first message - inp = DEFAULT_IMAGE_TOKEN + inp - conv.append_message(conv.roles[0], inp) - image = None - else: - # later messages - conv.append_message(conv.roles[0], inp) - conv.append_message(conv.roles[1], None) - prompt = conv.get_prompt() - - input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device) - stop_str = conv.sep if conv.sep_style not in [SeparatorStyle.TWO, SeparatorStyle.TWO_NO_SYS] else conv.sep2 - keywords = [stop_str] - stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) - streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) - - with torch.inference_mode(): - output_ids = model.generate( - input_ids, - images=image_tensor, - do_sample=True, - temperature=args.temperature, - max_new_tokens=args.max_new_tokens, - streamer=streamer, - use_cache=True, - stopping_criteria=[stopping_criteria]) - - outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip() - conv.messages[-1][-1] = outputs - - if args.debug: - print("\n", {"prompt": prompt, "outputs": outputs}, "\n") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--model-path", type=str, default="facebook/opt-350m") - parser.add_argument("--model-base", type=str, default=None) - parser.add_argument("--image-file", type=str, required=True) - parser.add_argument("--device", type=str, default="cuda") - parser.add_argument("--conv-mode", type=str, default=None) - parser.add_argument("--temperature", type=float, default=0.2) - parser.add_argument("--max-new-tokens", type=int, default=512) - parser.add_argument("--load-8bit", action="store_true") - parser.add_argument("--load-4bit", action="store_true") - parser.add_argument("--debug", action="store_true") - parser.add_argument("--image-aspect-ratio", type=str, default='pad') - args = parser.parse_args() - main(args) \ No newline at end 
of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Farzand E Pakistan PDF.md b/spaces/terfces0erbo/CollegeProjectV2/Farzand E Pakistan PDF.md deleted file mode 100644 index cdabd9f76e122441cb9e27f648b7f879f68be8b1..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Farzand E Pakistan PDF.md +++ /dev/null @@ -1,18 +0,0 @@ -
      -

      Farzand E Pakistan: A Biography of Sheikh Rasheed Ahmad

      -

      Farzand E Pakistan (Son of Pakistan) is a book written by Sheikh Rasheed Ahmad, a Pakistani politician and the current Minister of Railways. The book was first published in 1995 and contains his personal and political memoirs, as well as his views on various national and international issues.

      -

      The book covers his early life, education, political career, imprisonment, exile, and return to Pakistan. It also reveals some of the secrets and scandals of Pakistani politics, such as the role of the military establishment, the corruption of the ruling elite, the conspiracies against democracy, and the involvement of foreign powers.

      -

      Farzand E Pakistan PDF


      Download File »»» https://bytlly.com/2uGkNX



      -

The book is written in a candid and humorous style, reflecting the personality of the author, who is known for his witty and controversial remarks. The book has been praised by some as a courageous and honest account of Pakistani history, and criticized by others as self-glorifying and sensationalist propaganda.

      -

The book is written in Urdu and can be downloaded as a PDF file from various online sources. However, some of the content may be offensive or inappropriate for some readers, so discretion is advised.

      - -

      Sheikh Rasheed Ahmad has been a prominent figure in Pakistani politics for over three decades. He has served as a federal minister in various cabinets under different prime ministers, holding a record of 15 ministries in 35 years[^2^]. He has also been a member of the National Assembly of Pakistan for several terms, representing his hometown of Rawalpindi.

      -

      Sheikh Rasheed Ahmad is known for his outspoken and often controversial statements on various political and social issues. He has also been involved in several legal cases and controversies, such as his disqualification from the parliament, his arrest and house arrest during the emergency rule in 2007, his hunger strike against corruption in 2010, and his clash with the Pakistan Muslim League (N) leadership.

      -

Sheikh Rasheed Ahmad is also the author of two books: Farzand E Pakistan and Sub Achha Hai. The former is his autobiography, while the latter is a collection of his columns and articles. He has also hosted a talk show on a private TV channel, where he interviewed various celebrities and politicians.

      - -

Farzand E Pakistan has received mixed reviews from readers and critics. Some have praised the book for its bold and candid narration of the political events and personalities of Pakistan, as well as its humorous and witty style. Others have appreciated the book for its historical value and its insight into the life of Sheikh Rasheed Ahmad.

      -

However, some have criticized the book for its lack of objectivity and credibility, as well as its self-serving and sensationalist tone. Others have accused it of being biased, inaccurate, and misleading, and some have found parts of it offensive and vulgar.

      -

      -

      Farzand E Pakistan is a book that reflects the personality and perspective of Sheikh Rasheed Ahmad, who is a controversial and influential figure in Pakistani politics. The book is not a comprehensive or impartial account of Pakistani history, but rather a personal and political memoir of the author. The book may appeal to those who are interested in Pakistani politics and history, or those who are fans of Sheikh Rasheed Ahmad.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/thunder-007/weld-canvas/README.md b/spaces/thunder-007/weld-canvas/README.md deleted file mode 100644 index 9f52f37fbdc7ff0ebf7e306aa4483e0a4b45f76a..0000000000000000000000000000000000000000 --- a/spaces/thunder-007/weld-canvas/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -license: openrail -title: Weld Canvas -sdk: gradio -emoji: ⚡ -colorFrom: blue -colorTo: yellow ---- \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Achieve Your Writing Goals with Ready to Write 2 PDF Download A Book that Teaches You How to Write Perfect Paragraphs.md b/spaces/tialenAdioni/chat-gpt-api/logs/Achieve Your Writing Goals with Ready to Write 2 PDF Download A Book that Teaches You How to Write Perfect Paragraphs.md deleted file mode 100644 index 69c5d7dc9b5ef4dc9c29f66a5dafc7c0b48297c8..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Achieve Your Writing Goals with Ready to Write 2 PDF Download A Book that Teaches You How to Write Perfect Paragraphs.md +++ /dev/null @@ -1,93 +0,0 @@ - -

      Ready to Write 2 PDF Download: A Guide for Perfecting Paragraphs

      -

      Do you want to improve your writing skills and write better paragraphs? Do you need a reliable and easy-to-follow guide that will help you master the basics of paragraph writing? If you answered yes to any of these questions, then you should consider downloading Ready to Write 2 PDF.

      -

      ready to write 2 pdf download


      DOWNLOAD >>> https://urlcod.com/2uK8YM



      -

      Introduction

      -

      What is Ready to Write 2?

      -

      Ready to Write 2 is a textbook for intermediate-level English learners who want to perfect their paragraphs. It was written by Karen Blanchard and Christine Root, two experienced ESL teachers and authors. It was first published in 2002 as Ready to Write, A First Composition Text, and then revised and updated in 2010 as Ready to Write 2: Perfecting Paragraphs.

      -

      Why is it important to perfect paragraphs?

      -

      Paragraphs are the building blocks of any written text. They are units of thought that express a main idea and support it with details, examples, explanations, or arguments. A well-written paragraph has a clear topic sentence, coherent sentences, logical transitions, and a concluding sentence. A well-written paragraph also follows a specific order of information, such as time order, order of importance, spatial order, or cause-effect order.

      -

      Perfecting paragraphs is important because it helps you communicate your ideas effectively and persuasively. It also helps you organize your thoughts and structure your text. By perfecting paragraphs, you can improve your writing style, clarity, accuracy, and coherence. You can also avoid common errors such as run-on sentences, fragments, comma splices, or lack of unity.

      -

      How can you download Ready to Write 2 PDF?

      -

      If you want to download Ready to Write 2 PDF, you have several options. You can buy the book online from Pearson Education or other online retailers. You can also borrow it from a library or a friend. However, if you want to get it for free, you can try some websites that offer free PDF downloads of books. For example, you can visit Archive.org, Yumpu.com, or Academia.edu and search for Ready to Write 2 PDF download. You will find links to download the book in PDF format. However, be careful when downloading files from unknown sources, as they may contain viruses or malware.
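If you are comfortable with a little scripting, the sketch below shows one way to save a PDF from a direct download link using Python's requests library. It is only an illustration: the URL and file name are made-up placeholders, not real sources of the book.

```python
# A minimal sketch of saving a PDF from a direct download link with the
# requests library; the URL and file name are made-up placeholders.

import requests

url = "https://example.com/ready-to-write-2.pdf"     # placeholder link
response = requests.get(url, stream=True, timeout=30)
response.raise_for_status()                          # stop on HTTP errors

with open("ready-to-write-2.pdf", "wb") as f:
    for chunk in response.iter_content(chunk_size=8192):
        f.write(chunk)

print("Saved ready-to-write-2.pdf")
```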

      -

      Main Body

      -

      What are the features of Ready to Write 2?

      -

      Ready to Write 2 is a comprehensive and practical guide that covers all the aspects of paragraph writing. It has 14 chapters that teach you how to:

      -

      Getting organized: The key to good writing

      -

      This chapter introduces you to the importance of planning and outlining your paragraphs before writing them. It shows you how to brainstorm ideas, choose a topic, narrow down your focus, and write a thesis statement.

      -

      ready to write 2 pdf free download
      -ready to write 2 answer key pdf download
      -ready to write 2 third edition pdf download
      -ready to write 2 perfecting paragraphs pdf download
      -ready to write 2 ebook download
      -ready to write 2 book pdf download
      -ready to write 2 online pdf download
      -ready to write 2 teacher's guide pdf download
      -ready to write 2 by karen blanchard pdf download
      -ready to write 2 with essential online resources pdf download
      -how to download ready to write 2 pdf
      -where can i download ready to write 2 pdf
      -download ready to write 2 pdf for free
      -download ready to write 2 pdf full version
      -download ready to write 2 pdf from google drive
      -download ready to write 2 pdf from scribd
      -download ready to write 2 pdf from amazon
      -download ready to write 2 pdf from pearson
      -best site to download ready to write 2 pdf
      -best way to download ready to write 2 pdf
      -ready to write 2 pdf file download
      -ready to write 2 pdf format download
      -ready to write 2 pdf document download
      -ready to write 2 pdf text download
      -ready to write 2 pdf content download
      -ready to write more from paragraph to essay pdf download
      -ready to write series level 2 pdf download
      -pearson longman ready to write 2 pdf download
      -blanchard and root ready to write 2 pdf download
      -blanchard karen and christine root. (2016).ready.to.write.3rd.edition.pdf. new york: pearson longman. pp. xii + 186. isbn:9780131363343. us$53.32. (paperback) (review) (download)
      -how can i get a free copy of the book "ready to write" by karen blanchard and christine root in a PDF format? (download)
      -what are some good websites for downloading free PDF books like "ready to write" by karen blanchard and christine root? (download)
      -is there a torrent link for downloading "ready to write" by karen blanchard and christine root in PDF format? (download)
      -can i use a VPN to download "ready to write" by karen blanchard and christine root in PDF format without getting caught? (download)
      -can i print out the PDF file of "ready to write" by karen blanchard and christine root after downloading it? (download)
      -how much space does the PDF file of "ready to write" by karen blanchard and christine root take up on my device after downloading it? (download)
      -how long does it take to download the PDF file of "ready to write" by karen blanchard and christine root? (download)
      -how can i convert the PDF file of "ready to write" by karen blanchard and christine root into other formats like epub or mobi after downloading it? (download)
      -how can i edit the PDF file of "ready to write" by karen blanchard and christine root after downloading it? (download)
      -how can i share the PDF file of "ready to write" by karen blanchard and christine root with others after downloading it? (download)

      -

      Understanding paragraphs

      -

      This chapter explains what a paragraph is and what its components are. It teaches you how to write a topic sentence that states the main idea of the paragraph, how to write supporting sentences that develop the main idea with details and examples, and how to write a concluding sentence that summarizes the main idea or provides a transition.

      -

      Organizing information by different orders

      -

      This chapter teaches you how to organize your information by different orders depending on your purpose and audience. It shows you how to use time order when describing events or processes that happen in sequence, how to use order of importance when ranking items or ideas according to their significance or relevance, and how to use spatial order when describing locations or directions.

      -

      Understanding the writing process

      -

      This chapter introduces you to the steps of the writing process: prewriting, drafting, revising, editing, and proofreading. It gives you tips and strategies on how to perform each step effectively and efficiently.

      -

      Supporting the main idea

      -

      This chapter teaches you how to support your main idea with relevant and sufficient details and examples. It shows you how to use facts, statistics, quotations, anecdotes, comparisons, contrasts, definitions, classifications, causes, effects, reasons, opinions, arguments, or recommendations.

      -

      Explaining a process

      -

      This chapter teaches you how to explain a process or procedure that involves several steps or stages. It shows you how to use time order words such as first, second, next, then, after, finally, etc., how to use imperative verbs such as cut, mix, boil, etc., how to use transition words such as before, after, while, during, etc., and how to use diagrams or illustrations if necessary.

      -

      Writing descriptions

      -

      This chapter teaches you how to write descriptions that appeal to the senses and create vivid images in the reader's mind. It shows you how to use adjectives, adverbs, similes, metaphors, personification, or other figurative language devices to make your descriptions more interesting and engaging.

      -

      Expressing your opinion

      -

      This chapter teaches you how to express your opinion on a topic or issue. It shows you how to state your opinion clearly and politely, how to support your opinion with reasons and evidence, how to acknowledge opposing views and refute them, and how to use modal verbs such as can, could, may, might, should, would, etc., to express possibility, probability, obligation, or suggestion.

      -

      Comparing and contrasting

      -

      This chapter teaches you how to compare and contrast two or more items or ideas. It shows you how to use comparison words such as like, similarly, in the same way, etc., and contrast words such as but, however, on the other hand, etc., to show similarities and differences. It also shows you how to use different methods of organization such as block method or point-by-point method.

      -

      Analyzing causes and effects

      -

      This chapter teaches you how to analyze causes and effects of a situation or phenomenon. It shows you how to use cause-effect words such as because, since, as a result, therefore, etc., to show causal relationships. It also shows you how to use different methods of organization such as chronological order or order of importance.

      -

      Writing personal letters and business letters

      -

      This chapter teaches you how to write personal letters and business letters for different purposes and audiences. It shows you how to use appropriate formats and styles for each type of letter. It also shows you how to use polite expressions and tone in your letters.

      -

      What are the benefits of Ready to Write 2?

      -

      Ready to Write 2 is a valuable resource for anyone who wants to improve their paragraph writing skills. Some of the benefits of using this book are:

      -

      Improving your writing skills and confidence

      -

      This book will help you improve your writing skills by teaching you the essential elements of paragraph writing. You will learn how to write clear and coherent paragraphs that express your ideas effectively. You will also practice your writing skills by doing various exercises and activities that reinforce what you have learned. You will also get feedback and guidance from the answer key and the teacher's manual. By practicing your writing skills regularly, you will gain more confidence and fluency in your writing.

      -

      Enhancing your academic and professional performance

      -

      This book will help you enhance your academic and professional performance by preparing you for different types of writing tasks and situations. You will learn how to write paragraphs for essays, reports, research papers, presentations, exams, and other assignments. You will also learn how to write letters for personal and business purposes, such as applying for a job, requesting information, making a complaint, or expressing gratitude. By writing effective paragraphs, you will impress your teachers, employers, clients, or colleagues with your knowledge and skills.

      -

      Learning from examples and exercises

      -

      This book will help you learn from examples and exercises that illustrate the concepts and principles of paragraph writing. You will see how other writers use different techniques and strategies to write clear and coherent paragraphs. You will also practice applying those techniques and strategies to your own writing through various exercises and activities. You will check your understanding and progress by completing quizzes, tests, and self-evaluations.

      -

      Conclusion

      -

      Summary of the main points

      -

      In conclusion, Ready to Write 2 is a useful and practical book that teaches you how to perfect your paragraphs. It covers all the aspects of paragraph writing, such as getting organized, understanding paragraphs, organizing information by different orders, understanding the writing process, supporting the main idea, explaining a process, writing descriptions, expressing your opinion, comparing and contrasting, analyzing causes and effects, and writing personal and business letters. It also provides you with many benefits, such as improving your writing skills and confidence, enhancing your academic and professional performance, and learning from examples and exercises.

      -

      Call to action

      -

      If you want to improve your paragraph writing skills and write better texts for any purpose or audience, you should download Ready to Write 2 PDF today. You can find it online from various sources or buy it from Pearson Education or other online retailers. Don't miss this opportunity to learn from one of the best books on paragraph writing.

- **FAQs**

Q: Who are the authors of Ready to Write 2?
A: The authors of Ready to Write 2 are Karen Blanchard and Christine Root, two experienced ESL teachers and authors.

Q: What is the difference between block style and AMS style for formal letters?
A: In block style, all elements are aligned on the left margin of the page. AMS style is similar but indents the paragraphs.

Q: What are some examples of comparison words and contrast words for comparing and contrasting?
A: Some examples of comparison words are like, similarly, in the same way, etc. Some examples of contrast words are but, however, on the other hand, etc.

Q: What are some examples of cause-effect words for analyzing causes and effects?
A: Some examples of cause-effect words are because, since, as a result, therefore, etc.

Q: What are some tips for writing personal letters and business letters?
A: For personal letters, use a friendly tone and personal pronouns, write in a conversational style, and express your feelings or emotions. For business letters, use a formal tone with titles and last names, write in a clear and concise style, and state your purpose and request.

      0a6ba089eb
      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Discografia - Rick e Renner Torrent Download Descubra os Melhores Sites para Baixar a Discografia Completa da Dupla.md b/spaces/tialenAdioni/chat-gpt-api/logs/Discografia - Rick e Renner Torrent Download Descubra os Melhores Sites para Baixar a Discografia Completa da Dupla.md deleted file mode 100644 index a44532a7ec58dfd08e5eeb8e79fe7c7e18919703..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Discografia - Rick e Renner Torrent Download Descubra os Melhores Sites para Baixar a Discografia Completa da Dupla.md +++ /dev/null @@ -1,163 +0,0 @@ - -

      Discografia - Rick e Renner Torrent Download

      -

      If you are a fan of Brazilian sertanejo music, you might have heard of Rick e Renner, one of the most popular duos in the genre. They have released more than 20 albums since their debut in 1991, and have sold over 10 million copies worldwide. Their songs are known for their catchy melodies, romantic lyrics, and emotional vocals.

      -

      But how can you listen to all their songs without spending a fortune on CDs or streaming services? The answer is simple: download their discography as a torrent. In this article, we will explain what a torrent is, why you should download Rick e Renner discography as a torrent, how to do it step by step, and some tips and tricks to make the process easier and safer.

      -

      Discografia - Rick e Renner Torrent Download


      Download Ziphttps://urlcod.com/2uK9us



      -

      Introduction

      -

      Who are Rick e Renner?

      -

      Rick e Renner are a Brazilian sertanejo duo formed by Rinaldo Santos Teixeira (Rick) and Ivair dos Reis Gonçalves (Renner). They met in Brasília in 1986, and started performing together in bars and clubs. They released their first album, É Dez, É Cem, É Mil, in 1991, which was a success in the sertanejo scene.

      -

      Since then, they have released more than 20 albums, including studio albums, live albums, compilations, and DVDs. Some of their most famous songs are "Ela É Demais", "Filha", "Mãe", "Nos Bares da Cidade", "Só Pensando em Você", and "Credencial". They have won several awards, such as Prêmio da Música Brasileira, Troféu Imprensa, and Prêmio Multishow de Música Brasileira.

      -

      In 2015, they announced their separation due to personal and professional differences. However, in 2018, they reunited and released a new album called Seguir em Frente. They are still active and performing together.

      -

      What is a torrent?

      -

      A torrent is a file that contains information about other files that you want to download from the internet. It does not contain the actual files, but rather the metadata, such as the name, size, format, and location of the files. The files are stored on other users' computers, who share them with each other using a peer-to-peer (P2P) network.

      -

      To download a torrent, you need two things: a torrent file and a torrent client. A torrent file is a small file that you can download from various websites that host torrents. A torrent client is a software that you install on your computer that allows you to open the torrent file and connect to other users who have the files you want. Some of the most popular torrent clients are uTorrent, BitTorrent, qBittorrent, and Vuze.

      -
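To make the idea of "metadata" concrete, here is a minimal sketch of a bencode decoder, the encoding that .torrent files use for their metadata. It is an illustration rather than production code, and the file name "example.torrent" is a placeholder.

```python
# A minimal bencode decoder: .torrent files store their metadata in this
# encoding (integers, byte strings, lists, and dictionaries).
# "example.torrent" below is a placeholder file name.

def bdecode(data: bytes, i: int = 0):
    """Decode one bencoded value starting at offset i; return (value, next_i)."""
    c = data[i:i+1]
    if c == b"i":                                # integer: i<digits>e
        end = data.index(b"e", i)
        return int(data[i+1:end]), end + 1
    if c == b"l":                                # list: l<items>e
        i, items = i + 1, []
        while data[i:i+1] != b"e":
            item, i = bdecode(data, i)
            items.append(item)
        return items, i + 1
    if c == b"d":                                # dictionary: d<key><value>...e
        i, d = i + 1, {}
        while data[i:i+1] != b"e":
            key, i = bdecode(data, i)
            d[key], i = bdecode(data, i)
        return d, i + 1
    colon = data.index(b":", i)                  # byte string: <length>:<bytes>
    length = int(data[i:colon])
    start = colon + 1
    return data[start:start+length], start + length

with open("example.torrent", "rb") as f:
    meta, _ = bdecode(f.read())

info = meta[b"info"]
print("Name:", info[b"name"].decode())           # suggested save name
print("Piece size:", info[b"piece length"])      # bytes per piece
```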

      Rick e Renner discography torrent download free
      -How to download Rick e Renner albums torrent
      -Rick e Renner best songs torrent download mp3
      -Rick e Renner complete discography torrent magnet
      -Rick e Renner discografia completa download torrent
      -Baixar discografia Rick e Renner torrent gratis
      -Rick e Renner discografia torrent 320 kbps
      -Download Rick e Renner discography torrent full
      -Rick e Renner all albums torrent download
      -Rick e Renner discografia download torrent link
      -Rick e Renner discography torrent flac
      -Download Rick e Renner albums torrent fast
      -Rick e Renner discografia torrent rar
      -Rick e Renner discography torrent zip
      -Rick e Renner discografia baixar torrent
      -Download Rick e Renner discography torrent online
      -Rick e Renner discografia torrent mega
      -Rick e Renner discography torrent kickass
      -Rick e Renner discografia download utorrent
      -Download Rick e Renner albums torrent high quality
      -Rick e Renner discografia torrent the pirate bay
      -Rick e Renner discography torrent limetorrents
      -Baixar Rick e Renner discografia completa torrent
      -Download Rick e Renner songs torrent best hits
      -Rick e Renner discografia torrent cd
      -Download Rick e Renner discography torrent dvd
      -Rick e Renner discografia baixar utorrent gratis
      -Download Rick e Renner albums torrent latest
      -Rick e Renner discografia download via torrent
      -Download Rick e Renner songs torrent collection
      -Baixar musica de Rick e Renner torrent mp3
      -Download music by Rick e Renner torrent flac
      -Baixar album de Rick e Renner torrent completo
      -Download album by Rick e Renner torrent full hd
      -Baixar cancoes de Rick e Renner torrent melhores
      -Download songs by Rick e Renner torrent top hits
      -Baixar coletanea de Rick e Renner torrent raridade
      -Download compilation by Rick e Renner torrent rare
      -Baixar show de Rick e Renner torrent ao vivo
      -Download concert by Rick e Renner torrent live
      -Baixar dvd de Rick e Renner torrent video
      -Download dvd by Rick e Renner torrent video hd
      -Baixar box de Rick e Renner torrent especial
      -Download box by Rick e Renner torrent special edition
      -Baixar acustico de Rick e Renner torrent unplugged
      -Download acoustic by Rick e Renner torrent unplugged hd
      -Baixar remix de Rick e Renner torrent dance mix
      -Download remix by Rick e Renner torrent dance mix mp3

      -

Downloading a torrent is different from downloading a regular file from a website. Instead of downloading from one source, you download from multiple sources at the same time. This makes the download faster and more efficient. However, it also means that you upload parts of the files to other users while you download. Keeping your client open to continue uploading after your download is complete is called seeding. Seeding helps keep the torrent alive and benefits other users who want to download the same files.

      -

      Why download Rick e Renner discography as a torrent?

      -

      There are many reasons why you might want to download Rick e Renner discography as a torrent. Here are some of them:

      -
        -
• You can listen to all their songs offline without relying on an internet connection or paying for a streaming service.
• You can save money by not buying CDs or digital downloads of their albums.
• You can enjoy high-quality audio files that preserve the original sound quality of their recordings.
• You can discover new songs or albums that you might have missed or forgotten.
• You can support their music by sharing it with other fans who might not have access to it otherwise.
      -

      How to download Rick e Renner discography torrent

      -

      Step 1: Find a reliable torrent site

      -

      The first step to download Rick e Renner discography as a torrent is to find a reliable torrent site that hosts it. A torrent site is a website that allows users to upload and download torrents. There are many torrent sites on the internet, but not all of them are safe or trustworthy. Some of them might contain viruses, malware, fake files, or illegal content.

      -

      To avoid these risks, you should use only reputable and verified torrent sites that have positive reviews and ratings from other users. Some examples of good torrent sites are The Pirate Bay, RARBG, 1337x, Torrentz2, and LimeTorrents. You can also use specialized sites that focus on music torrents only, such as Soundpark or Rutracker.

      -

      Step 2: Search for Rick e Renner discography torrent

      -

      The next step is to search for Rick e Renner discography torrent on the site you chose. You can use the search bar or browse through the categories or tags to find it. You might find different versions or editions of their discography depending on the uploader or source. You should look for the one that has the most seeders (users who have completed downloading and are uploading) and leechers (users who are still downloading) as this indicates its popularity and availability.

      -

      You should also check the details of the torrent before downloading it. These include:

      -
        -
• The file name: It should match what you are looking for.
• The file size: It should be reasonable for the number and quality of files included.
• The file format: It should be compatible with your device or media player.
• The file description: It should provide information about the content and source of the files.
• The file comments: They should give feedback from other users who have downloaded it.
      -

      Step 3: Download and open the torrent file

      -

      Once you have found the right version of Rick e Renner discography torrent, you can click on the download button or link to get it. This will download a small file with a .torrent extension to your computer. You need to open this file with your torrent client to start downloading the actual files.

      -

      To open the .torrent file with your torrent client:

      -
        -
1. Launch your torrent client if it is not already running.
2. Go to File > Open Torrent or click on the + icon (depending on your client).
3. Browse your computer folders and select the .torrent file you downloaded.
4. Click on Open or OK (depending on your client).
      -

      Step 4: Choose a torrent client

      -

      A torrent client is a software that allows you to open .torrent files and connect to other users who have the files you want. There are many options available for different operating systems and devices. Some of them are free while others require payment or subscription.

      -

      You should choose a torrent client that suits your needs and preferences. Some factors to consider are:

      -
        -
• The features: Some clients offer more options than others in terms of speed control, bandwidth management, encryption, proxy support, streaming, remote access, etc.
• The interface: Some clients have more user-friendly interfaces than others in terms of design, layout, navigation, customization, etc.
• The performance: Some clients consume more resources than others in terms of CPU, RAM, disk space, battery life, etc.
• The reputation: Some clients have better reviews than others in terms of reliability, security, privacy, support, etc.
      -

      Some examples of popular torrent clients are uTorrent, BitTorrent, qBittorrent, and Vuze.

      -

      Step 5: Start the download and wait for it to finish

      -

      After you have opened the .torrent file with your torrent client, you will see a window that shows the details of the torrent. You can choose which files you want to download, where you want to save them, and how much bandwidth you want to allocate for the download. You can also see the progress, speed, peers, and other information about the download.

      -

      Click on Start or OK (depending on your client) to begin the download. Depending on the size of the files and the speed of your internet connection, the download might take from a few minutes to several hours. You can pause or resume the download at any time. You can also close your torrent client and reopen it later to continue the download.

      -

      When the download is complete, you will see a notification or a status change on your torrent client. You can then open the folder where you saved the files and enjoy listening to Rick e Renner discography.

      -

      Tips and tricks for downloading Rick e Renner discography torrent

      -

      Use a VPN to protect your privacy and security

      -

      Downloading torrents can expose your IP address and online activity to other users and third parties who might monitor or track you. This can compromise your privacy and security, and expose you to legal risks or cyberattacks. To avoid these dangers, you should use a VPN (Virtual Private Network) when downloading torrents.

      -

      A VPN is a service that encrypts your internet traffic and routes it through a secure server in another location. This hides your real IP address and location from anyone who might spy on you. It also allows you to access geo-restricted or censored content that might not be available in your region.

      -

      There are many VPN providers available for different devices and platforms. Some of them are free while others require payment or subscription. You should choose a VPN that offers fast speed, unlimited bandwidth, strong encryption, no-logs policy, and P2P support. Some examples of good VPNs for torrenting are ExpressVPN, NordVPN, Surfshark, and CyberGhost.

      -

      Check the comments and ratings of the torrent before downloading

      -

      Not all torrents are created equal. Some of them might be fake, incomplete, corrupted, or infected with malware. To avoid wasting your time and risking your device, you should check the comments and ratings of the torrent before downloading it.

      -

      The comments and ratings are feedback from other users who have downloaded or tried the torrent. They can give you useful information about the quality, content, source, and problems of the torrent. They can also warn you about potential threats or scams that might be associated with the torrent.

      -

      You should look for torrents that have positive comments and ratings from many users. You should also avoid torrents that have negative comments and ratings from many users. You should also be wary of torrents that have no comments or ratings at all, as they might be new or untested.

      -

      Verify the file size and format of the torrent

      -

      Another way to ensure that you are downloading the right version of Rick e Renner discography torrent is to verify the file size and format of the torrent. The file size and format can indicate the quality and compatibility of the files included in the torrent.

      -

      The file size is the amount of space that the files occupy on your device or storage media. The file size can affect the download speed and duration, as well as the storage space required for saving the files. Generally speaking, larger files mean higher quality but slower download and more storage space needed.

      -

      The file format is the type of file that determines how it can be played or opened on your device or media player. The file format can affect the sound quality and compatibility of the files with different devices or players. Generally speaking, common file formats for music are MP3, FLAC, WAV, AAC, etc.

      -

      You should compare the file size and format of different versions of Rick e Renner discography torrent and choose the one that suits your preferences and needs. You should also make sure that your device or player supports the file format that you choose.

      -
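As a companion to the decoder sketch shown earlier, the snippet below reads the advertised file sizes out of a torrent's info dictionary, so you can compare versions programmatically. It reuses the bdecode() helper from that earlier sketch, and "example.torrent" is again a placeholder name.

```python
# A companion to the bdecode() sketch above: sum the file sizes advertised
# in a torrent's info dictionary. "example.torrent" is again a placeholder.

def total_size(info: dict) -> int:
    """Sum the sizes (in bytes) listed in a torrent's info dictionary."""
    if b"files" in info:                         # multi-file torrent
        return sum(f[b"length"] for f in info[b"files"])
    return info[b"length"]                       # single-file torrent

with open("example.torrent", "rb") as f:
    meta, _ = bdecode(f.read())                  # bdecode() from the earlier sketch

print(f"Advertised size: {total_size(meta[b'info']) / 1e9:.2f} GB")
```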

      Seed the torrent after downloading to help other users

      -

      One of the most important things to do after downloading Rick e Renner discography torrent is to seed it. Seeding is the process of uploading parts of the files that you have downloaded to other users who are still downloading them. Seeding helps keep the torrent alive and available for other users who want to download it.

      -

      Seeding is not only a courtesy but also a responsibility for torrent users. By seeding, you contribute to the P2P network and support the creators and distributors of the content that you enjoy. Seeding also benefits you in several ways, such as:

      -
        -
• Improving your download speed and ratio: The more you seed, the faster and easier your own downloads become.
• Increasing your reputation and credibility: The more you seed, the more you are trusted and respected by other users.
• Enhancing your security and privacy: The more you seed, the better you are protected from being targeted or tracked by third parties.
      -

      To seed a torrent, you just need to leave your torrent client running after the download is complete. You can adjust the settings of your torrent client to control how much bandwidth or time you want to allocate for seeding. You can also stop seeding at any time if you need to.

      -

      Conclusion

      -

      In this article, we have explained what a torrent is, why you should download Rick e Renner discography as a torrent, how to do it step by step, and some tips and tricks to make the process easier and safer. We hope that this article has helped you learn how to download Rick e Renner discography torrent and enjoy listening to their songs.

      -

      If you have any questions or comments about this article, feel free to leave them below. We would love to hear from you. And if you liked this article, please share it with your friends who might be interested in Rick e Renner discography torrent. Thank you for reading!

      -

      FAQs

      -

      Q: Is downloading Rick e Renner discography torrent legal?

      -

      A: The legality of downloading Rick e Renner discography torrent depends on your location and the copyright status of the content. In some countries or regions, downloading torrents is legal as long as you do not distribute or profit from them. In other countries or regions, downloading torrents is illegal regardless of the content or purpose.

      -

      To avoid legal risks, you should check the laws and regulations of your country or region before downloading torrents. You should also use a VPN to hide your IP address and online activity from anyone who might monitor or track you.

      -

      Q: Is downloading Rick e Renner discography torrent safe?

      -

      A: The safety of downloading Rick e Renner discography torrent depends on the source and quality of the torrent. Some torrents might be fake, incomplete, corrupted, or infected with malware. These can harm your device or compromise your security or privacy.

      -

      To avoid these risks, you should use only reputable and verified torrent sites that have positive reviews and ratings from other users. You should also check the comments and ratings of the torrent before downloading it. You should also use a VPN to protect your privacy and security from anyone who might spy on you.

      -

      Q: How long does it take to download Rick e Renner discography torrent?

      -

      A: The time it takes to download Rick e Renner discography torrent depends on several factors, such as:

      -
        -
• The size of the files: The larger the files, the longer the download takes.
• The speed of your internet connection: The faster your connection, the shorter the download.
• The number of seeders and leechers: The more seeders and leechers there are, the faster the download.
• The settings of your torrent client: The more bandwidth you allocate to downloading, the shorter the download.
      -

      To estimate how long it takes to download Rick e Renner discography torrent, you can use a calculator like this one: https://www.download-time.com/

      -
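If you would rather compute the estimate yourself, a rough back-of-the-envelope calculation is enough; the sketch below assumes a steady connection, and the size and speed values are made-up examples.

```python
# A rough download-time estimate assuming a steady connection.
# The 3.7 GB size and 50 Mbit/s speed below are made-up example values.

def download_time(size_gb: float, speed_mbit: float) -> str:
    """Return an h:mm:ss estimate for downloading size_gb at speed_mbit."""
    size_bits = size_gb * 8 * 1000**3            # gigabytes -> bits
    seconds = size_bits / (speed_mbit * 1000**2) # megabits/s -> bits/s
    h, rem = divmod(int(seconds), 3600)
    m, s = divmod(rem, 60)
    return f"{h}:{m:02d}:{s:02d}"

print(download_time(3.7, 50))                    # e.g. "0:09:52"
```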

      Q: How can I play Rick e Renner discography torrent on my device or media player?

      -

A: To play Rick e Renner discography torrent on your device or media player, you need to make sure that it supports the file format that you downloaded. Some common file formats for music are MP3, FLAC, WAV, AAC, etc. If your device or player does not support the file format that you downloaded, you need to convert it to a compatible one using a converter such as this one: https://www.freemake.com/free_audio_converter/

      -

      Q: How can I share Rick e Renner discography torrent with my friends?

      -

A: To share Rick e Renner discography torrent with your friends, you can either send them the .torrent file that you downloaded from the torrent site or create a magnet link that they can open with their torrent client. A magnet link is a URL that contains information about the files that you want to share without requiring a .torrent file. To create a magnet link from a .torrent file using uTorrent, follow these steps (a sketch of assembling a magnet link by hand appears after the list):

      -
        -
1. Open uTorrent and go to File > Add Torrent (no default save).
2. Select the .torrent file that you want to share and click on Open.
3. Right-click on the torrent in uTorrent and go to Copy > Copy Magnet URI.
4. Paste the magnet link in an email or message and send it to your friends.
      -
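For the curious, a magnet link can also be assembled by hand once you know the torrent's info-hash. The sketch below shows the general shape of such a URI; the hash and name are made-up placeholders.

```python
# A sketch of assembling a BitTorrent magnet link by hand; it assumes you
# already know the torrent's 40-character hex info-hash. The hash and the
# display name below are made-up placeholders.

from urllib.parse import quote

def make_magnet(info_hash: str, name: str, trackers=()) -> str:
    """Build a magnet URI: xt carries the info-hash, dn the display name."""
    uri = f"magnet:?xt=urn:btih:{info_hash}&dn={quote(name)}"
    for tracker in trackers:                     # optional tr= parameters
        uri += f"&tr={quote(tracker, safe='')}"
    return uri

print(make_magnet(
    "0123456789abcdef0123456789abcdef01234567",  # placeholder info-hash
    "Rick e Renner - Discografia",
))
```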

      0a6ba089eb
      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download HOT Dukh Bhanjani Sahib Paath Pdf In Hindi.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download HOT Dukh Bhanjani Sahib Paath Pdf In Hindi.md deleted file mode 100644 index 4cc02f96d7333f86bfcd9dfdb3c52b16f3b8c5a9..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download HOT Dukh Bhanjani Sahib Paath Pdf In Hindi.md +++ /dev/null @@ -1,33 +0,0 @@ - -Here is a possible title and article with html formatting for the keyword "Download Dukh Bhanjani Sahib Paath Pdf In Hindi": - -

      Download Dukh Bhanjani Sahib Paath Pdf In Hindi: A Guide to the Sikh Prayer for Healing and Peace

      - -

      Dukh Bhanjani Sahib is a collection of shabads (hymns) from the Guru Granth Sahib, the holy scripture of the Sikhs. The shabads were compiled by Guru Arjan Dev Ji, the fifth Sikh Guru, and are recited to relieve pain and suffering. The name Dukh Bhanjani means "the destroyer of sorrow".

      -

      Download Dukh Bhanjani Sahib Paath Pdf In Hindi


      Download Ziphttps://urlcod.com/2uK9Tk



      - -

      The shabads in Dukh Bhanjani Sahib are arranged in a specific order, starting with Gauri Maajh, a musical measure that invokes the grace and mercy of God. The shabads praise the name and attributes of God, who is the source of all happiness and comfort. The shabads also express the longing and devotion of the soul for God, who is the true friend and protector of the devotees. The shabads also describe the benefits of meditating on God's name, which removes all fears and diseases and grants peace and bliss.

      - -

      Dukh Bhanjani Sahib is recited by Sikhs as a daily prayer, especially in times of distress and hardship. It is also recited at the Harmandir Sahib (Golden Temple) in Amritsar, where a pool of water called Dukh Bhanjani Beri (the tree of sorrow-removal) is located. According to a legend, a Sikh named Bhai Joga Singh cured his leprosy by bathing in this pool while reciting Dukh Bhanjani Sahib.

      - -

      If you want to download Dukh Bhanjani Sahib Paath Pdf In Hindi, you can follow these steps:

      - -
        -
1. Visit https://panotbook.com/dukh-bhanjani-sahib-path/, which is a website that provides free PDFs of various religious books in Hindi.
2. Scroll down to the bottom of the page and click on the download button that says "दुख भंजनी साहिब पाठ PDF डाउनलोड करें".
3. A new tab will open with a Google Drive link. Click on the download icon on the top right corner of the screen.
4. The PDF file will be downloaded to your device. You can open it with any PDF reader app or software.
      - -

      You can also visit other websites that offer Dukh Bhanjani Sahib Paath Pdf In Hindi, such as https://www.dekho-ji.com/dukh-bhanjani-sahib/ or https://nitnemsahib.com/dukh-bhanjani-sahib-in-hindi/. However, make sure to check the authenticity and quality of the PDFs before downloading them.

      - -

      Dukh Bhanjani Sahib is a powerful and soothing prayer that can help you overcome any difficulties and challenges in life. By reciting it with faith and love, you can experience the healing and peace of God's presence. May you always be blessed by Guru Arjan Dev Ji's words.

      Here are a few more paragraphs for the article: - -

      Dukh Bhanjani Sahib is not only a prayer for physical healing, but also for mental and spiritual healing. It helps to remove the negative thoughts and emotions that cause pain and suffering in the mind. It also helps to awaken the inner wisdom and awareness that lead to liberation and enlightenment. By reciting Dukh Bhanjani Sahib, you can cleanse your mind of ignorance and ego, and fill it with divine light and love.

      -

      - -

      Dukh Bhanjani Sahib is also a prayer for social and universal healing. It promotes harmony and unity among all beings, regardless of their religion, caste, gender, or status. It teaches us to see God in everyone and everything, and to serve God by serving His creation. By reciting Dukh Bhanjani Sahib, you can spread peace and joy in the world, and contribute to the welfare of all.

      - -

      Dukh Bhanjani Sahib is a prayer that connects us with our true self, which is one with God. It reminds us of our divine origin and destiny, and inspires us to live according to God's will. It also gives us hope and courage to face any challenges and obstacles in life, knowing that God is always with us and within us. By reciting Dukh Bhanjani Sahib, you can experience the bliss and grace of God's name.

      7196e7f11a
      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Windows 10 Crack Loader and Enjoy Windows 10 for Free in 3 Easy Steps.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Windows 10 Crack Loader and Enjoy Windows 10 for Free in 3 Easy Steps.md deleted file mode 100644 index 72fdf85005bf8f5af0a110bb6a366368e45a6523..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Windows 10 Crack Loader and Enjoy Windows 10 for Free in 3 Easy Steps.md +++ /dev/null @@ -1,26 +0,0 @@ -
      -

      How to Download Windows 10 Crack Loader and Activate Windows 10 for Free

      -

      Windows 10 is the latest and most advanced operating system from Microsoft, but it also comes with a hefty price tag. If you want to use Windows 10 without paying for a license key, you can try using a Windows 10 crack loader. A Windows 10 crack loader is a software that can bypass the activation process and make Windows 10 think that it is genuine and activated.

      -

      windows 10 crack loader download


      Download Ziphttps://urlcod.com/2uK9IW



      -

      In this article, we will show you how to download Windows 10 crack loader and activate Windows 10 for free. We will also provide some tips and warnings about using cracked software. Please note that we do not condone or encourage piracy and we are not responsible for any legal or technical issues that may arise from using Windows 10 crack loader. Use it at your own risk.

      -

      Step 1: Download Windows 10 Crack Loader

      -

      The first step is to download Windows 10 crack loader from a reliable source. There are many websites that claim to offer Windows 10 crack loader, but some of them may contain malware or viruses that can harm your PC. To avoid this, you should only download Windows 10 crack loader from trusted and verified sources.

      -

      One such source is Windows 10 Crack Loader, which provides the latest version of Windows 10 crack loader for free. You can visit their website and click on the "Download Now" button to get the Windows 10 crack loader file. The file size is about 4 MB and it will be downloaded as a ZIP archive.

      -

      Step 2: Extract and Run Windows 10 Crack Loader

      -

      The next step is to extract and run Windows 10 crack loader on your PC. To do this, you will need a software that can extract ZIP files, such as WinRAR or 7-Zip. You can download and install one of them from their official websites.

      -

      After installing the extraction software, you can right-click on the downloaded ZIP file and select "Extract Here" or "Extract to windows10crackloader" option. This will create a folder with the same name as the ZIP file, containing the Windows 10 crack loader files.

      -

      -

Open the folder, right-click on the "Windows 10 Crack Loader.exe" file, and select "Run as administrator" to launch it with elevated privileges. A window will pop up asking you to confirm the cracking process. Click on "Crack" and wait for a few seconds until you see a message saying "Cracking Done". Click on "OK" and close the window.

      -

      Step 3: Restart Your PC and Enjoy Windows 10

      -

      The final step is to restart your PC and enjoy Windows 10 for free. To do this, you can click on the "Start" button and select "Power" option. Then, click on "Restart" and wait for your PC to reboot.

      -

      Congratulations! You have successfully downloaded and activated Windows 10 for free using Windows 10 crack loader. You can now enjoy all the features and benefits of Windows 10 without paying a dime.

      -

      Tips and Warnings

      -
        -
• Before using Windows 10 crack loader, you should back up your important data and files in case something goes wrong.
• You should also disable your antivirus software temporarily during the cracking process, as it may interfere with the crack files.
• You should not update Windows 10 after using the crack loader, as it may revert the activation and cause errors.
• You should be careful when downloading files from unknown sources, as they may contain malware or viruses that can harm your PC.
• You should respect the intellectual property rights of Microsoft and support them by purchasing a license key if you can afford it.

      ddb901b051
      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Free McBoot 1.9 How to Unlock the Full Potential of Your PS2 Slim SCPH 90004.md b/spaces/tialenAdioni/chat-gpt-api/logs/Free McBoot 1.9 How to Unlock the Full Potential of Your PS2 Slim SCPH 90004.md deleted file mode 100644 index 9109a3d78965c56c2f19aedacb53143a0068132b..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Free McBoot 1.9 How to Unlock the Full Potential of Your PS2 Slim SCPH 90004.md +++ /dev/null @@ -1,75 +0,0 @@ -
      -

      How to Download and Install Free McBoot 1.9 on PS2 Slim SCPH 90004

      -

Free McBoot (FMCB) is homebrew software that allows you to launch other homebrew applications from your PlayStation 2 memory card. It works on most PS2 models, including the slim SCPH 90004. In this article, we will show you how to download and install FMCB 1.9 on your PS2 slim using a USB drive and a compatible game disc.

      -

      download free mcboot 1.9 scph 90004


      Download Zip ★★★ https://urlcod.com/2uK4J2



      -

      What You Need

      -
        -
• A PS2 slim SCPH 90004 console with a date code of 8C or lower. You can check the date code on the sticker at the back of your console. If it is higher than 8C, FMCB will not work.
• A USB drive formatted to FAT32 with at least 4 GB of free space.
• A compatible game disc that supports disc swapping. Some examples are 007 Agent Under Fire, 007 Nightfire, Splinter Cell, and Swap Magic.
• A computer with an internet connection and a program that can extract ZIP files.
      -

      Step 1: Download FMCB 1.9 Package

      -

      First, you need to download the FMCB 1.9 package for OpenTuna from this link[^1^]. This package will allow you to launch FMCB 1.9 by using OpenTuna on any slim/OpenTuna-compatible console. Choose the appropriate package for your console: FMCB-1966-OPENTUNA-PACK for non-modchipped consoles or FMCB-1953-MODCHIP-OPENTUNA-PACK for modchipped consoles.

      -

      Extract the ZIP file to your USB drive. You should see four folders: APPS, BOOT, OPENTUNA, and SYS-CONF.

      -

      Step 2: Copy Files to Memory Card

      -

      Next, you need to copy the files from your USB drive to your PS2 memory card. To do this, you need to use a program called uLaunchELF, which can be launched from your game disc using a disc swap trick.

      -

      Insert your game disc into your PS2 and turn it on. Wait for the game to load until you see the main menu or a loading screen. Then, open the disc tray and swap the game disc with another disc that has uLaunchELF burned on it. You can download uLaunchELF from this link[^2^] and burn it to a CD-R using any burning software.

      -

      Close the disc tray and wait for uLaunchELF to load. You should see a file browser with different devices. Navigate to mass:/ (your USB drive) and press O to open it. You should see the four folders that you extracted earlier.

      -

Delete any FreeMCBoot-related folders and any FORTUNA/OPENTUNA folder from your memory card (mc0:/ or mc1:/). Then, copy all the files and folders from mass:/ to the root of your memory card. This may take some time, so be patient.

      -

      Step 3: Launch FMCB 1.9

      -

      Once the copying is done, you can launch FMCB 1.9 from your memory card. To do this, you need to use OpenTuna, which is a modified version of Fortuna Project that allows you to boot homebrew ELFs from any partition on your HDD or USB drive.

      -

      How to install free mcboot 1.9 on ps2 slim scph 90004
      -Free mcboot 1.9 for opentuna compatible consoles
      -Free mcboot 1.9 series packages for ps2 homebrew
      -Free mcboot 1.9 vs free harddisk drive boot
      -Free mcboot 1.9 features and benefits
      -Free mcboot 1.9 download link and tutorial
      -Free mcboot 1.9 compatibility list and issues
      -Free mcboot 1.9 configurator and launcher
      -Free mcboot 1.9 update and changelog
      -Free mcboot 1.9 alternatives and comparisons
      -Free mcboot 1.9 for psx and debugstation consoles
      -Free mcboot 1.9 for chinese ps2 models
      -Free mcboot 1.9 memory card size and format
      -Free mcboot 1.9 elf booting from hdd partition
      -Free mcboot 1.9 dvd player selection and region free
      -Free mcboot 1.9 shutdown function and hdd support
      -Free mcboot 1.9 cnf parsing and optimization
      -Free mcboot 1.9 memory map and user memory
      -Free mcboot 1.9 protokernel and hdd osd support
      -Free mcboot 1.9 cd/dvd drive stop and start
      -How to uninstall free mcboot 1.9 from ps2 slim scph 90004
      -How to backup free mcboot 1.9 to pc or usb
      -How to customize free mcboot 1.9 with themes and icons
      -How to troubleshoot free mcboot 1.9 errors and bugs
      -How to upgrade free mcboot 1.9 to newer versions
      -Best homebrew apps for free mcboot 1.9 users
      -How to play ps2 games from usb with free mcboot 1.9
      -How to play psx games from hdd with free mcboot 1.9
      -How to use cheats and codes with free mcboot 1.9
      -How to stream ps2 games with free mcboot 1.9 and obs
      -How to mod ps2 games with free mcboot 1.9 and tools
      -How to rip ps2 games with free mcboot 1.9 and esr patcher
      -How to burn ps2 games with free mcboot 1.9 and imgburn
      -How to emulate ps2 games with free mcboot 1.9 and pcsx2
      -How to transfer ps2 saves with free mcboot 1.9 and uLaunchELF
      -How to use ps2 controllers on pc with free mcboot 1.9 and adapter
      -How to use ps3 controllers on ps2 with free mcboot 1.9 and wireless receiver
      -How to use ps4 controllers on ps2 with free mcboot 1.9 and bluetooth dongle
      -How to connect ps2 to internet with free mcboot 1.9 and network adapter
      -How to play online games with free mcboot 1.9 and xlink kai or open spy
      -How to improve ps2 graphics with free mcboot 1.9 and gsm or opl modes
      -How to enhance ps2 sound with free mcboot 1.9 and spu2-x or opl settings
      -How to record ps2 gameplay with free mcboot 1.9 and capture card or hdmi converter
      -How to clean ps2 lens with free mcboot 1.9 and cotton swab or alcohol solution
      -How to replace ps2 fan with free mcboot 1.9 and screwdriver or pliers
      -How to fix ps2 disc read error with free mcboot 1.9 and potentiometer or laser adjustment
      -How to softmod ps2 slim scph 90004 without free mcboot
      -How to hardmod ps2 slim scph 90004 without soldering
      -Pros and cons of using free mcboot on ps2 slim scph 90004

      -

      Eject your uLaunchELF disc and insert your game disc again. Turn off your PS2 and turn it on again. Wait for the game to load until you see the main menu or a loading screen. Then, open the disc tray and swap the game disc with another disc that has OpenTuna burned on it. You can download OpenTuna from this link[^3^] and burn it to a CD-R using any burning software.

      -

Close the disc tray and wait for OpenTuna to load. You should see a black screen with some text saying "OpenTuna v0.x.x" and "Launching BOOT/BOOT.KELF".

      -

      FMCB 1.9 should be triggered after a few seconds. You

      e753bf7129
      -
      -
      \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Discover the Secrets of Farm Bay and Unlock New Adventures.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Discover the Secrets of Farm Bay and Unlock New Adventures.md deleted file mode 100644 index 5eee2b3f698b365755f3c7b122b7752f5cbe0d99..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Discover the Secrets of Farm Bay and Unlock New Adventures.md +++ /dev/null @@ -1,132 +0,0 @@ -
      -

      Farm Bay: A Fun and Relaxing Farming Game

      -

      Introduction

      -

      What is Farm Bay and why should you play it?

      -

      farm bay


      Download ————— https://bltlly.com/2uOrxZ



      -

      Features of Farm Bay

      -

      What are some of the things you can do in Farm Bay?

      -

      Harvest crops and fruits

      -

      How to grow and harvest various crops and fruits in Farm Bay.

      -

      Raise animals

      -

      How to feed and care for cute animals in Farm Bay.

      -

      Trade with friends

      -

      How to chat and trade with friends in the neighborhood in Farm Bay.

      -

      Earn coins and resources

      -

      How to earn coins and resources by completing orders, exploring the mine, and sailing to the treasure islands in Farm Bay.

      -

      farm bay game
      -farm bay resort
      -farm bay area
      -farm bay of plenty
      -farm bay city mi
      -farm bay city tx
      -farm bay city oregon
      -farm bay city michigan
      -farm bay county fl
      -farm bay county florida
      -farm bay road bowen island
      -farm bay road north saanich
      -farm bay road sidney bc
      -farm bay road victoria bc
      -farm bay road sidney
      -farm bay road north saanich bc
      -farm bay road bowen island bc
      -farm bay road sidney british columbia
      -farm bay village ohio
      -farm bay village oh
      -farm bay village farmers market
      -farm bay village police department
      -farm bay village library
      -farm bay village schools
      -farm bay village recreation department
      -farm bayswater vic 3153
      -farm bayswater victoria 3153
      -farm bayswater victoria australia 3153
      -farm bayswater vic australia 3153
      -farm bayswater vic au 3153
      -farm bayswater north vic 3153
      -farm bayswater north victoria 3153
      -farm bayswater north victoria australia 3153
      -farm bayswater north vic australia 3153
      -farm bayswater north vic au 3153
      -farm bayside ny 11361
      -farm bayside new york 11361
      -farm bayside new york ny 11361
      -farm bayside ny usa 11361
      -farm bayside ny us 11361
      -farm bayside queens ny 11361
      -farm bayside queens new york 11361
      -farm bayside queens new york ny 11361
      -farm bayside queens ny usa 11361
      -farm bayside queens ny us 11361

      -

      Expand your farm

      -

      How to use your coins and resources to upgrade your buildings, expand your land, and decorate your farm in Farm Bay.

      -

      Participate in events

      -

      How to join regular in-game events and enjoy the family atmosphere on your farm in Farm Bay.

      -

      Benefits of playing Farm Bay

      -

      What are some of the benefits of playing Farm Bay?

      -

      Mental health

      -

      How playing Farm Bay can reduce stress, improve mood, and boost creativity.

      -

      Social skills

      -

      How playing Farm Bay can enhance communication, cooperation, and friendship.

      -

      Environmental awareness

      -

      How playing Farm Bay can increase knowledge, appreciation, and responsibility for nature.

      -

      Tips and tricks for playing Farm Bay

      -

      What are some of the tips and tricks for playing Farm Bay?

      -

      Plan ahead

      -

      How to plan ahead for your crops, products, orders, and expansions in Farm Bay.

      -

      Balanced production

      -

      How to balance your production of crops, products, and resources in Farm Bay.

      -

      Prioritize orders

      -

      How to prioritize orders that give you the most coins, resources, or xp in Farm Bay.

      -

      Use helpers

      -

      How to use helpers such as the airship, the market stand, the ship, and the neighbors in Farm Bay.

      -

      Conclusion

      -

      A summary of the main points of the article and a call to action for the readers.

      - Article:

      Farm Bay: A Fun and Relaxing Farming Game

      -

If you are looking for a free farming game that is fun, relaxing, and rewarding, you should try Farm Bay. Farm Bay is a game where you turn a pretty farm into a paradise by the bay. You can enjoy farming activities such as harvesting crops, raising animals, trading with friends, earning coins and resources, expanding your farm, and participating in events. You can also explore the mine and sail to the treasure islands to find precious resources and fantastic treasures. In this article, we will tell you more about the features, benefits, and tips and tricks of playing Farm Bay.

      -

      Features of Farm Bay

      -

      Farm Bay has many features that make it an enjoyable and engaging game. Here are some of them:

      -

      Harvest crops and fruits

      -

In Farm Bay, you can grow and harvest various crops and fruits such as wheat, corn, carrots, apples, grapes, and strawberries. You can use them to make products or sell them for coins. You can also plant special crops that give you more xp or resources. To grow crops and fruits, you need to plow the land, sow seeds or plant saplings, water them if needed, and wait for them to mature. Then you can harvest them by tapping on them.

      -

      Raise animals

      -

      In Farm Bay, you can also feed and care for cute animals such as cows, chickens, pigs, sheep, etc. You can get healthy farm products from them such as milk, eggs, bacon, wool, etc. You can also collect animal feed from the feed mill or buy it from the market. To raise animals, you need to build animal houses, buy animals from the shop, feed them regularly, and collect their products by tapping on them.

      -

      Trade with friends

      -

      In Farm Bay, you can also chat and trade with friends in the neighborhood. You can join or create a neighborhood with other players and help each other with orders, resources, and tips. You can also chat with your neighbors and send them gifts. To trade with friends, you need to join a neighborhood, tap on the chat icon, and select the items you want to trade or request.

      -

      Earn coins and resources

      -

In Farm Bay, you can also earn coins and resources by completing orders, exploring the mine, and sailing to the treasure islands. Coins are used to buy items from the shop, upgrade buildings, expand land, and decorate your farm. Resources are used to make products, craft tools, and unlock new areas. To earn coins and resources, you need to complete orders from the order board, the airship, or the ship. You can also explore the mine with dynamite and shovels to find ores and gems, and sail to the treasure islands with maps and keys to find chests and artifacts.

      -

      Expand your farm

      -

      In Farm Bay, you can also use your coins and resources to upgrade your buildings, expand your land, and decorate your farm. Upgrading your buildings will increase their capacity, efficiency, and variety. Expanding your land will give you more space to grow crops, raise animals, and build structures. Decorating your farm will make it more beautiful and personalized. To expand your farm, you need to tap on the building, land, or decoration you want to upgrade, expand, or buy.

      -

      Participate in events

      -

      In Farm Bay, you can also join regular in-game events and enjoy the family atmosphere on your farm. Events are special occasions where you can earn extra rewards, compete with other players, or celebrate holidays. Some examples of events are the harvest festival, the fishing contest, the Halloween party, etc. To participate in events, you need to tap on the event icon and follow the instructions.

      -

      Benefits of playing Farm Bay

      -

      Playing Farm Bay is not only fun but also beneficial for your mental health, social skills, and environmental awareness. Here are some of the benefits of playing Farm Bay:

      -

      Mental health

      -

      Playing Farm Bay can reduce stress, improve mood, and boost creativity. Farming games are known to have a calming effect on the mind as they involve repetitive tasks that require focus and concentration. They also provide a sense of accomplishment and satisfaction as you see your farm grow and prosper. Moreover, they stimulate your imagination as you design and decorate your farm according to your preferences. Playing Farm Bay can help you relax, feel happy, and express yourself.

      Social skills

      -

      Playing Farm Bay can also enhance your communication, cooperation, and friendship. Farming games are social games that allow you to interact with other players in various ways. You can chat with your neighbors, trade with your friends, help each other with orders and resources, and compete or cooperate in events. You can also join or create a neighborhood with like-minded players and share your farming experiences. Playing Farm Bay can help you connect, collaborate, and have fun with others.

      -

      Environmental awareness

      -

      Playing Farm Bay can also increase your knowledge, appreciation, and responsibility for nature. Farming games are educational games that teach you about the basics of farming, such as crops, animals, products, and resources. They also show you the beauty and diversity of nature, such as flowers, trees, birds, fish, etc. They also inspire you to care for the environment, such as recycling, composting, saving water, etc. Playing Farm Bay can help you learn, enjoy, and protect nature.

      -

      Tips and tricks for playing Farm Bay

      -

      Playing Farm Bay is easy and fun, but there are some tips and tricks that can help you play better and faster. Here are some of them:

      -

      Plan ahead

      -

      One of the most important tips for playing Farm Bay is to plan ahead for your crops, products, orders, and expansions. You should always check what you need to grow, make, or buy before you start planting, producing, or purchasing. You should also check what you have in your storage or market stand before you sell or trade anything. You should also plan your expansions according to your needs and goals. Planning ahead will help you avoid wasting time, money, or resources.

      -

      Balanced production

      -

      Another tip for playing Farm Bay is to balance your production of crops, products, and resources. You should not focus on one type of item too much or too little. You should try to produce a variety of items that can be used for different purposes. You should also try to produce enough items to meet your orders, demands, and expansions. You should also try to produce items that give you more xp or resources. Balancing your production will help you optimize your farm efficiency and profitability.

      Prioritize orders

      -

      A third tip for playing Farm Bay is to prioritize orders that give you the most coins, resources, or xp. You should always check the rewards of each order before you accept or reject it. You should also check the difficulty and time limit of each order. You should try to complete orders that are easy, fast, and profitable. You should also try to complete orders that are related to events or achievements. Prioritizing orders will help you earn more rewards and progress faster.

      -

      Use helpers

      -

      A fourth tip for playing Farm Bay is to use helpers such as the airship, the market stand, the ship, and the neighbors. These helpers can help you with your production, trading, and expansion. The airship can help you deliver orders and earn resources. The market stand can help you sell or buy items and earn coins. The ship can help you sail to the treasure islands and earn resources and treasures. The neighbors can help you with your orders, resources, and tips. Using helpers will help you save time, money, and effort.

      -

      Conclusion

      -

      Farm Bay is a fun and relaxing farming game that you can play for free on your mobile device. You can enjoy the farming activities such as harvesting crops, raising animals, trading with friends, earning coins and resources, expanding your farm, and participating in events. You can also explore the mine and sail to the treasure islands to find precious resources and fantastic treasures. Playing Farm Bay can also benefit your mental health, social skills, and environmental awareness. With these tips and tricks, you can play Farm Bay better and faster. So what are you waiting for? Download Farm Bay today and start your farming adventure!

      -

      FAQs

      -

      Here are some of the frequently asked questions about Farm Bay:

      -

      Q: How can I download Farm Bay?

      -

      A: You can download Farm Bay from the Google Play Store or the App Store for free.

      -

      Q: How can I contact the support team of Farm Bay?

      -

      A: You can contact the support team of Farm Bay by tapping on the settings icon and then tapping on the help and support button.

      -

      Q: How can I get more coins and resources in Farm Bay?

      -

      A: You can get more coins and resources in Farm Bay by completing orders, exploring the mine, sailing to the treasure islands, trading with friends, watching ads, or buying them with real money.

      -

      Q: How can I get more xp in Farm Bay?

      -

      A: You can get more xp in Farm Bay by harvesting crops, making products, completing orders, expanding your farm, or participating in events.

      -

      Q: How can I join or create a neighborhood in Farm Bay?

      -

      A: You can join or create a neighborhood in Farm Bay by tapping on the neighborhood house icon and then tapping on the join or create button.

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Google Play Store Zip File The Fastest and Safest Way to Get the Most Out of Your Android.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Google Play Store Zip File The Fastest and Safest Way to Get the Most Out of Your Android.md deleted file mode 100644 index f0c51aacc01e452b502d10711f59c1d2c9af696f..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Google Play Store Zip File The Fastest and Safest Way to Get the Most Out of Your Android.md +++ /dev/null @@ -1,134 +0,0 @@ - -

      Download Google Play Store Zip File: A Complete Guide

      -

      Google Play Store is the official app store for Android devices, where you can find millions of apps, games, books, movies, and more. But what if your Android device doesn't have Google Play Store installed? Or what if you want to update or reinstall Google Play Store on your device? In this article, we will show you how to download Google Play Store zip file and how to unzip and install it on your Android device.

      -

      What is a zip file and why do you need it?

      -

      A zip file is a compressed file that contains one or more files or folders

      -

A zip file is a type of file that uses compression to reduce the size of the files or folders inside it. A zip file usually has the extension .zip or .zipx. You can recognize a zip file by its icon, which looks like a folder with a zipper on it. A zip file can contain any type of file, such as images, documents, or videos.
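
      To make the format concrete, here is a small sketch using only Python's standard library: it creates a zip archive and then lists what is inside it, showing the original and compressed size of each member. The file names are made up for the example.

      import zipfile

      # Create an archive with one compressed member.
      with zipfile.ZipFile("example.zip", "w", compression=zipfile.ZIP_DEFLATED) as zf:
          zf.writestr("hello.txt", "Hello, zip!" * 100)

      # Reopen it and list each member with its original and compressed sizes.
      with zipfile.ZipFile("example.zip") as zf:
          for info in zf.infolist():
              print(info.filename, info.file_size, info.compress_size)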

      -

      download google play store zip file


      Download File ———>>> https://bltlly.com/2uOhVJ



      -

      You may need a zip file to install Google Play Store on your Android device

      -

      Some Android devices, especially those from China or other regions where Google services are not available, may not have Google Play Store pre-installed. In that case, you may need to download and install Google Play Store manually on your device. One way to do that is to download Google Play Store zip file, which contains the necessary files and folders for installing Google Play Store on your device.

      -

      How to download Google Play Store zip file?

      -

      There are two ways to download Google Play Store zip file: from the web or from an app

      -

      Download from the web

      -

      You can download Google Play Store zip file from various websites that offer it. However, you need to be careful and choose a reliable and trustworthy source, as some websites may provide fake or malicious files that can harm your device. Here are some steps to download Google Play Store zip file from the web:

      -
      1. Open your browser and search for "google play store zip file" or use this link:
      2. Select the download button and save the file to your device's storage
      3. Make sure the file name ends with .zip or .zipx and has a reasonable size (around 47 MB)
      -
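
      If you would rather fetch the file on a PC and copy it to your device later, the steps above are easy to script. The sketch below uses the requests library; the URL is a placeholder for whichever trusted source you chose in step 1, and the final size check mirrors step 3.

      import os
      import requests

      # Placeholder URL -- replace with the trusted source you picked in step 1.
      URL = "https://example.com/google-play-store.zip"
      OUT = "google-play-store.zip"

      resp = requests.get(URL, stream=True, timeout=60)
      resp.raise_for_status()
      with open(OUT, "wb") as f:
          for chunk in resp.iter_content(chunk_size=1 << 16):
              f.write(chunk)

      # Sanity check from step 3: the name ends in .zip and the size looks plausible.
      size_mb = os.path.getsize(OUT) / (1024 * 1024)
      print(f"Saved {OUT} ({size_mb:.1f} MB)")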

      Download from an app

      -

You can also download Google Play Store zip file from an app that can fetch files from the web. For example, you can use the WinZip app, a zip file opener that also lets you download files from various sources. Here are some steps to download Google Play Store zip file with the WinZip app:

      -

      How to download google play store zip file on android
      -Download google play store zip file for nabi jr
      -Download google play store zip file apk mirror
      -Download google play store zip file for pc
      -Download google play store zip file for fire tablet
      -Download google play store zip file for samsung
      -Download google play store zip file for huawei
      -Download google play store zip file for kindle
      -Download google play store zip file for xiaomi
      -Download google play store zip file for oppo
      -Download google play store zip file for vivo
      -Download google play store zip file for lg
      -Download google play store zip file for nokia
      -Download google play store zip file for sony
      -Download google play store zip file for moto
      -Download google play store zip file for oneplus
      -Download google play store zip file for asus
      -Download google play store zip file for lenovo
      -Download google play store zip file for acer
      -Download google play store zip file for dell
      -Download google play store zip file for hp
      -Download google play store zip file for mac
      -Download google play store zip file for chromebook
      -Download google play store zip file for windows 10
      -Download google play store zip file for linux
      -Download google play store zip file without root
      -Download google play store zip file with winzip
      -Download google play store zip file with 7zip
      -Download google play store zip file with rar extractor
      -Download google play store zip file with es file explorer
      -Download google play store zip file with zarchiver
      -Download google play store zip file with apk installer
      -Download google play store zip file with adb tools
      -Download google play store zip file with fastboot commands
      -Download google play store zip file with twrp recovery
      -Download google play store zip file with magisk manager
      -Download google play store zip file with xposed framework
      -Download google play store zip file with lucky patcher
      -Download google play store zip file with titanium backup
      -Download google play store zip file with root explorer

      -
      1. Download and install WinZip app from the Google Play Store or from this link:
      2. Open WinZip app and tap on the menu icon (three horizontal lines) at the top left corner
      3. Select "Web Files" and enter this URL:
      4. Select "Download" and choose a location to save the file to your device's storage
      -

      How to unzip and install Google Play Store zip file?

      -

      You need a zip file opener app to unzip and install Google Play Store zip file

      -

      A zip file opener app is an app that can extract the files or folders from a zip file and save them to your device's storage. You need a zip file opener app to unzip and install Google Play Store zip file, as you cannot install it directly from the zip file. There are many zip file opener apps available for Android devices, such as Files by Google, WinZip, RAR, etc. You can choose any of them, but we will show you how to unzip and install Google Play Store zip file with two of them: Files by Google and WinZip.

      -

      How to unzip with Files by Google app

      -

      Files by Google is a file manager app that also has a built-in zip file opener feature. You can use Files by Google app to unzip and install Google Play Store zip file with these steps:

      -
      1. Download and install Files by Google app from the Google Play Store or from this link:
      2. Open Files by Google app and locate the Google Play Store zip file that you downloaded
      3. Tap on the zip file and select "Extract" at the bottom of the screen
      4. Choose a location to save the extracted files and folders and tap on "Done"
      5. Open the extracted folder and tap on the file named "GoogleServicesFramework.apk" to install it
      6. Tap on "Install" and then on "Done"
      7. Repeat the same process for the files named "GooglePlayServices.apk" and "GooglePlayStore.apk"
      8. Restart your device and you should see the Google Play Store icon on your home screen or app drawer
      -

      How to unzip with WinZip app

      -

      WinZip is a zip file opener app that also has a web file downloader feature. You can use WinZip app to unzip and install Google Play Store zip file with these steps:

      -
      1. Download and install WinZip app from the Google Play Store or from this link:
      2. Open WinZip app and locate the Google Play Store zip file that you downloaded
      3. Tap on the zip file and select "Unzip Here" at the bottom of the screen
      4. Open the unzipped folder and tap on the file named "GoogleServicesFramework.apk" to install it
      5. Tap on "Install" and then on "Done"
      6. Repeat the same process for the files named "GooglePlayServices.apk" and "GooglePlayStore.apk"
      7. Restart your device and you should see the Google Play Store icon on your home screen or app drawer
      -
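
      As an alternative to an on-device zip opener, you can do the extraction and installation from a PC. The sketch below assumes the archive really contains the three APKs named in the steps above, that adb is installed and on your PATH, and that USB debugging is enabled on the device.

      import subprocess
      import zipfile

      # Extract the downloaded archive next to the script.
      with zipfile.ZipFile("google-play-store.zip") as zf:
          zf.extractall("extracted")

      # Install the APKs in the same order as the on-device steps.
      for apk in ("GoogleServicesFramework.apk",
                  "GooglePlayServices.apk",
                  "GooglePlayStore.apk"):
          subprocess.run(["adb", "install", f"extracted/{apk}"], check=True)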

      Benefits of using Google Play Store on your Android device

      -

      Google Play Store is the official app store for Android devices

      -

      Google Play Store is the official app store for Android devices, where you can find millions of apps, games, books, movies, and more. You can browse, download, update, uninstall, rate, review, and manage your apps with Google Play Store. You can also access other Google services, such as Gmail, YouTube, Maps, Photos, etc., with your Google account.

      -

      Google Play Store offers millions of apps, games, books, movies, and more

      -

      Google Play Store offers millions of apps, games, books, movies, and more for you to enjoy on your Android device. You can find apps for various categories, such as education, entertainment, health, lifestyle, productivity, social, etc. You can also find games for different genres, such as action, adventure, puzzle, racing, sports, etc. You can also find books for different topics, such as fiction, non-fiction, biography, romance, thriller, etc. You can also find movies for different genres, such as comedy, drama, horror, action, etc.

      -

      Google Play Store provides security updates, parental controls, and user reviews

      -

      Google Play Store provides security updates, parental controls, and user reviews for your apps and games. You can get the latest security updates and bug fixes for your apps and games with Google Play Store. You can also set up parental controls to restrict the content and purchases that your children can access with Google Play Store. You can also read and write user reviews for your apps and games to share your feedback and opinions with other users.

      -

      Conclusion and FAQs

      -

      In conclusion, Google Play Store is the official app store for Android devices, where you can find millions of apps, games, books, movies, and more. If your Android device doesn't have Google Play Store installed, or if you want to update or reinstall it, you can download Google Play Store zip file and unzip and install it on your device. You need a zip file opener app to do that, such as Files by Google or WinZip. You can enjoy the benefits of using Google Play Store on your Android device, such as security updates, parental controls, and user reviews.

      -

      Here are some FAQs that you may have about downloading Google Play Store zip file:

      -
      1. Is it safe to download Google Play Store zip file?

         It is safe to download Google Play Store zip file if you download it from a reliable and trustworthy source, such as the link we provided in this article. However, you should always be careful and check the file name, size, and extension before downloading any file from the web.

      2. Do I need to root my device to install Google Play Store zip file?

         No, you do not need to root your device to install Google Play Store zip file. However, you may need to enable the "Unknown sources" option in your device's settings to allow the installation of apps from sources other than the Google Play Store.

      3. What if I encounter an error while installing Google Play Store zip file?

         If you encounter an error while installing Google Play Store zip file, such as "App not installed" or "Parse error", you may need to check the following things:

         - Make sure you have enough storage space on your device
         - Make sure you have downloaded the correct and compatible version of Google Play Store zip file for your device
         - Make sure you have unzipped the zip file correctly and installed the files in the right order
         - Make sure you have restarted your device after installing the files

      4. How can I update Google Play Store after installing it from a zip file?

         You can update Google Play Store after installing it from a zip file by following these steps:

         1. Open Google Play Store app on your device
         2. Tap on the menu icon (three horizontal lines) at the top left corner
         3. Select "Settings" and scroll down to "About"
         4. Select "Play Store version" and check for updates
         5. If there is an update available, tap on "Update" and follow the instructions

      5. How can I uninstall Google Play Store if I don't want it anymore?

         You can uninstall Google Play Store if you don't want it anymore by following these steps:

         1. Open your device's settings and select "Apps" or "Applications"
         2. Select "Google Play Store" and tap on "Uninstall" or "Disable"
         3. Confirm your action and wait for the process to complete
         4. You may also need to uninstall or disable the other files that you installed from the zip file, such as "GoogleServicesFramework.apk" and "GooglePlayServices.apk"
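
      If the on-device settings route in answer 5 fails (Play Store is often flagged as a system app), a hedged alternative is to remove it for the current user over adb. The package names below are the standard ones for Google's components, but verify them on your own device before running anything.

      import subprocess

      # Standard package names for Play Store, Play Services, and the services
      # framework; confirm with `adb shell pm list packages google` first.
      for pkg in ("com.android.vending",
                  "com.google.android.gms",
                  "com.google.android.gsf"):
          subprocess.run(
              ["adb", "shell", "pm", "uninstall", "--user", "0", pkg],
              check=False,  # some builds only allow `pm disable-user` instead
          )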

        401be4b1e0
        -
        -
        \ No newline at end of file diff --git a/spaces/timqian/like-history/README.md b/spaces/timqian/like-history/README.md deleted file mode 100644 index 1d8398c75a7fd03c7a4441633e0ef214509aa425..0000000000000000000000000000000000000000 --- a/spaces/timqian/like-history/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Like History -emoji: 😻 -colorFrom: green -colorTo: pink -sdk: static -pinned: false -base_path: /build/ -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -API: https://huggingface.co/api/spaces/facebook/seamless_m4t/likers?expand[]=likeAt \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/commands/debug.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/commands/debug.py deleted file mode 100644 index 6fad1fe894387f07f2484e6a0d27126b0a64f2b1..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/commands/debug.py +++ /dev/null @@ -1,199 +0,0 @@ -import importlib.resources -import locale -import logging -import os -import sys -from optparse import Values -from types import ModuleType -from typing import Any, Dict, List, Optional - -import pip._vendor -from pip._vendor.certifi import where -from pip._vendor.packaging.version import parse as parse_version - -from pip._internal.cli import cmdoptions -from pip._internal.cli.base_command import Command -from pip._internal.cli.cmdoptions import make_target_python -from pip._internal.cli.status_codes import SUCCESS -from pip._internal.configuration import Configuration -from pip._internal.metadata import get_environment -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import get_pip_version - -logger = logging.getLogger(__name__) - - -def show_value(name: str, value: Any) -> None: - logger.info("%s: %s", name, value) - - -def show_sys_implementation() -> None: - logger.info("sys.implementation:") - implementation_name = sys.implementation.name - with indent_log(): - show_value("name", implementation_name) - - -def create_vendor_txt_map() -> Dict[str, str]: - with importlib.resources.open_text("pip._vendor", "vendor.txt") as f: - # Purge non version specifying lines. - # Also, remove any space prefix or suffixes (including comments). - lines = [ - line.strip().split(" ", 1)[0] for line in f.readlines() if "==" in line - ] - - # Transform into "module" -> version dict. - return dict(line.split("==", 1) for line in lines) - - -def get_module_from_module_name(module_name: str) -> ModuleType: - # Module name can be uppercase in vendor.txt for some reason... - module_name = module_name.lower() - # PATCH: setuptools is actually only pkg_resources. - if module_name == "setuptools": - module_name = "pkg_resources" - - __import__(f"pip._vendor.{module_name}", globals(), locals(), level=0) - return getattr(pip._vendor, module_name) - - -def get_vendor_version_from_module(module_name: str) -> Optional[str]: - module = get_module_from_module_name(module_name) - version = getattr(module, "__version__", None) - - if not version: - # Try to find version in debundled module info. 
- assert module.__file__ is not None - env = get_environment([os.path.dirname(module.__file__)]) - dist = env.get_distribution(module_name) - if dist: - version = str(dist.version) - - return version - - -def show_actual_vendor_versions(vendor_txt_versions: Dict[str, str]) -> None: - """Log the actual version and print extra info if there is - a conflict or if the actual version could not be imported. - """ - for module_name, expected_version in vendor_txt_versions.items(): - extra_message = "" - actual_version = get_vendor_version_from_module(module_name) - if not actual_version: - extra_message = ( - " (Unable to locate actual module version, using" - " vendor.txt specified version)" - ) - actual_version = expected_version - elif parse_version(actual_version) != parse_version(expected_version): - extra_message = ( - " (CONFLICT: vendor.txt suggests version should" - " be {})".format(expected_version) - ) - logger.info("%s==%s%s", module_name, actual_version, extra_message) - - -def show_vendor_versions() -> None: - logger.info("vendored library versions:") - - vendor_txt_versions = create_vendor_txt_map() - with indent_log(): - show_actual_vendor_versions(vendor_txt_versions) - - -def show_tags(options: Values) -> None: - tag_limit = 10 - - target_python = make_target_python(options) - tags = target_python.get_tags() - - # Display the target options that were explicitly provided. - formatted_target = target_python.format_given() - suffix = "" - if formatted_target: - suffix = f" (target: {formatted_target})" - - msg = "Compatible tags: {}{}".format(len(tags), suffix) - logger.info(msg) - - if options.verbose < 1 and len(tags) > tag_limit: - tags_limited = True - tags = tags[:tag_limit] - else: - tags_limited = False - - with indent_log(): - for tag in tags: - logger.info(str(tag)) - - if tags_limited: - msg = ( - "...\n[First {tag_limit} tags shown. Pass --verbose to show all.]" - ).format(tag_limit=tag_limit) - logger.info(msg) - - -def ca_bundle_info(config: Configuration) -> str: - levels = set() - for key, _ in config.items(): - levels.add(key.split(".")[0]) - - if not levels: - return "Not specified" - - levels_that_override_global = ["install", "wheel", "download"] - global_overriding_level = [ - level for level in levels if level in levels_that_override_global - ] - if not global_overriding_level: - return "global" - - if "global" in levels: - levels.remove("global") - return ", ".join(levels) - - -class DebugCommand(Command): - """ - Display debug information. - """ - - usage = """ - %prog """ - ignore_require_venv = True - - def add_options(self) -> None: - cmdoptions.add_target_python_options(self.cmd_opts) - self.parser.insert_option_group(0, self.cmd_opts) - self.parser.config.load() - - def run(self, options: Values, args: List[str]) -> int: - logger.warning( - "This command is only meant for debugging. " - "Do not use this with automation for parsing and getting these " - "details, since the output and options of this command may " - "change without notice." 
- ) - show_value("pip version", get_pip_version()) - show_value("sys.version", sys.version) - show_value("sys.executable", sys.executable) - show_value("sys.getdefaultencoding", sys.getdefaultencoding()) - show_value("sys.getfilesystemencoding", sys.getfilesystemencoding()) - show_value( - "locale.getpreferredencoding", - locale.getpreferredencoding(), - ) - show_value("sys.platform", sys.platform) - show_sys_implementation() - - show_value("'cert' config value", ca_bundle_info(self.parser.config)) - show_value("REQUESTS_CA_BUNDLE", os.environ.get("REQUESTS_CA_BUNDLE")) - show_value("CURL_CA_BUNDLE", os.environ.get("CURL_CA_BUNDLE")) - show_value("pip._vendor.certifi.where()", where()) - show_value("pip._vendor.DEBUNDLED", pip._vendor.DEBUNDLED) - - show_vendor_versions() - - show_tags(options) - - return SUCCESS diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/idna/uts46data.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/idna/uts46data.py deleted file mode 100644 index 8f65705ee91fcf56d8eaf8d538a86d3e5d457d51..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/idna/uts46data.py +++ /dev/null @@ -1,8512 +0,0 @@ -# This file is automatically generated by tools/idna-data -# vim: set fileencoding=utf-8 : - -from typing import List, Tuple, Union - - -"""IDNA Mapping Table from UTS46.""" - - -__version__ = '14.0.0' -def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x0, '3'), - (0x1, '3'), - (0x2, '3'), - (0x3, '3'), - (0x4, '3'), - (0x5, '3'), - (0x6, '3'), - (0x7, '3'), - (0x8, '3'), - (0x9, '3'), - (0xA, '3'), - (0xB, '3'), - (0xC, '3'), - (0xD, '3'), - (0xE, '3'), - (0xF, '3'), - (0x10, '3'), - (0x11, '3'), - (0x12, '3'), - (0x13, '3'), - (0x14, '3'), - (0x15, '3'), - (0x16, '3'), - (0x17, '3'), - (0x18, '3'), - (0x19, '3'), - (0x1A, '3'), - (0x1B, '3'), - (0x1C, '3'), - (0x1D, '3'), - (0x1E, '3'), - (0x1F, '3'), - (0x20, '3'), - (0x21, '3'), - (0x22, '3'), - (0x23, '3'), - (0x24, '3'), - (0x25, '3'), - (0x26, '3'), - (0x27, '3'), - (0x28, '3'), - (0x29, '3'), - (0x2A, '3'), - (0x2B, '3'), - (0x2C, '3'), - (0x2D, 'V'), - (0x2E, 'V'), - (0x2F, '3'), - (0x30, 'V'), - (0x31, 'V'), - (0x32, 'V'), - (0x33, 'V'), - (0x34, 'V'), - (0x35, 'V'), - (0x36, 'V'), - (0x37, 'V'), - (0x38, 'V'), - (0x39, 'V'), - (0x3A, '3'), - (0x3B, '3'), - (0x3C, '3'), - (0x3D, '3'), - (0x3E, '3'), - (0x3F, '3'), - (0x40, '3'), - (0x41, 'M', 'a'), - (0x42, 'M', 'b'), - (0x43, 'M', 'c'), - (0x44, 'M', 'd'), - (0x45, 'M', 'e'), - (0x46, 'M', 'f'), - (0x47, 'M', 'g'), - (0x48, 'M', 'h'), - (0x49, 'M', 'i'), - (0x4A, 'M', 'j'), - (0x4B, 'M', 'k'), - (0x4C, 'M', 'l'), - (0x4D, 'M', 'm'), - (0x4E, 'M', 'n'), - (0x4F, 'M', 'o'), - (0x50, 'M', 'p'), - (0x51, 'M', 'q'), - (0x52, 'M', 'r'), - (0x53, 'M', 's'), - (0x54, 'M', 't'), - (0x55, 'M', 'u'), - (0x56, 'M', 'v'), - (0x57, 'M', 'w'), - (0x58, 'M', 'x'), - (0x59, 'M', 'y'), - (0x5A, 'M', 'z'), - (0x5B, '3'), - (0x5C, '3'), - (0x5D, '3'), - (0x5E, '3'), - (0x5F, '3'), - (0x60, '3'), - (0x61, 'V'), - (0x62, 'V'), - (0x63, 'V'), - ] - -def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x64, 'V'), - (0x65, 'V'), - (0x66, 'V'), - (0x67, 'V'), - (0x68, 'V'), - (0x69, 'V'), - (0x6A, 'V'), - (0x6B, 'V'), - (0x6C, 'V'), - (0x6D, 'V'), - (0x6E, 'V'), - (0x6F, 'V'), - (0x70, 'V'), - (0x71, 'V'), - (0x72, 'V'), - (0x73, 'V'), - (0x74, 'V'), - (0x75, 'V'), - (0x76, 
'V'), - (0x77, 'V'), - (0x78, 'V'), - (0x79, 'V'), - (0x7A, 'V'), - (0x7B, '3'), - (0x7C, '3'), - (0x7D, '3'), - (0x7E, '3'), - (0x7F, '3'), - (0x80, 'X'), - (0x81, 'X'), - (0x82, 'X'), - (0x83, 'X'), - (0x84, 'X'), - (0x85, 'X'), - (0x86, 'X'), - (0x87, 'X'), - (0x88, 'X'), - (0x89, 'X'), - (0x8A, 'X'), - (0x8B, 'X'), - (0x8C, 'X'), - (0x8D, 'X'), - (0x8E, 'X'), - (0x8F, 'X'), - (0x90, 'X'), - (0x91, 'X'), - (0x92, 'X'), - (0x93, 'X'), - (0x94, 'X'), - (0x95, 'X'), - (0x96, 'X'), - (0x97, 'X'), - (0x98, 'X'), - (0x99, 'X'), - (0x9A, 'X'), - (0x9B, 'X'), - (0x9C, 'X'), - (0x9D, 'X'), - (0x9E, 'X'), - (0x9F, 'X'), - (0xA0, '3', ' '), - (0xA1, 'V'), - (0xA2, 'V'), - (0xA3, 'V'), - (0xA4, 'V'), - (0xA5, 'V'), - (0xA6, 'V'), - (0xA7, 'V'), - (0xA8, '3', ' ̈'), - (0xA9, 'V'), - (0xAA, 'M', 'a'), - (0xAB, 'V'), - (0xAC, 'V'), - (0xAD, 'I'), - (0xAE, 'V'), - (0xAF, '3', ' ̄'), - (0xB0, 'V'), - (0xB1, 'V'), - (0xB2, 'M', '2'), - (0xB3, 'M', '3'), - (0xB4, '3', ' ́'), - (0xB5, 'M', 'μ'), - (0xB6, 'V'), - (0xB7, 'V'), - (0xB8, '3', ' ̧'), - (0xB9, 'M', '1'), - (0xBA, 'M', 'o'), - (0xBB, 'V'), - (0xBC, 'M', '1⁄4'), - (0xBD, 'M', '1⁄2'), - (0xBE, 'M', '3⁄4'), - (0xBF, 'V'), - (0xC0, 'M', 'à'), - (0xC1, 'M', 'á'), - (0xC2, 'M', 'â'), - (0xC3, 'M', 'ã'), - (0xC4, 'M', 'ä'), - (0xC5, 'M', 'å'), - (0xC6, 'M', 'æ'), - (0xC7, 'M', 'ç'), - ] - -def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC8, 'M', 'è'), - (0xC9, 'M', 'é'), - (0xCA, 'M', 'ê'), - (0xCB, 'M', 'ë'), - (0xCC, 'M', 'ì'), - (0xCD, 'M', 'í'), - (0xCE, 'M', 'î'), - (0xCF, 'M', 'ï'), - (0xD0, 'M', 'ð'), - (0xD1, 'M', 'ñ'), - (0xD2, 'M', 'ò'), - (0xD3, 'M', 'ó'), - (0xD4, 'M', 'ô'), - (0xD5, 'M', 'õ'), - (0xD6, 'M', 'ö'), - (0xD7, 'V'), - (0xD8, 'M', 'ø'), - (0xD9, 'M', 'ù'), - (0xDA, 'M', 'ú'), - (0xDB, 'M', 'û'), - (0xDC, 'M', 'ü'), - (0xDD, 'M', 'ý'), - (0xDE, 'M', 'þ'), - (0xDF, 'D', 'ss'), - (0xE0, 'V'), - (0xE1, 'V'), - (0xE2, 'V'), - (0xE3, 'V'), - (0xE4, 'V'), - (0xE5, 'V'), - (0xE6, 'V'), - (0xE7, 'V'), - (0xE8, 'V'), - (0xE9, 'V'), - (0xEA, 'V'), - (0xEB, 'V'), - (0xEC, 'V'), - (0xED, 'V'), - (0xEE, 'V'), - (0xEF, 'V'), - (0xF0, 'V'), - (0xF1, 'V'), - (0xF2, 'V'), - (0xF3, 'V'), - (0xF4, 'V'), - (0xF5, 'V'), - (0xF6, 'V'), - (0xF7, 'V'), - (0xF8, 'V'), - (0xF9, 'V'), - (0xFA, 'V'), - (0xFB, 'V'), - (0xFC, 'V'), - (0xFD, 'V'), - (0xFE, 'V'), - (0xFF, 'V'), - (0x100, 'M', 'ā'), - (0x101, 'V'), - (0x102, 'M', 'ă'), - (0x103, 'V'), - (0x104, 'M', 'ą'), - (0x105, 'V'), - (0x106, 'M', 'ć'), - (0x107, 'V'), - (0x108, 'M', 'ĉ'), - (0x109, 'V'), - (0x10A, 'M', 'ċ'), - (0x10B, 'V'), - (0x10C, 'M', 'č'), - (0x10D, 'V'), - (0x10E, 'M', 'ď'), - (0x10F, 'V'), - (0x110, 'M', 'đ'), - (0x111, 'V'), - (0x112, 'M', 'ē'), - (0x113, 'V'), - (0x114, 'M', 'ĕ'), - (0x115, 'V'), - (0x116, 'M', 'ė'), - (0x117, 'V'), - (0x118, 'M', 'ę'), - (0x119, 'V'), - (0x11A, 'M', 'ě'), - (0x11B, 'V'), - (0x11C, 'M', 'ĝ'), - (0x11D, 'V'), - (0x11E, 'M', 'ğ'), - (0x11F, 'V'), - (0x120, 'M', 'ġ'), - (0x121, 'V'), - (0x122, 'M', 'ģ'), - (0x123, 'V'), - (0x124, 'M', 'ĥ'), - (0x125, 'V'), - (0x126, 'M', 'ħ'), - (0x127, 'V'), - (0x128, 'M', 'ĩ'), - (0x129, 'V'), - (0x12A, 'M', 'ī'), - (0x12B, 'V'), - ] - -def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x12C, 'M', 'ĭ'), - (0x12D, 'V'), - (0x12E, 'M', 'į'), - (0x12F, 'V'), - (0x130, 'M', 'i̇'), - (0x131, 'V'), - (0x132, 'M', 'ij'), - (0x134, 'M', 'ĵ'), - (0x135, 'V'), - (0x136, 'M', 'ķ'), - (0x137, 'V'), - (0x139, 'M', 'ĺ'), - (0x13A, 'V'), - (0x13B, 'M', 'ļ'), - 
(0x13C, 'V'), - (0x13D, 'M', 'ľ'), - (0x13E, 'V'), - (0x13F, 'M', 'l·'), - (0x141, 'M', 'ł'), - (0x142, 'V'), - (0x143, 'M', 'ń'), - (0x144, 'V'), - (0x145, 'M', 'ņ'), - (0x146, 'V'), - (0x147, 'M', 'ň'), - (0x148, 'V'), - (0x149, 'M', 'ʼn'), - (0x14A, 'M', 'ŋ'), - (0x14B, 'V'), - (0x14C, 'M', 'ō'), - (0x14D, 'V'), - (0x14E, 'M', 'ŏ'), - (0x14F, 'V'), - (0x150, 'M', 'ő'), - (0x151, 'V'), - (0x152, 'M', 'œ'), - (0x153, 'V'), - (0x154, 'M', 'ŕ'), - (0x155, 'V'), - (0x156, 'M', 'ŗ'), - (0x157, 'V'), - (0x158, 'M', 'ř'), - (0x159, 'V'), - (0x15A, 'M', 'ś'), - (0x15B, 'V'), - (0x15C, 'M', 'ŝ'), - (0x15D, 'V'), - (0x15E, 'M', 'ş'), - (0x15F, 'V'), - (0x160, 'M', 'š'), - (0x161, 'V'), - (0x162, 'M', 'ţ'), - (0x163, 'V'), - (0x164, 'M', 'ť'), - (0x165, 'V'), - (0x166, 'M', 'ŧ'), - (0x167, 'V'), - (0x168, 'M', 'ũ'), - (0x169, 'V'), - (0x16A, 'M', 'ū'), - (0x16B, 'V'), - (0x16C, 'M', 'ŭ'), - (0x16D, 'V'), - (0x16E, 'M', 'ů'), - (0x16F, 'V'), - (0x170, 'M', 'ű'), - (0x171, 'V'), - (0x172, 'M', 'ų'), - (0x173, 'V'), - (0x174, 'M', 'ŵ'), - (0x175, 'V'), - (0x176, 'M', 'ŷ'), - (0x177, 'V'), - (0x178, 'M', 'ÿ'), - (0x179, 'M', 'ź'), - (0x17A, 'V'), - (0x17B, 'M', 'ż'), - (0x17C, 'V'), - (0x17D, 'M', 'ž'), - (0x17E, 'V'), - (0x17F, 'M', 's'), - (0x180, 'V'), - (0x181, 'M', 'ɓ'), - (0x182, 'M', 'ƃ'), - (0x183, 'V'), - (0x184, 'M', 'ƅ'), - (0x185, 'V'), - (0x186, 'M', 'ɔ'), - (0x187, 'M', 'ƈ'), - (0x188, 'V'), - (0x189, 'M', 'ɖ'), - (0x18A, 'M', 'ɗ'), - (0x18B, 'M', 'ƌ'), - (0x18C, 'V'), - (0x18E, 'M', 'ǝ'), - (0x18F, 'M', 'ə'), - (0x190, 'M', 'ɛ'), - (0x191, 'M', 'ƒ'), - (0x192, 'V'), - (0x193, 'M', 'ɠ'), - ] - -def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x194, 'M', 'ɣ'), - (0x195, 'V'), - (0x196, 'M', 'ɩ'), - (0x197, 'M', 'ɨ'), - (0x198, 'M', 'ƙ'), - (0x199, 'V'), - (0x19C, 'M', 'ɯ'), - (0x19D, 'M', 'ɲ'), - (0x19E, 'V'), - (0x19F, 'M', 'ɵ'), - (0x1A0, 'M', 'ơ'), - (0x1A1, 'V'), - (0x1A2, 'M', 'ƣ'), - (0x1A3, 'V'), - (0x1A4, 'M', 'ƥ'), - (0x1A5, 'V'), - (0x1A6, 'M', 'ʀ'), - (0x1A7, 'M', 'ƨ'), - (0x1A8, 'V'), - (0x1A9, 'M', 'ʃ'), - (0x1AA, 'V'), - (0x1AC, 'M', 'ƭ'), - (0x1AD, 'V'), - (0x1AE, 'M', 'ʈ'), - (0x1AF, 'M', 'ư'), - (0x1B0, 'V'), - (0x1B1, 'M', 'ʊ'), - (0x1B2, 'M', 'ʋ'), - (0x1B3, 'M', 'ƴ'), - (0x1B4, 'V'), - (0x1B5, 'M', 'ƶ'), - (0x1B6, 'V'), - (0x1B7, 'M', 'ʒ'), - (0x1B8, 'M', 'ƹ'), - (0x1B9, 'V'), - (0x1BC, 'M', 'ƽ'), - (0x1BD, 'V'), - (0x1C4, 'M', 'dž'), - (0x1C7, 'M', 'lj'), - (0x1CA, 'M', 'nj'), - (0x1CD, 'M', 'ǎ'), - (0x1CE, 'V'), - (0x1CF, 'M', 'ǐ'), - (0x1D0, 'V'), - (0x1D1, 'M', 'ǒ'), - (0x1D2, 'V'), - (0x1D3, 'M', 'ǔ'), - (0x1D4, 'V'), - (0x1D5, 'M', 'ǖ'), - (0x1D6, 'V'), - (0x1D7, 'M', 'ǘ'), - (0x1D8, 'V'), - (0x1D9, 'M', 'ǚ'), - (0x1DA, 'V'), - (0x1DB, 'M', 'ǜ'), - (0x1DC, 'V'), - (0x1DE, 'M', 'ǟ'), - (0x1DF, 'V'), - (0x1E0, 'M', 'ǡ'), - (0x1E1, 'V'), - (0x1E2, 'M', 'ǣ'), - (0x1E3, 'V'), - (0x1E4, 'M', 'ǥ'), - (0x1E5, 'V'), - (0x1E6, 'M', 'ǧ'), - (0x1E7, 'V'), - (0x1E8, 'M', 'ǩ'), - (0x1E9, 'V'), - (0x1EA, 'M', 'ǫ'), - (0x1EB, 'V'), - (0x1EC, 'M', 'ǭ'), - (0x1ED, 'V'), - (0x1EE, 'M', 'ǯ'), - (0x1EF, 'V'), - (0x1F1, 'M', 'dz'), - (0x1F4, 'M', 'ǵ'), - (0x1F5, 'V'), - (0x1F6, 'M', 'ƕ'), - (0x1F7, 'M', 'ƿ'), - (0x1F8, 'M', 'ǹ'), - (0x1F9, 'V'), - (0x1FA, 'M', 'ǻ'), - (0x1FB, 'V'), - (0x1FC, 'M', 'ǽ'), - (0x1FD, 'V'), - (0x1FE, 'M', 'ǿ'), - (0x1FF, 'V'), - (0x200, 'M', 'ȁ'), - (0x201, 'V'), - (0x202, 'M', 'ȃ'), - (0x203, 'V'), - (0x204, 'M', 'ȅ'), - (0x205, 'V'), - (0x206, 'M', 'ȇ'), - (0x207, 'V'), - (0x208, 'M', 'ȉ'), - (0x209, 'V'), - (0x20A, 'M', 
'ȋ'), - (0x20B, 'V'), - (0x20C, 'M', 'ȍ'), - ] - -def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x20D, 'V'), - (0x20E, 'M', 'ȏ'), - (0x20F, 'V'), - (0x210, 'M', 'ȑ'), - (0x211, 'V'), - (0x212, 'M', 'ȓ'), - (0x213, 'V'), - (0x214, 'M', 'ȕ'), - (0x215, 'V'), - (0x216, 'M', 'ȗ'), - (0x217, 'V'), - (0x218, 'M', 'ș'), - (0x219, 'V'), - (0x21A, 'M', 'ț'), - (0x21B, 'V'), - (0x21C, 'M', 'ȝ'), - (0x21D, 'V'), - (0x21E, 'M', 'ȟ'), - (0x21F, 'V'), - (0x220, 'M', 'ƞ'), - (0x221, 'V'), - (0x222, 'M', 'ȣ'), - (0x223, 'V'), - (0x224, 'M', 'ȥ'), - (0x225, 'V'), - (0x226, 'M', 'ȧ'), - (0x227, 'V'), - (0x228, 'M', 'ȩ'), - (0x229, 'V'), - (0x22A, 'M', 'ȫ'), - (0x22B, 'V'), - (0x22C, 'M', 'ȭ'), - (0x22D, 'V'), - (0x22E, 'M', 'ȯ'), - (0x22F, 'V'), - (0x230, 'M', 'ȱ'), - (0x231, 'V'), - (0x232, 'M', 'ȳ'), - (0x233, 'V'), - (0x23A, 'M', 'ⱥ'), - (0x23B, 'M', 'ȼ'), - (0x23C, 'V'), - (0x23D, 'M', 'ƚ'), - (0x23E, 'M', 'ⱦ'), - (0x23F, 'V'), - (0x241, 'M', 'ɂ'), - (0x242, 'V'), - (0x243, 'M', 'ƀ'), - (0x244, 'M', 'ʉ'), - (0x245, 'M', 'ʌ'), - (0x246, 'M', 'ɇ'), - (0x247, 'V'), - (0x248, 'M', 'ɉ'), - (0x249, 'V'), - (0x24A, 'M', 'ɋ'), - (0x24B, 'V'), - (0x24C, 'M', 'ɍ'), - (0x24D, 'V'), - (0x24E, 'M', 'ɏ'), - (0x24F, 'V'), - (0x2B0, 'M', 'h'), - (0x2B1, 'M', 'ɦ'), - (0x2B2, 'M', 'j'), - (0x2B3, 'M', 'r'), - (0x2B4, 'M', 'ɹ'), - (0x2B5, 'M', 'ɻ'), - (0x2B6, 'M', 'ʁ'), - (0x2B7, 'M', 'w'), - (0x2B8, 'M', 'y'), - (0x2B9, 'V'), - (0x2D8, '3', ' ̆'), - (0x2D9, '3', ' ̇'), - (0x2DA, '3', ' ̊'), - (0x2DB, '3', ' ̨'), - (0x2DC, '3', ' ̃'), - (0x2DD, '3', ' ̋'), - (0x2DE, 'V'), - (0x2E0, 'M', 'ɣ'), - (0x2E1, 'M', 'l'), - (0x2E2, 'M', 's'), - (0x2E3, 'M', 'x'), - (0x2E4, 'M', 'ʕ'), - (0x2E5, 'V'), - (0x340, 'M', '̀'), - (0x341, 'M', '́'), - (0x342, 'V'), - (0x343, 'M', '̓'), - (0x344, 'M', '̈́'), - (0x345, 'M', 'ι'), - (0x346, 'V'), - (0x34F, 'I'), - (0x350, 'V'), - (0x370, 'M', 'ͱ'), - (0x371, 'V'), - (0x372, 'M', 'ͳ'), - (0x373, 'V'), - (0x374, 'M', 'ʹ'), - (0x375, 'V'), - (0x376, 'M', 'ͷ'), - (0x377, 'V'), - ] - -def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x378, 'X'), - (0x37A, '3', ' ι'), - (0x37B, 'V'), - (0x37E, '3', ';'), - (0x37F, 'M', 'ϳ'), - (0x380, 'X'), - (0x384, '3', ' ́'), - (0x385, '3', ' ̈́'), - (0x386, 'M', 'ά'), - (0x387, 'M', '·'), - (0x388, 'M', 'έ'), - (0x389, 'M', 'ή'), - (0x38A, 'M', 'ί'), - (0x38B, 'X'), - (0x38C, 'M', 'ό'), - (0x38D, 'X'), - (0x38E, 'M', 'ύ'), - (0x38F, 'M', 'ώ'), - (0x390, 'V'), - (0x391, 'M', 'α'), - (0x392, 'M', 'β'), - (0x393, 'M', 'γ'), - (0x394, 'M', 'δ'), - (0x395, 'M', 'ε'), - (0x396, 'M', 'ζ'), - (0x397, 'M', 'η'), - (0x398, 'M', 'θ'), - (0x399, 'M', 'ι'), - (0x39A, 'M', 'κ'), - (0x39B, 'M', 'λ'), - (0x39C, 'M', 'μ'), - (0x39D, 'M', 'ν'), - (0x39E, 'M', 'ξ'), - (0x39F, 'M', 'ο'), - (0x3A0, 'M', 'π'), - (0x3A1, 'M', 'ρ'), - (0x3A2, 'X'), - (0x3A3, 'M', 'σ'), - (0x3A4, 'M', 'τ'), - (0x3A5, 'M', 'υ'), - (0x3A6, 'M', 'φ'), - (0x3A7, 'M', 'χ'), - (0x3A8, 'M', 'ψ'), - (0x3A9, 'M', 'ω'), - (0x3AA, 'M', 'ϊ'), - (0x3AB, 'M', 'ϋ'), - (0x3AC, 'V'), - (0x3C2, 'D', 'σ'), - (0x3C3, 'V'), - (0x3CF, 'M', 'ϗ'), - (0x3D0, 'M', 'β'), - (0x3D1, 'M', 'θ'), - (0x3D2, 'M', 'υ'), - (0x3D3, 'M', 'ύ'), - (0x3D4, 'M', 'ϋ'), - (0x3D5, 'M', 'φ'), - (0x3D6, 'M', 'π'), - (0x3D7, 'V'), - (0x3D8, 'M', 'ϙ'), - (0x3D9, 'V'), - (0x3DA, 'M', 'ϛ'), - (0x3DB, 'V'), - (0x3DC, 'M', 'ϝ'), - (0x3DD, 'V'), - (0x3DE, 'M', 'ϟ'), - (0x3DF, 'V'), - (0x3E0, 'M', 'ϡ'), - (0x3E1, 'V'), - (0x3E2, 'M', 'ϣ'), - (0x3E3, 'V'), - (0x3E4, 'M', 'ϥ'), - (0x3E5, 
'V'), - (0x3E6, 'M', 'ϧ'), - (0x3E7, 'V'), - (0x3E8, 'M', 'ϩ'), - (0x3E9, 'V'), - (0x3EA, 'M', 'ϫ'), - (0x3EB, 'V'), - (0x3EC, 'M', 'ϭ'), - (0x3ED, 'V'), - (0x3EE, 'M', 'ϯ'), - (0x3EF, 'V'), - (0x3F0, 'M', 'κ'), - (0x3F1, 'M', 'ρ'), - (0x3F2, 'M', 'σ'), - (0x3F3, 'V'), - (0x3F4, 'M', 'θ'), - (0x3F5, 'M', 'ε'), - (0x3F6, 'V'), - (0x3F7, 'M', 'ϸ'), - (0x3F8, 'V'), - (0x3F9, 'M', 'σ'), - (0x3FA, 'M', 'ϻ'), - (0x3FB, 'V'), - (0x3FD, 'M', 'ͻ'), - (0x3FE, 'M', 'ͼ'), - (0x3FF, 'M', 'ͽ'), - (0x400, 'M', 'ѐ'), - (0x401, 'M', 'ё'), - (0x402, 'M', 'ђ'), - ] - -def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x403, 'M', 'ѓ'), - (0x404, 'M', 'є'), - (0x405, 'M', 'ѕ'), - (0x406, 'M', 'і'), - (0x407, 'M', 'ї'), - (0x408, 'M', 'ј'), - (0x409, 'M', 'љ'), - (0x40A, 'M', 'њ'), - (0x40B, 'M', 'ћ'), - (0x40C, 'M', 'ќ'), - (0x40D, 'M', 'ѝ'), - (0x40E, 'M', 'ў'), - (0x40F, 'M', 'џ'), - (0x410, 'M', 'а'), - (0x411, 'M', 'б'), - (0x412, 'M', 'в'), - (0x413, 'M', 'г'), - (0x414, 'M', 'д'), - (0x415, 'M', 'е'), - (0x416, 'M', 'ж'), - (0x417, 'M', 'з'), - (0x418, 'M', 'и'), - (0x419, 'M', 'й'), - (0x41A, 'M', 'к'), - (0x41B, 'M', 'л'), - (0x41C, 'M', 'м'), - (0x41D, 'M', 'н'), - (0x41E, 'M', 'о'), - (0x41F, 'M', 'п'), - (0x420, 'M', 'р'), - (0x421, 'M', 'с'), - (0x422, 'M', 'т'), - (0x423, 'M', 'у'), - (0x424, 'M', 'ф'), - (0x425, 'M', 'х'), - (0x426, 'M', 'ц'), - (0x427, 'M', 'ч'), - (0x428, 'M', 'ш'), - (0x429, 'M', 'щ'), - (0x42A, 'M', 'ъ'), - (0x42B, 'M', 'ы'), - (0x42C, 'M', 'ь'), - (0x42D, 'M', 'э'), - (0x42E, 'M', 'ю'), - (0x42F, 'M', 'я'), - (0x430, 'V'), - (0x460, 'M', 'ѡ'), - (0x461, 'V'), - (0x462, 'M', 'ѣ'), - (0x463, 'V'), - (0x464, 'M', 'ѥ'), - (0x465, 'V'), - (0x466, 'M', 'ѧ'), - (0x467, 'V'), - (0x468, 'M', 'ѩ'), - (0x469, 'V'), - (0x46A, 'M', 'ѫ'), - (0x46B, 'V'), - (0x46C, 'M', 'ѭ'), - (0x46D, 'V'), - (0x46E, 'M', 'ѯ'), - (0x46F, 'V'), - (0x470, 'M', 'ѱ'), - (0x471, 'V'), - (0x472, 'M', 'ѳ'), - (0x473, 'V'), - (0x474, 'M', 'ѵ'), - (0x475, 'V'), - (0x476, 'M', 'ѷ'), - (0x477, 'V'), - (0x478, 'M', 'ѹ'), - (0x479, 'V'), - (0x47A, 'M', 'ѻ'), - (0x47B, 'V'), - (0x47C, 'M', 'ѽ'), - (0x47D, 'V'), - (0x47E, 'M', 'ѿ'), - (0x47F, 'V'), - (0x480, 'M', 'ҁ'), - (0x481, 'V'), - (0x48A, 'M', 'ҋ'), - (0x48B, 'V'), - (0x48C, 'M', 'ҍ'), - (0x48D, 'V'), - (0x48E, 'M', 'ҏ'), - (0x48F, 'V'), - (0x490, 'M', 'ґ'), - (0x491, 'V'), - (0x492, 'M', 'ғ'), - (0x493, 'V'), - (0x494, 'M', 'ҕ'), - (0x495, 'V'), - (0x496, 'M', 'җ'), - (0x497, 'V'), - (0x498, 'M', 'ҙ'), - (0x499, 'V'), - (0x49A, 'M', 'қ'), - (0x49B, 'V'), - (0x49C, 'M', 'ҝ'), - (0x49D, 'V'), - ] - -def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x49E, 'M', 'ҟ'), - (0x49F, 'V'), - (0x4A0, 'M', 'ҡ'), - (0x4A1, 'V'), - (0x4A2, 'M', 'ң'), - (0x4A3, 'V'), - (0x4A4, 'M', 'ҥ'), - (0x4A5, 'V'), - (0x4A6, 'M', 'ҧ'), - (0x4A7, 'V'), - (0x4A8, 'M', 'ҩ'), - (0x4A9, 'V'), - (0x4AA, 'M', 'ҫ'), - (0x4AB, 'V'), - (0x4AC, 'M', 'ҭ'), - (0x4AD, 'V'), - (0x4AE, 'M', 'ү'), - (0x4AF, 'V'), - (0x4B0, 'M', 'ұ'), - (0x4B1, 'V'), - (0x4B2, 'M', 'ҳ'), - (0x4B3, 'V'), - (0x4B4, 'M', 'ҵ'), - (0x4B5, 'V'), - (0x4B6, 'M', 'ҷ'), - (0x4B7, 'V'), - (0x4B8, 'M', 'ҹ'), - (0x4B9, 'V'), - (0x4BA, 'M', 'һ'), - (0x4BB, 'V'), - (0x4BC, 'M', 'ҽ'), - (0x4BD, 'V'), - (0x4BE, 'M', 'ҿ'), - (0x4BF, 'V'), - (0x4C0, 'X'), - (0x4C1, 'M', 'ӂ'), - (0x4C2, 'V'), - (0x4C3, 'M', 'ӄ'), - (0x4C4, 'V'), - (0x4C5, 'M', 'ӆ'), - (0x4C6, 'V'), - (0x4C7, 'M', 'ӈ'), - (0x4C8, 'V'), - (0x4C9, 'M', 'ӊ'), - (0x4CA, 'V'), - (0x4CB, 'M', 'ӌ'), - (0x4CC, 'V'), - 
(0x4CD, 'M', 'ӎ'), - (0x4CE, 'V'), - (0x4D0, 'M', 'ӑ'), - (0x4D1, 'V'), - (0x4D2, 'M', 'ӓ'), - (0x4D3, 'V'), - (0x4D4, 'M', 'ӕ'), - (0x4D5, 'V'), - (0x4D6, 'M', 'ӗ'), - (0x4D7, 'V'), - (0x4D8, 'M', 'ә'), - (0x4D9, 'V'), - (0x4DA, 'M', 'ӛ'), - (0x4DB, 'V'), - (0x4DC, 'M', 'ӝ'), - (0x4DD, 'V'), - (0x4DE, 'M', 'ӟ'), - (0x4DF, 'V'), - (0x4E0, 'M', 'ӡ'), - (0x4E1, 'V'), - (0x4E2, 'M', 'ӣ'), - (0x4E3, 'V'), - (0x4E4, 'M', 'ӥ'), - (0x4E5, 'V'), - (0x4E6, 'M', 'ӧ'), - (0x4E7, 'V'), - (0x4E8, 'M', 'ө'), - (0x4E9, 'V'), - (0x4EA, 'M', 'ӫ'), - (0x4EB, 'V'), - (0x4EC, 'M', 'ӭ'), - (0x4ED, 'V'), - (0x4EE, 'M', 'ӯ'), - (0x4EF, 'V'), - (0x4F0, 'M', 'ӱ'), - (0x4F1, 'V'), - (0x4F2, 'M', 'ӳ'), - (0x4F3, 'V'), - (0x4F4, 'M', 'ӵ'), - (0x4F5, 'V'), - (0x4F6, 'M', 'ӷ'), - (0x4F7, 'V'), - (0x4F8, 'M', 'ӹ'), - (0x4F9, 'V'), - (0x4FA, 'M', 'ӻ'), - (0x4FB, 'V'), - (0x4FC, 'M', 'ӽ'), - (0x4FD, 'V'), - (0x4FE, 'M', 'ӿ'), - (0x4FF, 'V'), - (0x500, 'M', 'ԁ'), - (0x501, 'V'), - (0x502, 'M', 'ԃ'), - ] - -def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x503, 'V'), - (0x504, 'M', 'ԅ'), - (0x505, 'V'), - (0x506, 'M', 'ԇ'), - (0x507, 'V'), - (0x508, 'M', 'ԉ'), - (0x509, 'V'), - (0x50A, 'M', 'ԋ'), - (0x50B, 'V'), - (0x50C, 'M', 'ԍ'), - (0x50D, 'V'), - (0x50E, 'M', 'ԏ'), - (0x50F, 'V'), - (0x510, 'M', 'ԑ'), - (0x511, 'V'), - (0x512, 'M', 'ԓ'), - (0x513, 'V'), - (0x514, 'M', 'ԕ'), - (0x515, 'V'), - (0x516, 'M', 'ԗ'), - (0x517, 'V'), - (0x518, 'M', 'ԙ'), - (0x519, 'V'), - (0x51A, 'M', 'ԛ'), - (0x51B, 'V'), - (0x51C, 'M', 'ԝ'), - (0x51D, 'V'), - (0x51E, 'M', 'ԟ'), - (0x51F, 'V'), - (0x520, 'M', 'ԡ'), - (0x521, 'V'), - (0x522, 'M', 'ԣ'), - (0x523, 'V'), - (0x524, 'M', 'ԥ'), - (0x525, 'V'), - (0x526, 'M', 'ԧ'), - (0x527, 'V'), - (0x528, 'M', 'ԩ'), - (0x529, 'V'), - (0x52A, 'M', 'ԫ'), - (0x52B, 'V'), - (0x52C, 'M', 'ԭ'), - (0x52D, 'V'), - (0x52E, 'M', 'ԯ'), - (0x52F, 'V'), - (0x530, 'X'), - (0x531, 'M', 'ա'), - (0x532, 'M', 'բ'), - (0x533, 'M', 'գ'), - (0x534, 'M', 'դ'), - (0x535, 'M', 'ե'), - (0x536, 'M', 'զ'), - (0x537, 'M', 'է'), - (0x538, 'M', 'ը'), - (0x539, 'M', 'թ'), - (0x53A, 'M', 'ժ'), - (0x53B, 'M', 'ի'), - (0x53C, 'M', 'լ'), - (0x53D, 'M', 'խ'), - (0x53E, 'M', 'ծ'), - (0x53F, 'M', 'կ'), - (0x540, 'M', 'հ'), - (0x541, 'M', 'ձ'), - (0x542, 'M', 'ղ'), - (0x543, 'M', 'ճ'), - (0x544, 'M', 'մ'), - (0x545, 'M', 'յ'), - (0x546, 'M', 'ն'), - (0x547, 'M', 'շ'), - (0x548, 'M', 'ո'), - (0x549, 'M', 'չ'), - (0x54A, 'M', 'պ'), - (0x54B, 'M', 'ջ'), - (0x54C, 'M', 'ռ'), - (0x54D, 'M', 'ս'), - (0x54E, 'M', 'վ'), - (0x54F, 'M', 'տ'), - (0x550, 'M', 'ր'), - (0x551, 'M', 'ց'), - (0x552, 'M', 'ւ'), - (0x553, 'M', 'փ'), - (0x554, 'M', 'ք'), - (0x555, 'M', 'օ'), - (0x556, 'M', 'ֆ'), - (0x557, 'X'), - (0x559, 'V'), - (0x587, 'M', 'եւ'), - (0x588, 'V'), - (0x58B, 'X'), - (0x58D, 'V'), - (0x590, 'X'), - (0x591, 'V'), - (0x5C8, 'X'), - (0x5D0, 'V'), - (0x5EB, 'X'), - (0x5EF, 'V'), - (0x5F5, 'X'), - (0x606, 'V'), - (0x61C, 'X'), - (0x61D, 'V'), - ] - -def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x675, 'M', 'اٴ'), - (0x676, 'M', 'وٴ'), - (0x677, 'M', 'ۇٴ'), - (0x678, 'M', 'يٴ'), - (0x679, 'V'), - (0x6DD, 'X'), - (0x6DE, 'V'), - (0x70E, 'X'), - (0x710, 'V'), - (0x74B, 'X'), - (0x74D, 'V'), - (0x7B2, 'X'), - (0x7C0, 'V'), - (0x7FB, 'X'), - (0x7FD, 'V'), - (0x82E, 'X'), - (0x830, 'V'), - (0x83F, 'X'), - (0x840, 'V'), - (0x85C, 'X'), - (0x85E, 'V'), - (0x85F, 'X'), - (0x860, 'V'), - (0x86B, 'X'), - (0x870, 'V'), - (0x88F, 'X'), - (0x898, 'V'), - (0x8E2, 'X'), - (0x8E3, 'V'), - 
(0x958, 'M', 'क़'), - (0x959, 'M', 'ख़'), - (0x95A, 'M', 'ग़'), - (0x95B, 'M', 'ज़'), - (0x95C, 'M', 'ड़'), - (0x95D, 'M', 'ढ़'), - (0x95E, 'M', 'फ़'), - (0x95F, 'M', 'य़'), - (0x960, 'V'), - (0x984, 'X'), - (0x985, 'V'), - (0x98D, 'X'), - (0x98F, 'V'), - (0x991, 'X'), - (0x993, 'V'), - (0x9A9, 'X'), - (0x9AA, 'V'), - (0x9B1, 'X'), - (0x9B2, 'V'), - (0x9B3, 'X'), - (0x9B6, 'V'), - (0x9BA, 'X'), - (0x9BC, 'V'), - (0x9C5, 'X'), - (0x9C7, 'V'), - (0x9C9, 'X'), - (0x9CB, 'V'), - (0x9CF, 'X'), - (0x9D7, 'V'), - (0x9D8, 'X'), - (0x9DC, 'M', 'ড়'), - (0x9DD, 'M', 'ঢ়'), - (0x9DE, 'X'), - (0x9DF, 'M', 'য়'), - (0x9E0, 'V'), - (0x9E4, 'X'), - (0x9E6, 'V'), - (0x9FF, 'X'), - (0xA01, 'V'), - (0xA04, 'X'), - (0xA05, 'V'), - (0xA0B, 'X'), - (0xA0F, 'V'), - (0xA11, 'X'), - (0xA13, 'V'), - (0xA29, 'X'), - (0xA2A, 'V'), - (0xA31, 'X'), - (0xA32, 'V'), - (0xA33, 'M', 'ਲ਼'), - (0xA34, 'X'), - (0xA35, 'V'), - (0xA36, 'M', 'ਸ਼'), - (0xA37, 'X'), - (0xA38, 'V'), - (0xA3A, 'X'), - (0xA3C, 'V'), - (0xA3D, 'X'), - (0xA3E, 'V'), - (0xA43, 'X'), - (0xA47, 'V'), - (0xA49, 'X'), - (0xA4B, 'V'), - (0xA4E, 'X'), - (0xA51, 'V'), - (0xA52, 'X'), - (0xA59, 'M', 'ਖ਼'), - (0xA5A, 'M', 'ਗ਼'), - (0xA5B, 'M', 'ਜ਼'), - (0xA5C, 'V'), - (0xA5D, 'X'), - ] - -def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA5E, 'M', 'ਫ਼'), - (0xA5F, 'X'), - (0xA66, 'V'), - (0xA77, 'X'), - (0xA81, 'V'), - (0xA84, 'X'), - (0xA85, 'V'), - (0xA8E, 'X'), - (0xA8F, 'V'), - (0xA92, 'X'), - (0xA93, 'V'), - (0xAA9, 'X'), - (0xAAA, 'V'), - (0xAB1, 'X'), - (0xAB2, 'V'), - (0xAB4, 'X'), - (0xAB5, 'V'), - (0xABA, 'X'), - (0xABC, 'V'), - (0xAC6, 'X'), - (0xAC7, 'V'), - (0xACA, 'X'), - (0xACB, 'V'), - (0xACE, 'X'), - (0xAD0, 'V'), - (0xAD1, 'X'), - (0xAE0, 'V'), - (0xAE4, 'X'), - (0xAE6, 'V'), - (0xAF2, 'X'), - (0xAF9, 'V'), - (0xB00, 'X'), - (0xB01, 'V'), - (0xB04, 'X'), - (0xB05, 'V'), - (0xB0D, 'X'), - (0xB0F, 'V'), - (0xB11, 'X'), - (0xB13, 'V'), - (0xB29, 'X'), - (0xB2A, 'V'), - (0xB31, 'X'), - (0xB32, 'V'), - (0xB34, 'X'), - (0xB35, 'V'), - (0xB3A, 'X'), - (0xB3C, 'V'), - (0xB45, 'X'), - (0xB47, 'V'), - (0xB49, 'X'), - (0xB4B, 'V'), - (0xB4E, 'X'), - (0xB55, 'V'), - (0xB58, 'X'), - (0xB5C, 'M', 'ଡ଼'), - (0xB5D, 'M', 'ଢ଼'), - (0xB5E, 'X'), - (0xB5F, 'V'), - (0xB64, 'X'), - (0xB66, 'V'), - (0xB78, 'X'), - (0xB82, 'V'), - (0xB84, 'X'), - (0xB85, 'V'), - (0xB8B, 'X'), - (0xB8E, 'V'), - (0xB91, 'X'), - (0xB92, 'V'), - (0xB96, 'X'), - (0xB99, 'V'), - (0xB9B, 'X'), - (0xB9C, 'V'), - (0xB9D, 'X'), - (0xB9E, 'V'), - (0xBA0, 'X'), - (0xBA3, 'V'), - (0xBA5, 'X'), - (0xBA8, 'V'), - (0xBAB, 'X'), - (0xBAE, 'V'), - (0xBBA, 'X'), - (0xBBE, 'V'), - (0xBC3, 'X'), - (0xBC6, 'V'), - (0xBC9, 'X'), - (0xBCA, 'V'), - (0xBCE, 'X'), - (0xBD0, 'V'), - (0xBD1, 'X'), - (0xBD7, 'V'), - (0xBD8, 'X'), - (0xBE6, 'V'), - (0xBFB, 'X'), - (0xC00, 'V'), - (0xC0D, 'X'), - (0xC0E, 'V'), - (0xC11, 'X'), - (0xC12, 'V'), - (0xC29, 'X'), - (0xC2A, 'V'), - ] - -def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC3A, 'X'), - (0xC3C, 'V'), - (0xC45, 'X'), - (0xC46, 'V'), - (0xC49, 'X'), - (0xC4A, 'V'), - (0xC4E, 'X'), - (0xC55, 'V'), - (0xC57, 'X'), - (0xC58, 'V'), - (0xC5B, 'X'), - (0xC5D, 'V'), - (0xC5E, 'X'), - (0xC60, 'V'), - (0xC64, 'X'), - (0xC66, 'V'), - (0xC70, 'X'), - (0xC77, 'V'), - (0xC8D, 'X'), - (0xC8E, 'V'), - (0xC91, 'X'), - (0xC92, 'V'), - (0xCA9, 'X'), - (0xCAA, 'V'), - (0xCB4, 'X'), - (0xCB5, 'V'), - (0xCBA, 'X'), - (0xCBC, 'V'), - (0xCC5, 'X'), - (0xCC6, 'V'), - (0xCC9, 'X'), - (0xCCA, 'V'), - (0xCCE, 'X'), - 
(0xCD5, 'V'), - (0xCD7, 'X'), - (0xCDD, 'V'), - (0xCDF, 'X'), - (0xCE0, 'V'), - (0xCE4, 'X'), - (0xCE6, 'V'), - (0xCF0, 'X'), - (0xCF1, 'V'), - (0xCF3, 'X'), - (0xD00, 'V'), - (0xD0D, 'X'), - (0xD0E, 'V'), - (0xD11, 'X'), - (0xD12, 'V'), - (0xD45, 'X'), - (0xD46, 'V'), - (0xD49, 'X'), - (0xD4A, 'V'), - (0xD50, 'X'), - (0xD54, 'V'), - (0xD64, 'X'), - (0xD66, 'V'), - (0xD80, 'X'), - (0xD81, 'V'), - (0xD84, 'X'), - (0xD85, 'V'), - (0xD97, 'X'), - (0xD9A, 'V'), - (0xDB2, 'X'), - (0xDB3, 'V'), - (0xDBC, 'X'), - (0xDBD, 'V'), - (0xDBE, 'X'), - (0xDC0, 'V'), - (0xDC7, 'X'), - (0xDCA, 'V'), - (0xDCB, 'X'), - (0xDCF, 'V'), - (0xDD5, 'X'), - (0xDD6, 'V'), - (0xDD7, 'X'), - (0xDD8, 'V'), - (0xDE0, 'X'), - (0xDE6, 'V'), - (0xDF0, 'X'), - (0xDF2, 'V'), - (0xDF5, 'X'), - (0xE01, 'V'), - (0xE33, 'M', 'ํา'), - (0xE34, 'V'), - (0xE3B, 'X'), - (0xE3F, 'V'), - (0xE5C, 'X'), - (0xE81, 'V'), - (0xE83, 'X'), - (0xE84, 'V'), - (0xE85, 'X'), - (0xE86, 'V'), - (0xE8B, 'X'), - (0xE8C, 'V'), - (0xEA4, 'X'), - (0xEA5, 'V'), - (0xEA6, 'X'), - (0xEA7, 'V'), - (0xEB3, 'M', 'ໍາ'), - (0xEB4, 'V'), - ] - -def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xEBE, 'X'), - (0xEC0, 'V'), - (0xEC5, 'X'), - (0xEC6, 'V'), - (0xEC7, 'X'), - (0xEC8, 'V'), - (0xECE, 'X'), - (0xED0, 'V'), - (0xEDA, 'X'), - (0xEDC, 'M', 'ຫນ'), - (0xEDD, 'M', 'ຫມ'), - (0xEDE, 'V'), - (0xEE0, 'X'), - (0xF00, 'V'), - (0xF0C, 'M', '་'), - (0xF0D, 'V'), - (0xF43, 'M', 'གྷ'), - (0xF44, 'V'), - (0xF48, 'X'), - (0xF49, 'V'), - (0xF4D, 'M', 'ཌྷ'), - (0xF4E, 'V'), - (0xF52, 'M', 'དྷ'), - (0xF53, 'V'), - (0xF57, 'M', 'བྷ'), - (0xF58, 'V'), - (0xF5C, 'M', 'ཛྷ'), - (0xF5D, 'V'), - (0xF69, 'M', 'ཀྵ'), - (0xF6A, 'V'), - (0xF6D, 'X'), - (0xF71, 'V'), - (0xF73, 'M', 'ཱི'), - (0xF74, 'V'), - (0xF75, 'M', 'ཱུ'), - (0xF76, 'M', 'ྲྀ'), - (0xF77, 'M', 'ྲཱྀ'), - (0xF78, 'M', 'ླྀ'), - (0xF79, 'M', 'ླཱྀ'), - (0xF7A, 'V'), - (0xF81, 'M', 'ཱྀ'), - (0xF82, 'V'), - (0xF93, 'M', 'ྒྷ'), - (0xF94, 'V'), - (0xF98, 'X'), - (0xF99, 'V'), - (0xF9D, 'M', 'ྜྷ'), - (0xF9E, 'V'), - (0xFA2, 'M', 'ྡྷ'), - (0xFA3, 'V'), - (0xFA7, 'M', 'ྦྷ'), - (0xFA8, 'V'), - (0xFAC, 'M', 'ྫྷ'), - (0xFAD, 'V'), - (0xFB9, 'M', 'ྐྵ'), - (0xFBA, 'V'), - (0xFBD, 'X'), - (0xFBE, 'V'), - (0xFCD, 'X'), - (0xFCE, 'V'), - (0xFDB, 'X'), - (0x1000, 'V'), - (0x10A0, 'X'), - (0x10C7, 'M', 'ⴧ'), - (0x10C8, 'X'), - (0x10CD, 'M', 'ⴭ'), - (0x10CE, 'X'), - (0x10D0, 'V'), - (0x10FC, 'M', 'ნ'), - (0x10FD, 'V'), - (0x115F, 'X'), - (0x1161, 'V'), - (0x1249, 'X'), - (0x124A, 'V'), - (0x124E, 'X'), - (0x1250, 'V'), - (0x1257, 'X'), - (0x1258, 'V'), - (0x1259, 'X'), - (0x125A, 'V'), - (0x125E, 'X'), - (0x1260, 'V'), - (0x1289, 'X'), - (0x128A, 'V'), - (0x128E, 'X'), - (0x1290, 'V'), - (0x12B1, 'X'), - (0x12B2, 'V'), - (0x12B6, 'X'), - (0x12B8, 'V'), - (0x12BF, 'X'), - (0x12C0, 'V'), - (0x12C1, 'X'), - (0x12C2, 'V'), - (0x12C6, 'X'), - (0x12C8, 'V'), - (0x12D7, 'X'), - (0x12D8, 'V'), - (0x1311, 'X'), - (0x1312, 'V'), - ] - -def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1316, 'X'), - (0x1318, 'V'), - (0x135B, 'X'), - (0x135D, 'V'), - (0x137D, 'X'), - (0x1380, 'V'), - (0x139A, 'X'), - (0x13A0, 'V'), - (0x13F6, 'X'), - (0x13F8, 'M', 'Ᏸ'), - (0x13F9, 'M', 'Ᏹ'), - (0x13FA, 'M', 'Ᏺ'), - (0x13FB, 'M', 'Ᏻ'), - (0x13FC, 'M', 'Ᏼ'), - (0x13FD, 'M', 'Ᏽ'), - (0x13FE, 'X'), - (0x1400, 'V'), - (0x1680, 'X'), - (0x1681, 'V'), - (0x169D, 'X'), - (0x16A0, 'V'), - (0x16F9, 'X'), - (0x1700, 'V'), - (0x1716, 'X'), - (0x171F, 'V'), - (0x1737, 'X'), - (0x1740, 'V'), - (0x1754, 'X'), - 
(0x1760, 'V'), - (0x176D, 'X'), - (0x176E, 'V'), - (0x1771, 'X'), - (0x1772, 'V'), - (0x1774, 'X'), - (0x1780, 'V'), - (0x17B4, 'X'), - (0x17B6, 'V'), - (0x17DE, 'X'), - (0x17E0, 'V'), - (0x17EA, 'X'), - (0x17F0, 'V'), - (0x17FA, 'X'), - (0x1800, 'V'), - (0x1806, 'X'), - (0x1807, 'V'), - (0x180B, 'I'), - (0x180E, 'X'), - (0x180F, 'I'), - (0x1810, 'V'), - (0x181A, 'X'), - (0x1820, 'V'), - (0x1879, 'X'), - (0x1880, 'V'), - (0x18AB, 'X'), - (0x18B0, 'V'), - (0x18F6, 'X'), - (0x1900, 'V'), - (0x191F, 'X'), - (0x1920, 'V'), - (0x192C, 'X'), - (0x1930, 'V'), - (0x193C, 'X'), - (0x1940, 'V'), - (0x1941, 'X'), - (0x1944, 'V'), - (0x196E, 'X'), - (0x1970, 'V'), - (0x1975, 'X'), - (0x1980, 'V'), - (0x19AC, 'X'), - (0x19B0, 'V'), - (0x19CA, 'X'), - (0x19D0, 'V'), - (0x19DB, 'X'), - (0x19DE, 'V'), - (0x1A1C, 'X'), - (0x1A1E, 'V'), - (0x1A5F, 'X'), - (0x1A60, 'V'), - (0x1A7D, 'X'), - (0x1A7F, 'V'), - (0x1A8A, 'X'), - (0x1A90, 'V'), - (0x1A9A, 'X'), - (0x1AA0, 'V'), - (0x1AAE, 'X'), - (0x1AB0, 'V'), - (0x1ACF, 'X'), - (0x1B00, 'V'), - (0x1B4D, 'X'), - (0x1B50, 'V'), - (0x1B7F, 'X'), - (0x1B80, 'V'), - (0x1BF4, 'X'), - (0x1BFC, 'V'), - (0x1C38, 'X'), - (0x1C3B, 'V'), - (0x1C4A, 'X'), - (0x1C4D, 'V'), - (0x1C80, 'M', 'в'), - ] - -def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1C81, 'M', 'д'), - (0x1C82, 'M', 'о'), - (0x1C83, 'M', 'с'), - (0x1C84, 'M', 'т'), - (0x1C86, 'M', 'ъ'), - (0x1C87, 'M', 'ѣ'), - (0x1C88, 'M', 'ꙋ'), - (0x1C89, 'X'), - (0x1C90, 'M', 'ა'), - (0x1C91, 'M', 'ბ'), - (0x1C92, 'M', 'გ'), - (0x1C93, 'M', 'დ'), - (0x1C94, 'M', 'ე'), - (0x1C95, 'M', 'ვ'), - (0x1C96, 'M', 'ზ'), - (0x1C97, 'M', 'თ'), - (0x1C98, 'M', 'ი'), - (0x1C99, 'M', 'კ'), - (0x1C9A, 'M', 'ლ'), - (0x1C9B, 'M', 'მ'), - (0x1C9C, 'M', 'ნ'), - (0x1C9D, 'M', 'ო'), - (0x1C9E, 'M', 'პ'), - (0x1C9F, 'M', 'ჟ'), - (0x1CA0, 'M', 'რ'), - (0x1CA1, 'M', 'ს'), - (0x1CA2, 'M', 'ტ'), - (0x1CA3, 'M', 'უ'), - (0x1CA4, 'M', 'ფ'), - (0x1CA5, 'M', 'ქ'), - (0x1CA6, 'M', 'ღ'), - (0x1CA7, 'M', 'ყ'), - (0x1CA8, 'M', 'შ'), - (0x1CA9, 'M', 'ჩ'), - (0x1CAA, 'M', 'ც'), - (0x1CAB, 'M', 'ძ'), - (0x1CAC, 'M', 'წ'), - (0x1CAD, 'M', 'ჭ'), - (0x1CAE, 'M', 'ხ'), - (0x1CAF, 'M', 'ჯ'), - (0x1CB0, 'M', 'ჰ'), - (0x1CB1, 'M', 'ჱ'), - (0x1CB2, 'M', 'ჲ'), - (0x1CB3, 'M', 'ჳ'), - (0x1CB4, 'M', 'ჴ'), - (0x1CB5, 'M', 'ჵ'), - (0x1CB6, 'M', 'ჶ'), - (0x1CB7, 'M', 'ჷ'), - (0x1CB8, 'M', 'ჸ'), - (0x1CB9, 'M', 'ჹ'), - (0x1CBA, 'M', 'ჺ'), - (0x1CBB, 'X'), - (0x1CBD, 'M', 'ჽ'), - (0x1CBE, 'M', 'ჾ'), - (0x1CBF, 'M', 'ჿ'), - (0x1CC0, 'V'), - (0x1CC8, 'X'), - (0x1CD0, 'V'), - (0x1CFB, 'X'), - (0x1D00, 'V'), - (0x1D2C, 'M', 'a'), - (0x1D2D, 'M', 'æ'), - (0x1D2E, 'M', 'b'), - (0x1D2F, 'V'), - (0x1D30, 'M', 'd'), - (0x1D31, 'M', 'e'), - (0x1D32, 'M', 'ǝ'), - (0x1D33, 'M', 'g'), - (0x1D34, 'M', 'h'), - (0x1D35, 'M', 'i'), - (0x1D36, 'M', 'j'), - (0x1D37, 'M', 'k'), - (0x1D38, 'M', 'l'), - (0x1D39, 'M', 'm'), - (0x1D3A, 'M', 'n'), - (0x1D3B, 'V'), - (0x1D3C, 'M', 'o'), - (0x1D3D, 'M', 'ȣ'), - (0x1D3E, 'M', 'p'), - (0x1D3F, 'M', 'r'), - (0x1D40, 'M', 't'), - (0x1D41, 'M', 'u'), - (0x1D42, 'M', 'w'), - (0x1D43, 'M', 'a'), - (0x1D44, 'M', 'ɐ'), - (0x1D45, 'M', 'ɑ'), - (0x1D46, 'M', 'ᴂ'), - (0x1D47, 'M', 'b'), - (0x1D48, 'M', 'd'), - (0x1D49, 'M', 'e'), - (0x1D4A, 'M', 'ə'), - (0x1D4B, 'M', 'ɛ'), - (0x1D4C, 'M', 'ɜ'), - (0x1D4D, 'M', 'g'), - (0x1D4E, 'V'), - (0x1D4F, 'M', 'k'), - (0x1D50, 'M', 'm'), - (0x1D51, 'M', 'ŋ'), - (0x1D52, 'M', 'o'), - (0x1D53, 'M', 'ɔ'), - ] - -def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - 
(0x1D54, 'M', 'ᴖ'), - (0x1D55, 'M', 'ᴗ'), - (0x1D56, 'M', 'p'), - (0x1D57, 'M', 't'), - (0x1D58, 'M', 'u'), - (0x1D59, 'M', 'ᴝ'), - (0x1D5A, 'M', 'ɯ'), - (0x1D5B, 'M', 'v'), - (0x1D5C, 'M', 'ᴥ'), - (0x1D5D, 'M', 'β'), - (0x1D5E, 'M', 'γ'), - (0x1D5F, 'M', 'δ'), - (0x1D60, 'M', 'φ'), - (0x1D61, 'M', 'χ'), - (0x1D62, 'M', 'i'), - (0x1D63, 'M', 'r'), - (0x1D64, 'M', 'u'), - (0x1D65, 'M', 'v'), - (0x1D66, 'M', 'β'), - (0x1D67, 'M', 'γ'), - (0x1D68, 'M', 'ρ'), - (0x1D69, 'M', 'φ'), - (0x1D6A, 'M', 'χ'), - (0x1D6B, 'V'), - (0x1D78, 'M', 'н'), - (0x1D79, 'V'), - (0x1D9B, 'M', 'ɒ'), - (0x1D9C, 'M', 'c'), - (0x1D9D, 'M', 'ɕ'), - (0x1D9E, 'M', 'ð'), - (0x1D9F, 'M', 'ɜ'), - (0x1DA0, 'M', 'f'), - (0x1DA1, 'M', 'ɟ'), - (0x1DA2, 'M', 'ɡ'), - (0x1DA3, 'M', 'ɥ'), - (0x1DA4, 'M', 'ɨ'), - (0x1DA5, 'M', 'ɩ'), - (0x1DA6, 'M', 'ɪ'), - (0x1DA7, 'M', 'ᵻ'), - (0x1DA8, 'M', 'ʝ'), - (0x1DA9, 'M', 'ɭ'), - (0x1DAA, 'M', 'ᶅ'), - (0x1DAB, 'M', 'ʟ'), - (0x1DAC, 'M', 'ɱ'), - (0x1DAD, 'M', 'ɰ'), - (0x1DAE, 'M', 'ɲ'), - (0x1DAF, 'M', 'ɳ'), - (0x1DB0, 'M', 'ɴ'), - (0x1DB1, 'M', 'ɵ'), - (0x1DB2, 'M', 'ɸ'), - (0x1DB3, 'M', 'ʂ'), - (0x1DB4, 'M', 'ʃ'), - (0x1DB5, 'M', 'ƫ'), - (0x1DB6, 'M', 'ʉ'), - (0x1DB7, 'M', 'ʊ'), - (0x1DB8, 'M', 'ᴜ'), - (0x1DB9, 'M', 'ʋ'), - (0x1DBA, 'M', 'ʌ'), - (0x1DBB, 'M', 'z'), - (0x1DBC, 'M', 'ʐ'), - (0x1DBD, 'M', 'ʑ'), - (0x1DBE, 'M', 'ʒ'), - (0x1DBF, 'M', 'θ'), - (0x1DC0, 'V'), - (0x1E00, 'M', 'ḁ'), - (0x1E01, 'V'), - (0x1E02, 'M', 'ḃ'), - (0x1E03, 'V'), - (0x1E04, 'M', 'ḅ'), - (0x1E05, 'V'), - (0x1E06, 'M', 'ḇ'), - (0x1E07, 'V'), - (0x1E08, 'M', 'ḉ'), - (0x1E09, 'V'), - (0x1E0A, 'M', 'ḋ'), - (0x1E0B, 'V'), - (0x1E0C, 'M', 'ḍ'), - (0x1E0D, 'V'), - (0x1E0E, 'M', 'ḏ'), - (0x1E0F, 'V'), - (0x1E10, 'M', 'ḑ'), - (0x1E11, 'V'), - (0x1E12, 'M', 'ḓ'), - (0x1E13, 'V'), - (0x1E14, 'M', 'ḕ'), - (0x1E15, 'V'), - (0x1E16, 'M', 'ḗ'), - (0x1E17, 'V'), - (0x1E18, 'M', 'ḙ'), - (0x1E19, 'V'), - (0x1E1A, 'M', 'ḛ'), - (0x1E1B, 'V'), - (0x1E1C, 'M', 'ḝ'), - (0x1E1D, 'V'), - (0x1E1E, 'M', 'ḟ'), - (0x1E1F, 'V'), - (0x1E20, 'M', 'ḡ'), - (0x1E21, 'V'), - (0x1E22, 'M', 'ḣ'), - (0x1E23, 'V'), - ] - -def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E24, 'M', 'ḥ'), - (0x1E25, 'V'), - (0x1E26, 'M', 'ḧ'), - (0x1E27, 'V'), - (0x1E28, 'M', 'ḩ'), - (0x1E29, 'V'), - (0x1E2A, 'M', 'ḫ'), - (0x1E2B, 'V'), - (0x1E2C, 'M', 'ḭ'), - (0x1E2D, 'V'), - (0x1E2E, 'M', 'ḯ'), - (0x1E2F, 'V'), - (0x1E30, 'M', 'ḱ'), - (0x1E31, 'V'), - (0x1E32, 'M', 'ḳ'), - (0x1E33, 'V'), - (0x1E34, 'M', 'ḵ'), - (0x1E35, 'V'), - (0x1E36, 'M', 'ḷ'), - (0x1E37, 'V'), - (0x1E38, 'M', 'ḹ'), - (0x1E39, 'V'), - (0x1E3A, 'M', 'ḻ'), - (0x1E3B, 'V'), - (0x1E3C, 'M', 'ḽ'), - (0x1E3D, 'V'), - (0x1E3E, 'M', 'ḿ'), - (0x1E3F, 'V'), - (0x1E40, 'M', 'ṁ'), - (0x1E41, 'V'), - (0x1E42, 'M', 'ṃ'), - (0x1E43, 'V'), - (0x1E44, 'M', 'ṅ'), - (0x1E45, 'V'), - (0x1E46, 'M', 'ṇ'), - (0x1E47, 'V'), - (0x1E48, 'M', 'ṉ'), - (0x1E49, 'V'), - (0x1E4A, 'M', 'ṋ'), - (0x1E4B, 'V'), - (0x1E4C, 'M', 'ṍ'), - (0x1E4D, 'V'), - (0x1E4E, 'M', 'ṏ'), - (0x1E4F, 'V'), - (0x1E50, 'M', 'ṑ'), - (0x1E51, 'V'), - (0x1E52, 'M', 'ṓ'), - (0x1E53, 'V'), - (0x1E54, 'M', 'ṕ'), - (0x1E55, 'V'), - (0x1E56, 'M', 'ṗ'), - (0x1E57, 'V'), - (0x1E58, 'M', 'ṙ'), - (0x1E59, 'V'), - (0x1E5A, 'M', 'ṛ'), - (0x1E5B, 'V'), - (0x1E5C, 'M', 'ṝ'), - (0x1E5D, 'V'), - (0x1E5E, 'M', 'ṟ'), - (0x1E5F, 'V'), - (0x1E60, 'M', 'ṡ'), - (0x1E61, 'V'), - (0x1E62, 'M', 'ṣ'), - (0x1E63, 'V'), - (0x1E64, 'M', 'ṥ'), - (0x1E65, 'V'), - (0x1E66, 'M', 'ṧ'), - (0x1E67, 'V'), - (0x1E68, 'M', 'ṩ'), - (0x1E69, 'V'), - 
(0x1E6A, 'M', 'ṫ'), - (0x1E6B, 'V'), - (0x1E6C, 'M', 'ṭ'), - (0x1E6D, 'V'), - (0x1E6E, 'M', 'ṯ'), - (0x1E6F, 'V'), - (0x1E70, 'M', 'ṱ'), - (0x1E71, 'V'), - (0x1E72, 'M', 'ṳ'), - (0x1E73, 'V'), - (0x1E74, 'M', 'ṵ'), - (0x1E75, 'V'), - (0x1E76, 'M', 'ṷ'), - (0x1E77, 'V'), - (0x1E78, 'M', 'ṹ'), - (0x1E79, 'V'), - (0x1E7A, 'M', 'ṻ'), - (0x1E7B, 'V'), - (0x1E7C, 'M', 'ṽ'), - (0x1E7D, 'V'), - (0x1E7E, 'M', 'ṿ'), - (0x1E7F, 'V'), - (0x1E80, 'M', 'ẁ'), - (0x1E81, 'V'), - (0x1E82, 'M', 'ẃ'), - (0x1E83, 'V'), - (0x1E84, 'M', 'ẅ'), - (0x1E85, 'V'), - (0x1E86, 'M', 'ẇ'), - (0x1E87, 'V'), - ] - -def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E88, 'M', 'ẉ'), - (0x1E89, 'V'), - (0x1E8A, 'M', 'ẋ'), - (0x1E8B, 'V'), - (0x1E8C, 'M', 'ẍ'), - (0x1E8D, 'V'), - (0x1E8E, 'M', 'ẏ'), - (0x1E8F, 'V'), - (0x1E90, 'M', 'ẑ'), - (0x1E91, 'V'), - (0x1E92, 'M', 'ẓ'), - (0x1E93, 'V'), - (0x1E94, 'M', 'ẕ'), - (0x1E95, 'V'), - (0x1E9A, 'M', 'aʾ'), - (0x1E9B, 'M', 'ṡ'), - (0x1E9C, 'V'), - (0x1E9E, 'M', 'ss'), - (0x1E9F, 'V'), - (0x1EA0, 'M', 'ạ'), - (0x1EA1, 'V'), - (0x1EA2, 'M', 'ả'), - (0x1EA3, 'V'), - (0x1EA4, 'M', 'ấ'), - (0x1EA5, 'V'), - (0x1EA6, 'M', 'ầ'), - (0x1EA7, 'V'), - (0x1EA8, 'M', 'ẩ'), - (0x1EA9, 'V'), - (0x1EAA, 'M', 'ẫ'), - (0x1EAB, 'V'), - (0x1EAC, 'M', 'ậ'), - (0x1EAD, 'V'), - (0x1EAE, 'M', 'ắ'), - (0x1EAF, 'V'), - (0x1EB0, 'M', 'ằ'), - (0x1EB1, 'V'), - (0x1EB2, 'M', 'ẳ'), - (0x1EB3, 'V'), - (0x1EB4, 'M', 'ẵ'), - (0x1EB5, 'V'), - (0x1EB6, 'M', 'ặ'), - (0x1EB7, 'V'), - (0x1EB8, 'M', 'ẹ'), - (0x1EB9, 'V'), - (0x1EBA, 'M', 'ẻ'), - (0x1EBB, 'V'), - (0x1EBC, 'M', 'ẽ'), - (0x1EBD, 'V'), - (0x1EBE, 'M', 'ế'), - (0x1EBF, 'V'), - (0x1EC0, 'M', 'ề'), - (0x1EC1, 'V'), - (0x1EC2, 'M', 'ể'), - (0x1EC3, 'V'), - (0x1EC4, 'M', 'ễ'), - (0x1EC5, 'V'), - (0x1EC6, 'M', 'ệ'), - (0x1EC7, 'V'), - (0x1EC8, 'M', 'ỉ'), - (0x1EC9, 'V'), - (0x1ECA, 'M', 'ị'), - (0x1ECB, 'V'), - (0x1ECC, 'M', 'ọ'), - (0x1ECD, 'V'), - (0x1ECE, 'M', 'ỏ'), - (0x1ECF, 'V'), - (0x1ED0, 'M', 'ố'), - (0x1ED1, 'V'), - (0x1ED2, 'M', 'ồ'), - (0x1ED3, 'V'), - (0x1ED4, 'M', 'ổ'), - (0x1ED5, 'V'), - (0x1ED6, 'M', 'ỗ'), - (0x1ED7, 'V'), - (0x1ED8, 'M', 'ộ'), - (0x1ED9, 'V'), - (0x1EDA, 'M', 'ớ'), - (0x1EDB, 'V'), - (0x1EDC, 'M', 'ờ'), - (0x1EDD, 'V'), - (0x1EDE, 'M', 'ở'), - (0x1EDF, 'V'), - (0x1EE0, 'M', 'ỡ'), - (0x1EE1, 'V'), - (0x1EE2, 'M', 'ợ'), - (0x1EE3, 'V'), - (0x1EE4, 'M', 'ụ'), - (0x1EE5, 'V'), - (0x1EE6, 'M', 'ủ'), - (0x1EE7, 'V'), - (0x1EE8, 'M', 'ứ'), - (0x1EE9, 'V'), - (0x1EEA, 'M', 'ừ'), - (0x1EEB, 'V'), - (0x1EEC, 'M', 'ử'), - (0x1EED, 'V'), - (0x1EEE, 'M', 'ữ'), - (0x1EEF, 'V'), - (0x1EF0, 'M', 'ự'), - ] - -def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EF1, 'V'), - (0x1EF2, 'M', 'ỳ'), - (0x1EF3, 'V'), - (0x1EF4, 'M', 'ỵ'), - (0x1EF5, 'V'), - (0x1EF6, 'M', 'ỷ'), - (0x1EF7, 'V'), - (0x1EF8, 'M', 'ỹ'), - (0x1EF9, 'V'), - (0x1EFA, 'M', 'ỻ'), - (0x1EFB, 'V'), - (0x1EFC, 'M', 'ỽ'), - (0x1EFD, 'V'), - (0x1EFE, 'M', 'ỿ'), - (0x1EFF, 'V'), - (0x1F08, 'M', 'ἀ'), - (0x1F09, 'M', 'ἁ'), - (0x1F0A, 'M', 'ἂ'), - (0x1F0B, 'M', 'ἃ'), - (0x1F0C, 'M', 'ἄ'), - (0x1F0D, 'M', 'ἅ'), - (0x1F0E, 'M', 'ἆ'), - (0x1F0F, 'M', 'ἇ'), - (0x1F10, 'V'), - (0x1F16, 'X'), - (0x1F18, 'M', 'ἐ'), - (0x1F19, 'M', 'ἑ'), - (0x1F1A, 'M', 'ἒ'), - (0x1F1B, 'M', 'ἓ'), - (0x1F1C, 'M', 'ἔ'), - (0x1F1D, 'M', 'ἕ'), - (0x1F1E, 'X'), - (0x1F20, 'V'), - (0x1F28, 'M', 'ἠ'), - (0x1F29, 'M', 'ἡ'), - (0x1F2A, 'M', 'ἢ'), - (0x1F2B, 'M', 'ἣ'), - (0x1F2C, 'M', 'ἤ'), - (0x1F2D, 'M', 'ἥ'), - (0x1F2E, 'M', 'ἦ'), - (0x1F2F, 'M', 
'ἧ'), - (0x1F30, 'V'), - (0x1F38, 'M', 'ἰ'), - (0x1F39, 'M', 'ἱ'), - (0x1F3A, 'M', 'ἲ'), - (0x1F3B, 'M', 'ἳ'), - (0x1F3C, 'M', 'ἴ'), - (0x1F3D, 'M', 'ἵ'), - (0x1F3E, 'M', 'ἶ'), - (0x1F3F, 'M', 'ἷ'), - (0x1F40, 'V'), - (0x1F46, 'X'), - (0x1F48, 'M', 'ὀ'), - (0x1F49, 'M', 'ὁ'), - (0x1F4A, 'M', 'ὂ'), - (0x1F4B, 'M', 'ὃ'), - (0x1F4C, 'M', 'ὄ'), - (0x1F4D, 'M', 'ὅ'), - (0x1F4E, 'X'), - (0x1F50, 'V'), - (0x1F58, 'X'), - (0x1F59, 'M', 'ὑ'), - (0x1F5A, 'X'), - (0x1F5B, 'M', 'ὓ'), - (0x1F5C, 'X'), - (0x1F5D, 'M', 'ὕ'), - (0x1F5E, 'X'), - (0x1F5F, 'M', 'ὗ'), - (0x1F60, 'V'), - (0x1F68, 'M', 'ὠ'), - (0x1F69, 'M', 'ὡ'), - (0x1F6A, 'M', 'ὢ'), - (0x1F6B, 'M', 'ὣ'), - (0x1F6C, 'M', 'ὤ'), - (0x1F6D, 'M', 'ὥ'), - (0x1F6E, 'M', 'ὦ'), - (0x1F6F, 'M', 'ὧ'), - (0x1F70, 'V'), - (0x1F71, 'M', 'ά'), - (0x1F72, 'V'), - (0x1F73, 'M', 'έ'), - (0x1F74, 'V'), - (0x1F75, 'M', 'ή'), - (0x1F76, 'V'), - (0x1F77, 'M', 'ί'), - (0x1F78, 'V'), - (0x1F79, 'M', 'ό'), - (0x1F7A, 'V'), - (0x1F7B, 'M', 'ύ'), - (0x1F7C, 'V'), - (0x1F7D, 'M', 'ώ'), - (0x1F7E, 'X'), - (0x1F80, 'M', 'ἀι'), - (0x1F81, 'M', 'ἁι'), - (0x1F82, 'M', 'ἂι'), - (0x1F83, 'M', 'ἃι'), - (0x1F84, 'M', 'ἄι'), - (0x1F85, 'M', 'ἅι'), - (0x1F86, 'M', 'ἆι'), - (0x1F87, 'M', 'ἇι'), - ] - -def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F88, 'M', 'ἀι'), - (0x1F89, 'M', 'ἁι'), - (0x1F8A, 'M', 'ἂι'), - (0x1F8B, 'M', 'ἃι'), - (0x1F8C, 'M', 'ἄι'), - (0x1F8D, 'M', 'ἅι'), - (0x1F8E, 'M', 'ἆι'), - (0x1F8F, 'M', 'ἇι'), - (0x1F90, 'M', 'ἠι'), - (0x1F91, 'M', 'ἡι'), - (0x1F92, 'M', 'ἢι'), - (0x1F93, 'M', 'ἣι'), - (0x1F94, 'M', 'ἤι'), - (0x1F95, 'M', 'ἥι'), - (0x1F96, 'M', 'ἦι'), - (0x1F97, 'M', 'ἧι'), - (0x1F98, 'M', 'ἠι'), - (0x1F99, 'M', 'ἡι'), - (0x1F9A, 'M', 'ἢι'), - (0x1F9B, 'M', 'ἣι'), - (0x1F9C, 'M', 'ἤι'), - (0x1F9D, 'M', 'ἥι'), - (0x1F9E, 'M', 'ἦι'), - (0x1F9F, 'M', 'ἧι'), - (0x1FA0, 'M', 'ὠι'), - (0x1FA1, 'M', 'ὡι'), - (0x1FA2, 'M', 'ὢι'), - (0x1FA3, 'M', 'ὣι'), - (0x1FA4, 'M', 'ὤι'), - (0x1FA5, 'M', 'ὥι'), - (0x1FA6, 'M', 'ὦι'), - (0x1FA7, 'M', 'ὧι'), - (0x1FA8, 'M', 'ὠι'), - (0x1FA9, 'M', 'ὡι'), - (0x1FAA, 'M', 'ὢι'), - (0x1FAB, 'M', 'ὣι'), - (0x1FAC, 'M', 'ὤι'), - (0x1FAD, 'M', 'ὥι'), - (0x1FAE, 'M', 'ὦι'), - (0x1FAF, 'M', 'ὧι'), - (0x1FB0, 'V'), - (0x1FB2, 'M', 'ὰι'), - (0x1FB3, 'M', 'αι'), - (0x1FB4, 'M', 'άι'), - (0x1FB5, 'X'), - (0x1FB6, 'V'), - (0x1FB7, 'M', 'ᾶι'), - (0x1FB8, 'M', 'ᾰ'), - (0x1FB9, 'M', 'ᾱ'), - (0x1FBA, 'M', 'ὰ'), - (0x1FBB, 'M', 'ά'), - (0x1FBC, 'M', 'αι'), - (0x1FBD, '3', ' ̓'), - (0x1FBE, 'M', 'ι'), - (0x1FBF, '3', ' ̓'), - (0x1FC0, '3', ' ͂'), - (0x1FC1, '3', ' ̈͂'), - (0x1FC2, 'M', 'ὴι'), - (0x1FC3, 'M', 'ηι'), - (0x1FC4, 'M', 'ήι'), - (0x1FC5, 'X'), - (0x1FC6, 'V'), - (0x1FC7, 'M', 'ῆι'), - (0x1FC8, 'M', 'ὲ'), - (0x1FC9, 'M', 'έ'), - (0x1FCA, 'M', 'ὴ'), - (0x1FCB, 'M', 'ή'), - (0x1FCC, 'M', 'ηι'), - (0x1FCD, '3', ' ̓̀'), - (0x1FCE, '3', ' ̓́'), - (0x1FCF, '3', ' ̓͂'), - (0x1FD0, 'V'), - (0x1FD3, 'M', 'ΐ'), - (0x1FD4, 'X'), - (0x1FD6, 'V'), - (0x1FD8, 'M', 'ῐ'), - (0x1FD9, 'M', 'ῑ'), - (0x1FDA, 'M', 'ὶ'), - (0x1FDB, 'M', 'ί'), - (0x1FDC, 'X'), - (0x1FDD, '3', ' ̔̀'), - (0x1FDE, '3', ' ̔́'), - (0x1FDF, '3', ' ̔͂'), - (0x1FE0, 'V'), - (0x1FE3, 'M', 'ΰ'), - (0x1FE4, 'V'), - (0x1FE8, 'M', 'ῠ'), - (0x1FE9, 'M', 'ῡ'), - (0x1FEA, 'M', 'ὺ'), - (0x1FEB, 'M', 'ύ'), - (0x1FEC, 'M', 'ῥ'), - (0x1FED, '3', ' ̈̀'), - (0x1FEE, '3', ' ̈́'), - (0x1FEF, '3', '`'), - (0x1FF0, 'X'), - (0x1FF2, 'M', 'ὼι'), - (0x1FF3, 'M', 'ωι'), - (0x1FF4, 'M', 'ώι'), - (0x1FF5, 'X'), - (0x1FF6, 'V'), - ] - -def _seg_21() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1FF7, 'M', 'ῶι'), - (0x1FF8, 'M', 'ὸ'), - (0x1FF9, 'M', 'ό'), - (0x1FFA, 'M', 'ὼ'), - (0x1FFB, 'M', 'ώ'), - (0x1FFC, 'M', 'ωι'), - (0x1FFD, '3', ' ́'), - (0x1FFE, '3', ' ̔'), - (0x1FFF, 'X'), - (0x2000, '3', ' '), - (0x200B, 'I'), - (0x200C, 'D', ''), - (0x200E, 'X'), - (0x2010, 'V'), - (0x2011, 'M', '‐'), - (0x2012, 'V'), - (0x2017, '3', ' ̳'), - (0x2018, 'V'), - (0x2024, 'X'), - (0x2027, 'V'), - (0x2028, 'X'), - (0x202F, '3', ' '), - (0x2030, 'V'), - (0x2033, 'M', '′′'), - (0x2034, 'M', '′′′'), - (0x2035, 'V'), - (0x2036, 'M', '‵‵'), - (0x2037, 'M', '‵‵‵'), - (0x2038, 'V'), - (0x203C, '3', '!!'), - (0x203D, 'V'), - (0x203E, '3', ' ̅'), - (0x203F, 'V'), - (0x2047, '3', '??'), - (0x2048, '3', '?!'), - (0x2049, '3', '!?'), - (0x204A, 'V'), - (0x2057, 'M', '′′′′'), - (0x2058, 'V'), - (0x205F, '3', ' '), - (0x2060, 'I'), - (0x2061, 'X'), - (0x2064, 'I'), - (0x2065, 'X'), - (0x2070, 'M', '0'), - (0x2071, 'M', 'i'), - (0x2072, 'X'), - (0x2074, 'M', '4'), - (0x2075, 'M', '5'), - (0x2076, 'M', '6'), - (0x2077, 'M', '7'), - (0x2078, 'M', '8'), - (0x2079, 'M', '9'), - (0x207A, '3', '+'), - (0x207B, 'M', '−'), - (0x207C, '3', '='), - (0x207D, '3', '('), - (0x207E, '3', ')'), - (0x207F, 'M', 'n'), - (0x2080, 'M', '0'), - (0x2081, 'M', '1'), - (0x2082, 'M', '2'), - (0x2083, 'M', '3'), - (0x2084, 'M', '4'), - (0x2085, 'M', '5'), - (0x2086, 'M', '6'), - (0x2087, 'M', '7'), - (0x2088, 'M', '8'), - (0x2089, 'M', '9'), - (0x208A, '3', '+'), - (0x208B, 'M', '−'), - (0x208C, '3', '='), - (0x208D, '3', '('), - (0x208E, '3', ')'), - (0x208F, 'X'), - (0x2090, 'M', 'a'), - (0x2091, 'M', 'e'), - (0x2092, 'M', 'o'), - (0x2093, 'M', 'x'), - (0x2094, 'M', 'ə'), - (0x2095, 'M', 'h'), - (0x2096, 'M', 'k'), - (0x2097, 'M', 'l'), - (0x2098, 'M', 'm'), - (0x2099, 'M', 'n'), - (0x209A, 'M', 'p'), - (0x209B, 'M', 's'), - (0x209C, 'M', 't'), - (0x209D, 'X'), - (0x20A0, 'V'), - (0x20A8, 'M', 'rs'), - (0x20A9, 'V'), - (0x20C1, 'X'), - (0x20D0, 'V'), - (0x20F1, 'X'), - (0x2100, '3', 'a/c'), - (0x2101, '3', 'a/s'), - (0x2102, 'M', 'c'), - (0x2103, 'M', '°c'), - (0x2104, 'V'), - ] - -def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2105, '3', 'c/o'), - (0x2106, '3', 'c/u'), - (0x2107, 'M', 'ɛ'), - (0x2108, 'V'), - (0x2109, 'M', '°f'), - (0x210A, 'M', 'g'), - (0x210B, 'M', 'h'), - (0x210F, 'M', 'ħ'), - (0x2110, 'M', 'i'), - (0x2112, 'M', 'l'), - (0x2114, 'V'), - (0x2115, 'M', 'n'), - (0x2116, 'M', 'no'), - (0x2117, 'V'), - (0x2119, 'M', 'p'), - (0x211A, 'M', 'q'), - (0x211B, 'M', 'r'), - (0x211E, 'V'), - (0x2120, 'M', 'sm'), - (0x2121, 'M', 'tel'), - (0x2122, 'M', 'tm'), - (0x2123, 'V'), - (0x2124, 'M', 'z'), - (0x2125, 'V'), - (0x2126, 'M', 'ω'), - (0x2127, 'V'), - (0x2128, 'M', 'z'), - (0x2129, 'V'), - (0x212A, 'M', 'k'), - (0x212B, 'M', 'å'), - (0x212C, 'M', 'b'), - (0x212D, 'M', 'c'), - (0x212E, 'V'), - (0x212F, 'M', 'e'), - (0x2131, 'M', 'f'), - (0x2132, 'X'), - (0x2133, 'M', 'm'), - (0x2134, 'M', 'o'), - (0x2135, 'M', 'א'), - (0x2136, 'M', 'ב'), - (0x2137, 'M', 'ג'), - (0x2138, 'M', 'ד'), - (0x2139, 'M', 'i'), - (0x213A, 'V'), - (0x213B, 'M', 'fax'), - (0x213C, 'M', 'π'), - (0x213D, 'M', 'γ'), - (0x213F, 'M', 'π'), - (0x2140, 'M', '∑'), - (0x2141, 'V'), - (0x2145, 'M', 'd'), - (0x2147, 'M', 'e'), - (0x2148, 'M', 'i'), - (0x2149, 'M', 'j'), - (0x214A, 'V'), - (0x2150, 'M', '1⁄7'), - (0x2151, 'M', '1⁄9'), - (0x2152, 'M', '1⁄10'), - (0x2153, 'M', '1⁄3'), - (0x2154, 'M', '2⁄3'), - (0x2155, 'M', '1⁄5'), - (0x2156, 'M', '2⁄5'), 
- (0x2157, 'M', '3⁄5'), - (0x2158, 'M', '4⁄5'), - (0x2159, 'M', '1⁄6'), - (0x215A, 'M', '5⁄6'), - (0x215B, 'M', '1⁄8'), - (0x215C, 'M', '3⁄8'), - (0x215D, 'M', '5⁄8'), - (0x215E, 'M', '7⁄8'), - (0x215F, 'M', '1⁄'), - (0x2160, 'M', 'i'), - (0x2161, 'M', 'ii'), - (0x2162, 'M', 'iii'), - (0x2163, 'M', 'iv'), - (0x2164, 'M', 'v'), - (0x2165, 'M', 'vi'), - (0x2166, 'M', 'vii'), - (0x2167, 'M', 'viii'), - (0x2168, 'M', 'ix'), - (0x2169, 'M', 'x'), - (0x216A, 'M', 'xi'), - (0x216B, 'M', 'xii'), - (0x216C, 'M', 'l'), - (0x216D, 'M', 'c'), - (0x216E, 'M', 'd'), - (0x216F, 'M', 'm'), - (0x2170, 'M', 'i'), - (0x2171, 'M', 'ii'), - (0x2172, 'M', 'iii'), - (0x2173, 'M', 'iv'), - (0x2174, 'M', 'v'), - (0x2175, 'M', 'vi'), - (0x2176, 'M', 'vii'), - (0x2177, 'M', 'viii'), - (0x2178, 'M', 'ix'), - (0x2179, 'M', 'x'), - (0x217A, 'M', 'xi'), - (0x217B, 'M', 'xii'), - (0x217C, 'M', 'l'), - ] - -def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x217D, 'M', 'c'), - (0x217E, 'M', 'd'), - (0x217F, 'M', 'm'), - (0x2180, 'V'), - (0x2183, 'X'), - (0x2184, 'V'), - (0x2189, 'M', '0⁄3'), - (0x218A, 'V'), - (0x218C, 'X'), - (0x2190, 'V'), - (0x222C, 'M', '∫∫'), - (0x222D, 'M', '∫∫∫'), - (0x222E, 'V'), - (0x222F, 'M', '∮∮'), - (0x2230, 'M', '∮∮∮'), - (0x2231, 'V'), - (0x2260, '3'), - (0x2261, 'V'), - (0x226E, '3'), - (0x2270, 'V'), - (0x2329, 'M', '〈'), - (0x232A, 'M', '〉'), - (0x232B, 'V'), - (0x2427, 'X'), - (0x2440, 'V'), - (0x244B, 'X'), - (0x2460, 'M', '1'), - (0x2461, 'M', '2'), - (0x2462, 'M', '3'), - (0x2463, 'M', '4'), - (0x2464, 'M', '5'), - (0x2465, 'M', '6'), - (0x2466, 'M', '7'), - (0x2467, 'M', '8'), - (0x2468, 'M', '9'), - (0x2469, 'M', '10'), - (0x246A, 'M', '11'), - (0x246B, 'M', '12'), - (0x246C, 'M', '13'), - (0x246D, 'M', '14'), - (0x246E, 'M', '15'), - (0x246F, 'M', '16'), - (0x2470, 'M', '17'), - (0x2471, 'M', '18'), - (0x2472, 'M', '19'), - (0x2473, 'M', '20'), - (0x2474, '3', '(1)'), - (0x2475, '3', '(2)'), - (0x2476, '3', '(3)'), - (0x2477, '3', '(4)'), - (0x2478, '3', '(5)'), - (0x2479, '3', '(6)'), - (0x247A, '3', '(7)'), - (0x247B, '3', '(8)'), - (0x247C, '3', '(9)'), - (0x247D, '3', '(10)'), - (0x247E, '3', '(11)'), - (0x247F, '3', '(12)'), - (0x2480, '3', '(13)'), - (0x2481, '3', '(14)'), - (0x2482, '3', '(15)'), - (0x2483, '3', '(16)'), - (0x2484, '3', '(17)'), - (0x2485, '3', '(18)'), - (0x2486, '3', '(19)'), - (0x2487, '3', '(20)'), - (0x2488, 'X'), - (0x249C, '3', '(a)'), - (0x249D, '3', '(b)'), - (0x249E, '3', '(c)'), - (0x249F, '3', '(d)'), - (0x24A0, '3', '(e)'), - (0x24A1, '3', '(f)'), - (0x24A2, '3', '(g)'), - (0x24A3, '3', '(h)'), - (0x24A4, '3', '(i)'), - (0x24A5, '3', '(j)'), - (0x24A6, '3', '(k)'), - (0x24A7, '3', '(l)'), - (0x24A8, '3', '(m)'), - (0x24A9, '3', '(n)'), - (0x24AA, '3', '(o)'), - (0x24AB, '3', '(p)'), - (0x24AC, '3', '(q)'), - (0x24AD, '3', '(r)'), - (0x24AE, '3', '(s)'), - (0x24AF, '3', '(t)'), - (0x24B0, '3', '(u)'), - (0x24B1, '3', '(v)'), - (0x24B2, '3', '(w)'), - (0x24B3, '3', '(x)'), - (0x24B4, '3', '(y)'), - (0x24B5, '3', '(z)'), - (0x24B6, 'M', 'a'), - (0x24B7, 'M', 'b'), - (0x24B8, 'M', 'c'), - (0x24B9, 'M', 'd'), - (0x24BA, 'M', 'e'), - (0x24BB, 'M', 'f'), - (0x24BC, 'M', 'g'), - ] - -def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x24BD, 'M', 'h'), - (0x24BE, 'M', 'i'), - (0x24BF, 'M', 'j'), - (0x24C0, 'M', 'k'), - (0x24C1, 'M', 'l'), - (0x24C2, 'M', 'm'), - (0x24C3, 'M', 'n'), - (0x24C4, 'M', 'o'), - (0x24C5, 'M', 'p'), - (0x24C6, 'M', 'q'), - (0x24C7, 'M', 'r'), - (0x24C8, 'M', 
's'), - (0x24C9, 'M', 't'), - (0x24CA, 'M', 'u'), - (0x24CB, 'M', 'v'), - (0x24CC, 'M', 'w'), - (0x24CD, 'M', 'x'), - (0x24CE, 'M', 'y'), - (0x24CF, 'M', 'z'), - (0x24D0, 'M', 'a'), - (0x24D1, 'M', 'b'), - (0x24D2, 'M', 'c'), - (0x24D3, 'M', 'd'), - (0x24D4, 'M', 'e'), - (0x24D5, 'M', 'f'), - (0x24D6, 'M', 'g'), - (0x24D7, 'M', 'h'), - (0x24D8, 'M', 'i'), - (0x24D9, 'M', 'j'), - (0x24DA, 'M', 'k'), - (0x24DB, 'M', 'l'), - (0x24DC, 'M', 'm'), - (0x24DD, 'M', 'n'), - (0x24DE, 'M', 'o'), - (0x24DF, 'M', 'p'), - (0x24E0, 'M', 'q'), - (0x24E1, 'M', 'r'), - (0x24E2, 'M', 's'), - (0x24E3, 'M', 't'), - (0x24E4, 'M', 'u'), - (0x24E5, 'M', 'v'), - (0x24E6, 'M', 'w'), - (0x24E7, 'M', 'x'), - (0x24E8, 'M', 'y'), - (0x24E9, 'M', 'z'), - (0x24EA, 'M', '0'), - (0x24EB, 'V'), - (0x2A0C, 'M', '∫∫∫∫'), - (0x2A0D, 'V'), - (0x2A74, '3', '::='), - (0x2A75, '3', '=='), - (0x2A76, '3', '==='), - (0x2A77, 'V'), - (0x2ADC, 'M', '⫝̸'), - (0x2ADD, 'V'), - (0x2B74, 'X'), - (0x2B76, 'V'), - (0x2B96, 'X'), - (0x2B97, 'V'), - (0x2C00, 'M', 'ⰰ'), - (0x2C01, 'M', 'ⰱ'), - (0x2C02, 'M', 'ⰲ'), - (0x2C03, 'M', 'ⰳ'), - (0x2C04, 'M', 'ⰴ'), - (0x2C05, 'M', 'ⰵ'), - (0x2C06, 'M', 'ⰶ'), - (0x2C07, 'M', 'ⰷ'), - (0x2C08, 'M', 'ⰸ'), - (0x2C09, 'M', 'ⰹ'), - (0x2C0A, 'M', 'ⰺ'), - (0x2C0B, 'M', 'ⰻ'), - (0x2C0C, 'M', 'ⰼ'), - (0x2C0D, 'M', 'ⰽ'), - (0x2C0E, 'M', 'ⰾ'), - (0x2C0F, 'M', 'ⰿ'), - (0x2C10, 'M', 'ⱀ'), - (0x2C11, 'M', 'ⱁ'), - (0x2C12, 'M', 'ⱂ'), - (0x2C13, 'M', 'ⱃ'), - (0x2C14, 'M', 'ⱄ'), - (0x2C15, 'M', 'ⱅ'), - (0x2C16, 'M', 'ⱆ'), - (0x2C17, 'M', 'ⱇ'), - (0x2C18, 'M', 'ⱈ'), - (0x2C19, 'M', 'ⱉ'), - (0x2C1A, 'M', 'ⱊ'), - (0x2C1B, 'M', 'ⱋ'), - (0x2C1C, 'M', 'ⱌ'), - (0x2C1D, 'M', 'ⱍ'), - (0x2C1E, 'M', 'ⱎ'), - (0x2C1F, 'M', 'ⱏ'), - (0x2C20, 'M', 'ⱐ'), - (0x2C21, 'M', 'ⱑ'), - (0x2C22, 'M', 'ⱒ'), - (0x2C23, 'M', 'ⱓ'), - (0x2C24, 'M', 'ⱔ'), - (0x2C25, 'M', 'ⱕ'), - (0x2C26, 'M', 'ⱖ'), - (0x2C27, 'M', 'ⱗ'), - (0x2C28, 'M', 'ⱘ'), - ] - -def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2C29, 'M', 'ⱙ'), - (0x2C2A, 'M', 'ⱚ'), - (0x2C2B, 'M', 'ⱛ'), - (0x2C2C, 'M', 'ⱜ'), - (0x2C2D, 'M', 'ⱝ'), - (0x2C2E, 'M', 'ⱞ'), - (0x2C2F, 'M', 'ⱟ'), - (0x2C30, 'V'), - (0x2C60, 'M', 'ⱡ'), - (0x2C61, 'V'), - (0x2C62, 'M', 'ɫ'), - (0x2C63, 'M', 'ᵽ'), - (0x2C64, 'M', 'ɽ'), - (0x2C65, 'V'), - (0x2C67, 'M', 'ⱨ'), - (0x2C68, 'V'), - (0x2C69, 'M', 'ⱪ'), - (0x2C6A, 'V'), - (0x2C6B, 'M', 'ⱬ'), - (0x2C6C, 'V'), - (0x2C6D, 'M', 'ɑ'), - (0x2C6E, 'M', 'ɱ'), - (0x2C6F, 'M', 'ɐ'), - (0x2C70, 'M', 'ɒ'), - (0x2C71, 'V'), - (0x2C72, 'M', 'ⱳ'), - (0x2C73, 'V'), - (0x2C75, 'M', 'ⱶ'), - (0x2C76, 'V'), - (0x2C7C, 'M', 'j'), - (0x2C7D, 'M', 'v'), - (0x2C7E, 'M', 'ȿ'), - (0x2C7F, 'M', 'ɀ'), - (0x2C80, 'M', 'ⲁ'), - (0x2C81, 'V'), - (0x2C82, 'M', 'ⲃ'), - (0x2C83, 'V'), - (0x2C84, 'M', 'ⲅ'), - (0x2C85, 'V'), - (0x2C86, 'M', 'ⲇ'), - (0x2C87, 'V'), - (0x2C88, 'M', 'ⲉ'), - (0x2C89, 'V'), - (0x2C8A, 'M', 'ⲋ'), - (0x2C8B, 'V'), - (0x2C8C, 'M', 'ⲍ'), - (0x2C8D, 'V'), - (0x2C8E, 'M', 'ⲏ'), - (0x2C8F, 'V'), - (0x2C90, 'M', 'ⲑ'), - (0x2C91, 'V'), - (0x2C92, 'M', 'ⲓ'), - (0x2C93, 'V'), - (0x2C94, 'M', 'ⲕ'), - (0x2C95, 'V'), - (0x2C96, 'M', 'ⲗ'), - (0x2C97, 'V'), - (0x2C98, 'M', 'ⲙ'), - (0x2C99, 'V'), - (0x2C9A, 'M', 'ⲛ'), - (0x2C9B, 'V'), - (0x2C9C, 'M', 'ⲝ'), - (0x2C9D, 'V'), - (0x2C9E, 'M', 'ⲟ'), - (0x2C9F, 'V'), - (0x2CA0, 'M', 'ⲡ'), - (0x2CA1, 'V'), - (0x2CA2, 'M', 'ⲣ'), - (0x2CA3, 'V'), - (0x2CA4, 'M', 'ⲥ'), - (0x2CA5, 'V'), - (0x2CA6, 'M', 'ⲧ'), - (0x2CA7, 'V'), - (0x2CA8, 'M', 'ⲩ'), - (0x2CA9, 'V'), - (0x2CAA, 'M', 'ⲫ'), - (0x2CAB, 'V'), - (0x2CAC, 'M', 
'ⲭ'), - (0x2CAD, 'V'), - (0x2CAE, 'M', 'ⲯ'), - (0x2CAF, 'V'), - (0x2CB0, 'M', 'ⲱ'), - (0x2CB1, 'V'), - (0x2CB2, 'M', 'ⲳ'), - (0x2CB3, 'V'), - (0x2CB4, 'M', 'ⲵ'), - (0x2CB5, 'V'), - (0x2CB6, 'M', 'ⲷ'), - (0x2CB7, 'V'), - (0x2CB8, 'M', 'ⲹ'), - (0x2CB9, 'V'), - (0x2CBA, 'M', 'ⲻ'), - (0x2CBB, 'V'), - (0x2CBC, 'M', 'ⲽ'), - (0x2CBD, 'V'), - (0x2CBE, 'M', 'ⲿ'), - (0x2CBF, 'V'), - (0x2CC0, 'M', 'ⳁ'), - (0x2CC1, 'V'), - (0x2CC2, 'M', 'ⳃ'), - ] - -def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2CC3, 'V'), - (0x2CC4, 'M', 'ⳅ'), - (0x2CC5, 'V'), - (0x2CC6, 'M', 'ⳇ'), - (0x2CC7, 'V'), - (0x2CC8, 'M', 'ⳉ'), - (0x2CC9, 'V'), - (0x2CCA, 'M', 'ⳋ'), - (0x2CCB, 'V'), - (0x2CCC, 'M', 'ⳍ'), - (0x2CCD, 'V'), - (0x2CCE, 'M', 'ⳏ'), - (0x2CCF, 'V'), - (0x2CD0, 'M', 'ⳑ'), - (0x2CD1, 'V'), - (0x2CD2, 'M', 'ⳓ'), - (0x2CD3, 'V'), - (0x2CD4, 'M', 'ⳕ'), - (0x2CD5, 'V'), - (0x2CD6, 'M', 'ⳗ'), - (0x2CD7, 'V'), - (0x2CD8, 'M', 'ⳙ'), - (0x2CD9, 'V'), - (0x2CDA, 'M', 'ⳛ'), - (0x2CDB, 'V'), - (0x2CDC, 'M', 'ⳝ'), - (0x2CDD, 'V'), - (0x2CDE, 'M', 'ⳟ'), - (0x2CDF, 'V'), - (0x2CE0, 'M', 'ⳡ'), - (0x2CE1, 'V'), - (0x2CE2, 'M', 'ⳣ'), - (0x2CE3, 'V'), - (0x2CEB, 'M', 'ⳬ'), - (0x2CEC, 'V'), - (0x2CED, 'M', 'ⳮ'), - (0x2CEE, 'V'), - (0x2CF2, 'M', 'ⳳ'), - (0x2CF3, 'V'), - (0x2CF4, 'X'), - (0x2CF9, 'V'), - (0x2D26, 'X'), - (0x2D27, 'V'), - (0x2D28, 'X'), - (0x2D2D, 'V'), - (0x2D2E, 'X'), - (0x2D30, 'V'), - (0x2D68, 'X'), - (0x2D6F, 'M', 'ⵡ'), - (0x2D70, 'V'), - (0x2D71, 'X'), - (0x2D7F, 'V'), - (0x2D97, 'X'), - (0x2DA0, 'V'), - (0x2DA7, 'X'), - (0x2DA8, 'V'), - (0x2DAF, 'X'), - (0x2DB0, 'V'), - (0x2DB7, 'X'), - (0x2DB8, 'V'), - (0x2DBF, 'X'), - (0x2DC0, 'V'), - (0x2DC7, 'X'), - (0x2DC8, 'V'), - (0x2DCF, 'X'), - (0x2DD0, 'V'), - (0x2DD7, 'X'), - (0x2DD8, 'V'), - (0x2DDF, 'X'), - (0x2DE0, 'V'), - (0x2E5E, 'X'), - (0x2E80, 'V'), - (0x2E9A, 'X'), - (0x2E9B, 'V'), - (0x2E9F, 'M', '母'), - (0x2EA0, 'V'), - (0x2EF3, 'M', '龟'), - (0x2EF4, 'X'), - (0x2F00, 'M', '一'), - (0x2F01, 'M', '丨'), - (0x2F02, 'M', '丶'), - (0x2F03, 'M', '丿'), - (0x2F04, 'M', '乙'), - (0x2F05, 'M', '亅'), - (0x2F06, 'M', '二'), - (0x2F07, 'M', '亠'), - (0x2F08, 'M', '人'), - (0x2F09, 'M', '儿'), - (0x2F0A, 'M', '入'), - (0x2F0B, 'M', '八'), - (0x2F0C, 'M', '冂'), - (0x2F0D, 'M', '冖'), - (0x2F0E, 'M', '冫'), - (0x2F0F, 'M', '几'), - (0x2F10, 'M', '凵'), - (0x2F11, 'M', '刀'), - (0x2F12, 'M', '力'), - (0x2F13, 'M', '勹'), - (0x2F14, 'M', '匕'), - (0x2F15, 'M', '匚'), - ] - -def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F16, 'M', '匸'), - (0x2F17, 'M', '十'), - (0x2F18, 'M', '卜'), - (0x2F19, 'M', '卩'), - (0x2F1A, 'M', '厂'), - (0x2F1B, 'M', '厶'), - (0x2F1C, 'M', '又'), - (0x2F1D, 'M', '口'), - (0x2F1E, 'M', '囗'), - (0x2F1F, 'M', '土'), - (0x2F20, 'M', '士'), - (0x2F21, 'M', '夂'), - (0x2F22, 'M', '夊'), - (0x2F23, 'M', '夕'), - (0x2F24, 'M', '大'), - (0x2F25, 'M', '女'), - (0x2F26, 'M', '子'), - (0x2F27, 'M', '宀'), - (0x2F28, 'M', '寸'), - (0x2F29, 'M', '小'), - (0x2F2A, 'M', '尢'), - (0x2F2B, 'M', '尸'), - (0x2F2C, 'M', '屮'), - (0x2F2D, 'M', '山'), - (0x2F2E, 'M', '巛'), - (0x2F2F, 'M', '工'), - (0x2F30, 'M', '己'), - (0x2F31, 'M', '巾'), - (0x2F32, 'M', '干'), - (0x2F33, 'M', '幺'), - (0x2F34, 'M', '广'), - (0x2F35, 'M', '廴'), - (0x2F36, 'M', '廾'), - (0x2F37, 'M', '弋'), - (0x2F38, 'M', '弓'), - (0x2F39, 'M', '彐'), - (0x2F3A, 'M', '彡'), - (0x2F3B, 'M', '彳'), - (0x2F3C, 'M', '心'), - (0x2F3D, 'M', '戈'), - (0x2F3E, 'M', '戶'), - (0x2F3F, 'M', '手'), - (0x2F40, 'M', '支'), - (0x2F41, 'M', '攴'), - (0x2F42, 'M', '文'), - (0x2F43, 'M', '斗'), - (0x2F44, 
'M', '斤'), - (0x2F45, 'M', '方'), - (0x2F46, 'M', '无'), - (0x2F47, 'M', '日'), - (0x2F48, 'M', '曰'), - (0x2F49, 'M', '月'), - (0x2F4A, 'M', '木'), - (0x2F4B, 'M', '欠'), - (0x2F4C, 'M', '止'), - (0x2F4D, 'M', '歹'), - (0x2F4E, 'M', '殳'), - (0x2F4F, 'M', '毋'), - (0x2F50, 'M', '比'), - (0x2F51, 'M', '毛'), - (0x2F52, 'M', '氏'), - (0x2F53, 'M', '气'), - (0x2F54, 'M', '水'), - (0x2F55, 'M', '火'), - (0x2F56, 'M', '爪'), - (0x2F57, 'M', '父'), - (0x2F58, 'M', '爻'), - (0x2F59, 'M', '爿'), - (0x2F5A, 'M', '片'), - (0x2F5B, 'M', '牙'), - (0x2F5C, 'M', '牛'), - (0x2F5D, 'M', '犬'), - (0x2F5E, 'M', '玄'), - (0x2F5F, 'M', '玉'), - (0x2F60, 'M', '瓜'), - (0x2F61, 'M', '瓦'), - (0x2F62, 'M', '甘'), - (0x2F63, 'M', '生'), - (0x2F64, 'M', '用'), - (0x2F65, 'M', '田'), - (0x2F66, 'M', '疋'), - (0x2F67, 'M', '疒'), - (0x2F68, 'M', '癶'), - (0x2F69, 'M', '白'), - (0x2F6A, 'M', '皮'), - (0x2F6B, 'M', '皿'), - (0x2F6C, 'M', '目'), - (0x2F6D, 'M', '矛'), - (0x2F6E, 'M', '矢'), - (0x2F6F, 'M', '石'), - (0x2F70, 'M', '示'), - (0x2F71, 'M', '禸'), - (0x2F72, 'M', '禾'), - (0x2F73, 'M', '穴'), - (0x2F74, 'M', '立'), - (0x2F75, 'M', '竹'), - (0x2F76, 'M', '米'), - (0x2F77, 'M', '糸'), - (0x2F78, 'M', '缶'), - (0x2F79, 'M', '网'), - ] - -def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F7A, 'M', '羊'), - (0x2F7B, 'M', '羽'), - (0x2F7C, 'M', '老'), - (0x2F7D, 'M', '而'), - (0x2F7E, 'M', '耒'), - (0x2F7F, 'M', '耳'), - (0x2F80, 'M', '聿'), - (0x2F81, 'M', '肉'), - (0x2F82, 'M', '臣'), - (0x2F83, 'M', '自'), - (0x2F84, 'M', '至'), - (0x2F85, 'M', '臼'), - (0x2F86, 'M', '舌'), - (0x2F87, 'M', '舛'), - (0x2F88, 'M', '舟'), - (0x2F89, 'M', '艮'), - (0x2F8A, 'M', '色'), - (0x2F8B, 'M', '艸'), - (0x2F8C, 'M', '虍'), - (0x2F8D, 'M', '虫'), - (0x2F8E, 'M', '血'), - (0x2F8F, 'M', '行'), - (0x2F90, 'M', '衣'), - (0x2F91, 'M', '襾'), - (0x2F92, 'M', '見'), - (0x2F93, 'M', '角'), - (0x2F94, 'M', '言'), - (0x2F95, 'M', '谷'), - (0x2F96, 'M', '豆'), - (0x2F97, 'M', '豕'), - (0x2F98, 'M', '豸'), - (0x2F99, 'M', '貝'), - (0x2F9A, 'M', '赤'), - (0x2F9B, 'M', '走'), - (0x2F9C, 'M', '足'), - (0x2F9D, 'M', '身'), - (0x2F9E, 'M', '車'), - (0x2F9F, 'M', '辛'), - (0x2FA0, 'M', '辰'), - (0x2FA1, 'M', '辵'), - (0x2FA2, 'M', '邑'), - (0x2FA3, 'M', '酉'), - (0x2FA4, 'M', '釆'), - (0x2FA5, 'M', '里'), - (0x2FA6, 'M', '金'), - (0x2FA7, 'M', '長'), - (0x2FA8, 'M', '門'), - (0x2FA9, 'M', '阜'), - (0x2FAA, 'M', '隶'), - (0x2FAB, 'M', '隹'), - (0x2FAC, 'M', '雨'), - (0x2FAD, 'M', '靑'), - (0x2FAE, 'M', '非'), - (0x2FAF, 'M', '面'), - (0x2FB0, 'M', '革'), - (0x2FB1, 'M', '韋'), - (0x2FB2, 'M', '韭'), - (0x2FB3, 'M', '音'), - (0x2FB4, 'M', '頁'), - (0x2FB5, 'M', '風'), - (0x2FB6, 'M', '飛'), - (0x2FB7, 'M', '食'), - (0x2FB8, 'M', '首'), - (0x2FB9, 'M', '香'), - (0x2FBA, 'M', '馬'), - (0x2FBB, 'M', '骨'), - (0x2FBC, 'M', '高'), - (0x2FBD, 'M', '髟'), - (0x2FBE, 'M', '鬥'), - (0x2FBF, 'M', '鬯'), - (0x2FC0, 'M', '鬲'), - (0x2FC1, 'M', '鬼'), - (0x2FC2, 'M', '魚'), - (0x2FC3, 'M', '鳥'), - (0x2FC4, 'M', '鹵'), - (0x2FC5, 'M', '鹿'), - (0x2FC6, 'M', '麥'), - (0x2FC7, 'M', '麻'), - (0x2FC8, 'M', '黃'), - (0x2FC9, 'M', '黍'), - (0x2FCA, 'M', '黑'), - (0x2FCB, 'M', '黹'), - (0x2FCC, 'M', '黽'), - (0x2FCD, 'M', '鼎'), - (0x2FCE, 'M', '鼓'), - (0x2FCF, 'M', '鼠'), - (0x2FD0, 'M', '鼻'), - (0x2FD1, 'M', '齊'), - (0x2FD2, 'M', '齒'), - (0x2FD3, 'M', '龍'), - (0x2FD4, 'M', '龜'), - (0x2FD5, 'M', '龠'), - (0x2FD6, 'X'), - (0x3000, '3', ' '), - (0x3001, 'V'), - (0x3002, 'M', '.'), - (0x3003, 'V'), - (0x3036, 'M', '〒'), - (0x3037, 'V'), - (0x3038, 'M', '十'), - ] - -def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3039, 'M', '卄'), - 
(0x303A, 'M', '卅'), - (0x303B, 'V'), - (0x3040, 'X'), - (0x3041, 'V'), - (0x3097, 'X'), - (0x3099, 'V'), - (0x309B, '3', ' ゙'), - (0x309C, '3', ' ゚'), - (0x309D, 'V'), - (0x309F, 'M', 'より'), - (0x30A0, 'V'), - (0x30FF, 'M', 'コト'), - (0x3100, 'X'), - (0x3105, 'V'), - (0x3130, 'X'), - (0x3131, 'M', 'ᄀ'), - (0x3132, 'M', 'ᄁ'), - (0x3133, 'M', 'ᆪ'), - (0x3134, 'M', 'ᄂ'), - (0x3135, 'M', 'ᆬ'), - (0x3136, 'M', 'ᆭ'), - (0x3137, 'M', 'ᄃ'), - (0x3138, 'M', 'ᄄ'), - (0x3139, 'M', 'ᄅ'), - (0x313A, 'M', 'ᆰ'), - (0x313B, 'M', 'ᆱ'), - (0x313C, 'M', 'ᆲ'), - (0x313D, 'M', 'ᆳ'), - (0x313E, 'M', 'ᆴ'), - (0x313F, 'M', 'ᆵ'), - (0x3140, 'M', 'ᄚ'), - (0x3141, 'M', 'ᄆ'), - (0x3142, 'M', 'ᄇ'), - (0x3143, 'M', 'ᄈ'), - (0x3144, 'M', 'ᄡ'), - (0x3145, 'M', 'ᄉ'), - (0x3146, 'M', 'ᄊ'), - (0x3147, 'M', 'ᄋ'), - (0x3148, 'M', 'ᄌ'), - (0x3149, 'M', 'ᄍ'), - (0x314A, 'M', 'ᄎ'), - (0x314B, 'M', 'ᄏ'), - (0x314C, 'M', 'ᄐ'), - (0x314D, 'M', 'ᄑ'), - (0x314E, 'M', 'ᄒ'), - (0x314F, 'M', 'ᅡ'), - (0x3150, 'M', 'ᅢ'), - (0x3151, 'M', 'ᅣ'), - (0x3152, 'M', 'ᅤ'), - (0x3153, 'M', 'ᅥ'), - (0x3154, 'M', 'ᅦ'), - (0x3155, 'M', 'ᅧ'), - (0x3156, 'M', 'ᅨ'), - (0x3157, 'M', 'ᅩ'), - (0x3158, 'M', 'ᅪ'), - (0x3159, 'M', 'ᅫ'), - (0x315A, 'M', 'ᅬ'), - (0x315B, 'M', 'ᅭ'), - (0x315C, 'M', 'ᅮ'), - (0x315D, 'M', 'ᅯ'), - (0x315E, 'M', 'ᅰ'), - (0x315F, 'M', 'ᅱ'), - (0x3160, 'M', 'ᅲ'), - (0x3161, 'M', 'ᅳ'), - (0x3162, 'M', 'ᅴ'), - (0x3163, 'M', 'ᅵ'), - (0x3164, 'X'), - (0x3165, 'M', 'ᄔ'), - (0x3166, 'M', 'ᄕ'), - (0x3167, 'M', 'ᇇ'), - (0x3168, 'M', 'ᇈ'), - (0x3169, 'M', 'ᇌ'), - (0x316A, 'M', 'ᇎ'), - (0x316B, 'M', 'ᇓ'), - (0x316C, 'M', 'ᇗ'), - (0x316D, 'M', 'ᇙ'), - (0x316E, 'M', 'ᄜ'), - (0x316F, 'M', 'ᇝ'), - (0x3170, 'M', 'ᇟ'), - (0x3171, 'M', 'ᄝ'), - (0x3172, 'M', 'ᄞ'), - (0x3173, 'M', 'ᄠ'), - (0x3174, 'M', 'ᄢ'), - (0x3175, 'M', 'ᄣ'), - (0x3176, 'M', 'ᄧ'), - (0x3177, 'M', 'ᄩ'), - (0x3178, 'M', 'ᄫ'), - (0x3179, 'M', 'ᄬ'), - (0x317A, 'M', 'ᄭ'), - (0x317B, 'M', 'ᄮ'), - (0x317C, 'M', 'ᄯ'), - (0x317D, 'M', 'ᄲ'), - (0x317E, 'M', 'ᄶ'), - (0x317F, 'M', 'ᅀ'), - (0x3180, 'M', 'ᅇ'), - (0x3181, 'M', 'ᅌ'), - (0x3182, 'M', 'ᇱ'), - (0x3183, 'M', 'ᇲ'), - (0x3184, 'M', 'ᅗ'), - ] - -def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3185, 'M', 'ᅘ'), - (0x3186, 'M', 'ᅙ'), - (0x3187, 'M', 'ᆄ'), - (0x3188, 'M', 'ᆅ'), - (0x3189, 'M', 'ᆈ'), - (0x318A, 'M', 'ᆑ'), - (0x318B, 'M', 'ᆒ'), - (0x318C, 'M', 'ᆔ'), - (0x318D, 'M', 'ᆞ'), - (0x318E, 'M', 'ᆡ'), - (0x318F, 'X'), - (0x3190, 'V'), - (0x3192, 'M', '一'), - (0x3193, 'M', '二'), - (0x3194, 'M', '三'), - (0x3195, 'M', '四'), - (0x3196, 'M', '上'), - (0x3197, 'M', '中'), - (0x3198, 'M', '下'), - (0x3199, 'M', '甲'), - (0x319A, 'M', '乙'), - (0x319B, 'M', '丙'), - (0x319C, 'M', '丁'), - (0x319D, 'M', '天'), - (0x319E, 'M', '地'), - (0x319F, 'M', '人'), - (0x31A0, 'V'), - (0x31E4, 'X'), - (0x31F0, 'V'), - (0x3200, '3', '(ᄀ)'), - (0x3201, '3', '(ᄂ)'), - (0x3202, '3', '(ᄃ)'), - (0x3203, '3', '(ᄅ)'), - (0x3204, '3', '(ᄆ)'), - (0x3205, '3', '(ᄇ)'), - (0x3206, '3', '(ᄉ)'), - (0x3207, '3', '(ᄋ)'), - (0x3208, '3', '(ᄌ)'), - (0x3209, '3', '(ᄎ)'), - (0x320A, '3', '(ᄏ)'), - (0x320B, '3', '(ᄐ)'), - (0x320C, '3', '(ᄑ)'), - (0x320D, '3', '(ᄒ)'), - (0x320E, '3', '(가)'), - (0x320F, '3', '(나)'), - (0x3210, '3', '(다)'), - (0x3211, '3', '(라)'), - (0x3212, '3', '(마)'), - (0x3213, '3', '(바)'), - (0x3214, '3', '(사)'), - (0x3215, '3', '(아)'), - (0x3216, '3', '(자)'), - (0x3217, '3', '(차)'), - (0x3218, '3', '(카)'), - (0x3219, '3', '(타)'), - (0x321A, '3', '(파)'), - (0x321B, '3', '(하)'), - (0x321C, '3', '(주)'), - (0x321D, '3', '(오전)'), - 
(0x321E, '3', '(오후)'), - (0x321F, 'X'), - (0x3220, '3', '(一)'), - (0x3221, '3', '(二)'), - (0x3222, '3', '(三)'), - (0x3223, '3', '(四)'), - (0x3224, '3', '(五)'), - (0x3225, '3', '(六)'), - (0x3226, '3', '(七)'), - (0x3227, '3', '(八)'), - (0x3228, '3', '(九)'), - (0x3229, '3', '(十)'), - (0x322A, '3', '(月)'), - (0x322B, '3', '(火)'), - (0x322C, '3', '(水)'), - (0x322D, '3', '(木)'), - (0x322E, '3', '(金)'), - (0x322F, '3', '(土)'), - (0x3230, '3', '(日)'), - (0x3231, '3', '(株)'), - (0x3232, '3', '(有)'), - (0x3233, '3', '(社)'), - (0x3234, '3', '(名)'), - (0x3235, '3', '(特)'), - (0x3236, '3', '(財)'), - (0x3237, '3', '(祝)'), - (0x3238, '3', '(労)'), - (0x3239, '3', '(代)'), - (0x323A, '3', '(呼)'), - (0x323B, '3', '(学)'), - (0x323C, '3', '(監)'), - (0x323D, '3', '(企)'), - (0x323E, '3', '(資)'), - (0x323F, '3', '(協)'), - (0x3240, '3', '(祭)'), - (0x3241, '3', '(休)'), - (0x3242, '3', '(自)'), - (0x3243, '3', '(至)'), - (0x3244, 'M', '問'), - (0x3245, 'M', '幼'), - (0x3246, 'M', '文'), - ] - -def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3247, 'M', '箏'), - (0x3248, 'V'), - (0x3250, 'M', 'pte'), - (0x3251, 'M', '21'), - (0x3252, 'M', '22'), - (0x3253, 'M', '23'), - (0x3254, 'M', '24'), - (0x3255, 'M', '25'), - (0x3256, 'M', '26'), - (0x3257, 'M', '27'), - (0x3258, 'M', '28'), - (0x3259, 'M', '29'), - (0x325A, 'M', '30'), - (0x325B, 'M', '31'), - (0x325C, 'M', '32'), - (0x325D, 'M', '33'), - (0x325E, 'M', '34'), - (0x325F, 'M', '35'), - (0x3260, 'M', 'ᄀ'), - (0x3261, 'M', 'ᄂ'), - (0x3262, 'M', 'ᄃ'), - (0x3263, 'M', 'ᄅ'), - (0x3264, 'M', 'ᄆ'), - (0x3265, 'M', 'ᄇ'), - (0x3266, 'M', 'ᄉ'), - (0x3267, 'M', 'ᄋ'), - (0x3268, 'M', 'ᄌ'), - (0x3269, 'M', 'ᄎ'), - (0x326A, 'M', 'ᄏ'), - (0x326B, 'M', 'ᄐ'), - (0x326C, 'M', 'ᄑ'), - (0x326D, 'M', 'ᄒ'), - (0x326E, 'M', '가'), - (0x326F, 'M', '나'), - (0x3270, 'M', '다'), - (0x3271, 'M', '라'), - (0x3272, 'M', '마'), - (0x3273, 'M', '바'), - (0x3274, 'M', '사'), - (0x3275, 'M', '아'), - (0x3276, 'M', '자'), - (0x3277, 'M', '차'), - (0x3278, 'M', '카'), - (0x3279, 'M', '타'), - (0x327A, 'M', '파'), - (0x327B, 'M', '하'), - (0x327C, 'M', '참고'), - (0x327D, 'M', '주의'), - (0x327E, 'M', '우'), - (0x327F, 'V'), - (0x3280, 'M', '一'), - (0x3281, 'M', '二'), - (0x3282, 'M', '三'), - (0x3283, 'M', '四'), - (0x3284, 'M', '五'), - (0x3285, 'M', '六'), - (0x3286, 'M', '七'), - (0x3287, 'M', '八'), - (0x3288, 'M', '九'), - (0x3289, 'M', '十'), - (0x328A, 'M', '月'), - (0x328B, 'M', '火'), - (0x328C, 'M', '水'), - (0x328D, 'M', '木'), - (0x328E, 'M', '金'), - (0x328F, 'M', '土'), - (0x3290, 'M', '日'), - (0x3291, 'M', '株'), - (0x3292, 'M', '有'), - (0x3293, 'M', '社'), - (0x3294, 'M', '名'), - (0x3295, 'M', '特'), - (0x3296, 'M', '財'), - (0x3297, 'M', '祝'), - (0x3298, 'M', '労'), - (0x3299, 'M', '秘'), - (0x329A, 'M', '男'), - (0x329B, 'M', '女'), - (0x329C, 'M', '適'), - (0x329D, 'M', '優'), - (0x329E, 'M', '印'), - (0x329F, 'M', '注'), - (0x32A0, 'M', '項'), - (0x32A1, 'M', '休'), - (0x32A2, 'M', '写'), - (0x32A3, 'M', '正'), - (0x32A4, 'M', '上'), - (0x32A5, 'M', '中'), - (0x32A6, 'M', '下'), - (0x32A7, 'M', '左'), - (0x32A8, 'M', '右'), - (0x32A9, 'M', '医'), - (0x32AA, 'M', '宗'), - (0x32AB, 'M', '学'), - (0x32AC, 'M', '監'), - (0x32AD, 'M', '企'), - (0x32AE, 'M', '資'), - (0x32AF, 'M', '協'), - (0x32B0, 'M', '夜'), - (0x32B1, 'M', '36'), - ] - -def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x32B2, 'M', '37'), - (0x32B3, 'M', '38'), - (0x32B4, 'M', '39'), - (0x32B5, 'M', '40'), - (0x32B6, 'M', '41'), - (0x32B7, 'M', '42'), - (0x32B8, 'M', '43'), - (0x32B9, 'M', '44'), - (0x32BA, 'M', 
'45'), - (0x32BB, 'M', '46'), - (0x32BC, 'M', '47'), - (0x32BD, 'M', '48'), - (0x32BE, 'M', '49'), - (0x32BF, 'M', '50'), - (0x32C0, 'M', '1月'), - (0x32C1, 'M', '2月'), - (0x32C2, 'M', '3月'), - (0x32C3, 'M', '4月'), - (0x32C4, 'M', '5月'), - (0x32C5, 'M', '6月'), - (0x32C6, 'M', '7月'), - (0x32C7, 'M', '8月'), - (0x32C8, 'M', '9月'), - (0x32C9, 'M', '10月'), - (0x32CA, 'M', '11月'), - (0x32CB, 'M', '12月'), - (0x32CC, 'M', 'hg'), - (0x32CD, 'M', 'erg'), - (0x32CE, 'M', 'ev'), - (0x32CF, 'M', 'ltd'), - (0x32D0, 'M', 'ア'), - (0x32D1, 'M', 'イ'), - (0x32D2, 'M', 'ウ'), - (0x32D3, 'M', 'エ'), - (0x32D4, 'M', 'オ'), - (0x32D5, 'M', 'カ'), - (0x32D6, 'M', 'キ'), - (0x32D7, 'M', 'ク'), - (0x32D8, 'M', 'ケ'), - (0x32D9, 'M', 'コ'), - (0x32DA, 'M', 'サ'), - (0x32DB, 'M', 'シ'), - (0x32DC, 'M', 'ス'), - (0x32DD, 'M', 'セ'), - (0x32DE, 'M', 'ソ'), - (0x32DF, 'M', 'タ'), - (0x32E0, 'M', 'チ'), - (0x32E1, 'M', 'ツ'), - (0x32E2, 'M', 'テ'), - (0x32E3, 'M', 'ト'), - (0x32E4, 'M', 'ナ'), - (0x32E5, 'M', 'ニ'), - (0x32E6, 'M', 'ヌ'), - (0x32E7, 'M', 'ネ'), - (0x32E8, 'M', 'ノ'), - (0x32E9, 'M', 'ハ'), - (0x32EA, 'M', 'ヒ'), - (0x32EB, 'M', 'フ'), - (0x32EC, 'M', 'ヘ'), - (0x32ED, 'M', 'ホ'), - (0x32EE, 'M', 'マ'), - (0x32EF, 'M', 'ミ'), - (0x32F0, 'M', 'ム'), - (0x32F1, 'M', 'メ'), - (0x32F2, 'M', 'モ'), - (0x32F3, 'M', 'ヤ'), - (0x32F4, 'M', 'ユ'), - (0x32F5, 'M', 'ヨ'), - (0x32F6, 'M', 'ラ'), - (0x32F7, 'M', 'リ'), - (0x32F8, 'M', 'ル'), - (0x32F9, 'M', 'レ'), - (0x32FA, 'M', 'ロ'), - (0x32FB, 'M', 'ワ'), - (0x32FC, 'M', 'ヰ'), - (0x32FD, 'M', 'ヱ'), - (0x32FE, 'M', 'ヲ'), - (0x32FF, 'M', '令和'), - (0x3300, 'M', 'アパート'), - (0x3301, 'M', 'アルファ'), - (0x3302, 'M', 'アンペア'), - (0x3303, 'M', 'アール'), - (0x3304, 'M', 'イニング'), - (0x3305, 'M', 'インチ'), - (0x3306, 'M', 'ウォン'), - (0x3307, 'M', 'エスクード'), - (0x3308, 'M', 'エーカー'), - (0x3309, 'M', 'オンス'), - (0x330A, 'M', 'オーム'), - (0x330B, 'M', 'カイリ'), - (0x330C, 'M', 'カラット'), - (0x330D, 'M', 'カロリー'), - (0x330E, 'M', 'ガロン'), - (0x330F, 'M', 'ガンマ'), - (0x3310, 'M', 'ギガ'), - (0x3311, 'M', 'ギニー'), - (0x3312, 'M', 'キュリー'), - (0x3313, 'M', 'ギルダー'), - (0x3314, 'M', 'キロ'), - (0x3315, 'M', 'キログラム'), - ] - -def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3316, 'M', 'キロメートル'), - (0x3317, 'M', 'キロワット'), - (0x3318, 'M', 'グラム'), - (0x3319, 'M', 'グラムトン'), - (0x331A, 'M', 'クルゼイロ'), - (0x331B, 'M', 'クローネ'), - (0x331C, 'M', 'ケース'), - (0x331D, 'M', 'コルナ'), - (0x331E, 'M', 'コーポ'), - (0x331F, 'M', 'サイクル'), - (0x3320, 'M', 'サンチーム'), - (0x3321, 'M', 'シリング'), - (0x3322, 'M', 'センチ'), - (0x3323, 'M', 'セント'), - (0x3324, 'M', 'ダース'), - (0x3325, 'M', 'デシ'), - (0x3326, 'M', 'ドル'), - (0x3327, 'M', 'トン'), - (0x3328, 'M', 'ナノ'), - (0x3329, 'M', 'ノット'), - (0x332A, 'M', 'ハイツ'), - (0x332B, 'M', 'パーセント'), - (0x332C, 'M', 'パーツ'), - (0x332D, 'M', 'バーレル'), - (0x332E, 'M', 'ピアストル'), - (0x332F, 'M', 'ピクル'), - (0x3330, 'M', 'ピコ'), - (0x3331, 'M', 'ビル'), - (0x3332, 'M', 'ファラッド'), - (0x3333, 'M', 'フィート'), - (0x3334, 'M', 'ブッシェル'), - (0x3335, 'M', 'フラン'), - (0x3336, 'M', 'ヘクタール'), - (0x3337, 'M', 'ペソ'), - (0x3338, 'M', 'ペニヒ'), - (0x3339, 'M', 'ヘルツ'), - (0x333A, 'M', 'ペンス'), - (0x333B, 'M', 'ページ'), - (0x333C, 'M', 'ベータ'), - (0x333D, 'M', 'ポイント'), - (0x333E, 'M', 'ボルト'), - (0x333F, 'M', 'ホン'), - (0x3340, 'M', 'ポンド'), - (0x3341, 'M', 'ホール'), - (0x3342, 'M', 'ホーン'), - (0x3343, 'M', 'マイクロ'), - (0x3344, 'M', 'マイル'), - (0x3345, 'M', 'マッハ'), - (0x3346, 'M', 'マルク'), - (0x3347, 'M', 'マンション'), - (0x3348, 'M', 'ミクロン'), - (0x3349, 'M', 'ミリ'), - (0x334A, 'M', 'ミリバール'), - (0x334B, 'M', 'メガ'), - (0x334C, 'M', 'メガトン'), - (0x334D, 'M', 'メートル'), - 
(0x334E, 'M', 'ヤード'), - (0x334F, 'M', 'ヤール'), - (0x3350, 'M', 'ユアン'), - (0x3351, 'M', 'リットル'), - (0x3352, 'M', 'リラ'), - (0x3353, 'M', 'ルピー'), - (0x3354, 'M', 'ルーブル'), - (0x3355, 'M', 'レム'), - (0x3356, 'M', 'レントゲン'), - (0x3357, 'M', 'ワット'), - (0x3358, 'M', '0点'), - (0x3359, 'M', '1点'), - (0x335A, 'M', '2点'), - (0x335B, 'M', '3点'), - (0x335C, 'M', '4点'), - (0x335D, 'M', '5点'), - (0x335E, 'M', '6点'), - (0x335F, 'M', '7点'), - (0x3360, 'M', '8点'), - (0x3361, 'M', '9点'), - (0x3362, 'M', '10点'), - (0x3363, 'M', '11点'), - (0x3364, 'M', '12点'), - (0x3365, 'M', '13点'), - (0x3366, 'M', '14点'), - (0x3367, 'M', '15点'), - (0x3368, 'M', '16点'), - (0x3369, 'M', '17点'), - (0x336A, 'M', '18点'), - (0x336B, 'M', '19点'), - (0x336C, 'M', '20点'), - (0x336D, 'M', '21点'), - (0x336E, 'M', '22点'), - (0x336F, 'M', '23点'), - (0x3370, 'M', '24点'), - (0x3371, 'M', 'hpa'), - (0x3372, 'M', 'da'), - (0x3373, 'M', 'au'), - (0x3374, 'M', 'bar'), - (0x3375, 'M', 'ov'), - (0x3376, 'M', 'pc'), - (0x3377, 'M', 'dm'), - (0x3378, 'M', 'dm2'), - (0x3379, 'M', 'dm3'), - ] - -def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x337A, 'M', 'iu'), - (0x337B, 'M', '平成'), - (0x337C, 'M', '昭和'), - (0x337D, 'M', '大正'), - (0x337E, 'M', '明治'), - (0x337F, 'M', '株式会社'), - (0x3380, 'M', 'pa'), - (0x3381, 'M', 'na'), - (0x3382, 'M', 'μa'), - (0x3383, 'M', 'ma'), - (0x3384, 'M', 'ka'), - (0x3385, 'M', 'kb'), - (0x3386, 'M', 'mb'), - (0x3387, 'M', 'gb'), - (0x3388, 'M', 'cal'), - (0x3389, 'M', 'kcal'), - (0x338A, 'M', 'pf'), - (0x338B, 'M', 'nf'), - (0x338C, 'M', 'μf'), - (0x338D, 'M', 'μg'), - (0x338E, 'M', 'mg'), - (0x338F, 'M', 'kg'), - (0x3390, 'M', 'hz'), - (0x3391, 'M', 'khz'), - (0x3392, 'M', 'mhz'), - (0x3393, 'M', 'ghz'), - (0x3394, 'M', 'thz'), - (0x3395, 'M', 'μl'), - (0x3396, 'M', 'ml'), - (0x3397, 'M', 'dl'), - (0x3398, 'M', 'kl'), - (0x3399, 'M', 'fm'), - (0x339A, 'M', 'nm'), - (0x339B, 'M', 'μm'), - (0x339C, 'M', 'mm'), - (0x339D, 'M', 'cm'), - (0x339E, 'M', 'km'), - (0x339F, 'M', 'mm2'), - (0x33A0, 'M', 'cm2'), - (0x33A1, 'M', 'm2'), - (0x33A2, 'M', 'km2'), - (0x33A3, 'M', 'mm3'), - (0x33A4, 'M', 'cm3'), - (0x33A5, 'M', 'm3'), - (0x33A6, 'M', 'km3'), - (0x33A7, 'M', 'm∕s'), - (0x33A8, 'M', 'm∕s2'), - (0x33A9, 'M', 'pa'), - (0x33AA, 'M', 'kpa'), - (0x33AB, 'M', 'mpa'), - (0x33AC, 'M', 'gpa'), - (0x33AD, 'M', 'rad'), - (0x33AE, 'M', 'rad∕s'), - (0x33AF, 'M', 'rad∕s2'), - (0x33B0, 'M', 'ps'), - (0x33B1, 'M', 'ns'), - (0x33B2, 'M', 'μs'), - (0x33B3, 'M', 'ms'), - (0x33B4, 'M', 'pv'), - (0x33B5, 'M', 'nv'), - (0x33B6, 'M', 'μv'), - (0x33B7, 'M', 'mv'), - (0x33B8, 'M', 'kv'), - (0x33B9, 'M', 'mv'), - (0x33BA, 'M', 'pw'), - (0x33BB, 'M', 'nw'), - (0x33BC, 'M', 'μw'), - (0x33BD, 'M', 'mw'), - (0x33BE, 'M', 'kw'), - (0x33BF, 'M', 'mw'), - (0x33C0, 'M', 'kω'), - (0x33C1, 'M', 'mω'), - (0x33C2, 'X'), - (0x33C3, 'M', 'bq'), - (0x33C4, 'M', 'cc'), - (0x33C5, 'M', 'cd'), - (0x33C6, 'M', 'c∕kg'), - (0x33C7, 'X'), - (0x33C8, 'M', 'db'), - (0x33C9, 'M', 'gy'), - (0x33CA, 'M', 'ha'), - (0x33CB, 'M', 'hp'), - (0x33CC, 'M', 'in'), - (0x33CD, 'M', 'kk'), - (0x33CE, 'M', 'km'), - (0x33CF, 'M', 'kt'), - (0x33D0, 'M', 'lm'), - (0x33D1, 'M', 'ln'), - (0x33D2, 'M', 'log'), - (0x33D3, 'M', 'lx'), - (0x33D4, 'M', 'mb'), - (0x33D5, 'M', 'mil'), - (0x33D6, 'M', 'mol'), - (0x33D7, 'M', 'ph'), - (0x33D8, 'X'), - (0x33D9, 'M', 'ppm'), - (0x33DA, 'M', 'pr'), - (0x33DB, 'M', 'sr'), - (0x33DC, 'M', 'sv'), - (0x33DD, 'M', 'wb'), - ] - -def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x33DE, 'M', 
'v∕m'), - (0x33DF, 'M', 'a∕m'), - (0x33E0, 'M', '1日'), - (0x33E1, 'M', '2日'), - (0x33E2, 'M', '3日'), - (0x33E3, 'M', '4日'), - (0x33E4, 'M', '5日'), - (0x33E5, 'M', '6日'), - (0x33E6, 'M', '7日'), - (0x33E7, 'M', '8日'), - (0x33E8, 'M', '9日'), - (0x33E9, 'M', '10日'), - (0x33EA, 'M', '11日'), - (0x33EB, 'M', '12日'), - (0x33EC, 'M', '13日'), - (0x33ED, 'M', '14日'), - (0x33EE, 'M', '15日'), - (0x33EF, 'M', '16日'), - (0x33F0, 'M', '17日'), - (0x33F1, 'M', '18日'), - (0x33F2, 'M', '19日'), - (0x33F3, 'M', '20日'), - (0x33F4, 'M', '21日'), - (0x33F5, 'M', '22日'), - (0x33F6, 'M', '23日'), - (0x33F7, 'M', '24日'), - (0x33F8, 'M', '25日'), - (0x33F9, 'M', '26日'), - (0x33FA, 'M', '27日'), - (0x33FB, 'M', '28日'), - (0x33FC, 'M', '29日'), - (0x33FD, 'M', '30日'), - (0x33FE, 'M', '31日'), - (0x33FF, 'M', 'gal'), - (0x3400, 'V'), - (0xA48D, 'X'), - (0xA490, 'V'), - (0xA4C7, 'X'), - (0xA4D0, 'V'), - (0xA62C, 'X'), - (0xA640, 'M', 'ꙁ'), - (0xA641, 'V'), - (0xA642, 'M', 'ꙃ'), - (0xA643, 'V'), - (0xA644, 'M', 'ꙅ'), - (0xA645, 'V'), - (0xA646, 'M', 'ꙇ'), - (0xA647, 'V'), - (0xA648, 'M', 'ꙉ'), - (0xA649, 'V'), - (0xA64A, 'M', 'ꙋ'), - (0xA64B, 'V'), - (0xA64C, 'M', 'ꙍ'), - (0xA64D, 'V'), - (0xA64E, 'M', 'ꙏ'), - (0xA64F, 'V'), - (0xA650, 'M', 'ꙑ'), - (0xA651, 'V'), - (0xA652, 'M', 'ꙓ'), - (0xA653, 'V'), - (0xA654, 'M', 'ꙕ'), - (0xA655, 'V'), - (0xA656, 'M', 'ꙗ'), - (0xA657, 'V'), - (0xA658, 'M', 'ꙙ'), - (0xA659, 'V'), - (0xA65A, 'M', 'ꙛ'), - (0xA65B, 'V'), - (0xA65C, 'M', 'ꙝ'), - (0xA65D, 'V'), - (0xA65E, 'M', 'ꙟ'), - (0xA65F, 'V'), - (0xA660, 'M', 'ꙡ'), - (0xA661, 'V'), - (0xA662, 'M', 'ꙣ'), - (0xA663, 'V'), - (0xA664, 'M', 'ꙥ'), - (0xA665, 'V'), - (0xA666, 'M', 'ꙧ'), - (0xA667, 'V'), - (0xA668, 'M', 'ꙩ'), - (0xA669, 'V'), - (0xA66A, 'M', 'ꙫ'), - (0xA66B, 'V'), - (0xA66C, 'M', 'ꙭ'), - (0xA66D, 'V'), - (0xA680, 'M', 'ꚁ'), - (0xA681, 'V'), - (0xA682, 'M', 'ꚃ'), - (0xA683, 'V'), - (0xA684, 'M', 'ꚅ'), - (0xA685, 'V'), - (0xA686, 'M', 'ꚇ'), - (0xA687, 'V'), - (0xA688, 'M', 'ꚉ'), - (0xA689, 'V'), - (0xA68A, 'M', 'ꚋ'), - (0xA68B, 'V'), - (0xA68C, 'M', 'ꚍ'), - (0xA68D, 'V'), - ] - -def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA68E, 'M', 'ꚏ'), - (0xA68F, 'V'), - (0xA690, 'M', 'ꚑ'), - (0xA691, 'V'), - (0xA692, 'M', 'ꚓ'), - (0xA693, 'V'), - (0xA694, 'M', 'ꚕ'), - (0xA695, 'V'), - (0xA696, 'M', 'ꚗ'), - (0xA697, 'V'), - (0xA698, 'M', 'ꚙ'), - (0xA699, 'V'), - (0xA69A, 'M', 'ꚛ'), - (0xA69B, 'V'), - (0xA69C, 'M', 'ъ'), - (0xA69D, 'M', 'ь'), - (0xA69E, 'V'), - (0xA6F8, 'X'), - (0xA700, 'V'), - (0xA722, 'M', 'ꜣ'), - (0xA723, 'V'), - (0xA724, 'M', 'ꜥ'), - (0xA725, 'V'), - (0xA726, 'M', 'ꜧ'), - (0xA727, 'V'), - (0xA728, 'M', 'ꜩ'), - (0xA729, 'V'), - (0xA72A, 'M', 'ꜫ'), - (0xA72B, 'V'), - (0xA72C, 'M', 'ꜭ'), - (0xA72D, 'V'), - (0xA72E, 'M', 'ꜯ'), - (0xA72F, 'V'), - (0xA732, 'M', 'ꜳ'), - (0xA733, 'V'), - (0xA734, 'M', 'ꜵ'), - (0xA735, 'V'), - (0xA736, 'M', 'ꜷ'), - (0xA737, 'V'), - (0xA738, 'M', 'ꜹ'), - (0xA739, 'V'), - (0xA73A, 'M', 'ꜻ'), - (0xA73B, 'V'), - (0xA73C, 'M', 'ꜽ'), - (0xA73D, 'V'), - (0xA73E, 'M', 'ꜿ'), - (0xA73F, 'V'), - (0xA740, 'M', 'ꝁ'), - (0xA741, 'V'), - (0xA742, 'M', 'ꝃ'), - (0xA743, 'V'), - (0xA744, 'M', 'ꝅ'), - (0xA745, 'V'), - (0xA746, 'M', 'ꝇ'), - (0xA747, 'V'), - (0xA748, 'M', 'ꝉ'), - (0xA749, 'V'), - (0xA74A, 'M', 'ꝋ'), - (0xA74B, 'V'), - (0xA74C, 'M', 'ꝍ'), - (0xA74D, 'V'), - (0xA74E, 'M', 'ꝏ'), - (0xA74F, 'V'), - (0xA750, 'M', 'ꝑ'), - (0xA751, 'V'), - (0xA752, 'M', 'ꝓ'), - (0xA753, 'V'), - (0xA754, 'M', 'ꝕ'), - (0xA755, 'V'), - (0xA756, 'M', 'ꝗ'), - (0xA757, 'V'), - (0xA758, 'M', 'ꝙ'), 
- (0xA759, 'V'), - (0xA75A, 'M', 'ꝛ'), - (0xA75B, 'V'), - (0xA75C, 'M', 'ꝝ'), - (0xA75D, 'V'), - (0xA75E, 'M', 'ꝟ'), - (0xA75F, 'V'), - (0xA760, 'M', 'ꝡ'), - (0xA761, 'V'), - (0xA762, 'M', 'ꝣ'), - (0xA763, 'V'), - (0xA764, 'M', 'ꝥ'), - (0xA765, 'V'), - (0xA766, 'M', 'ꝧ'), - (0xA767, 'V'), - (0xA768, 'M', 'ꝩ'), - (0xA769, 'V'), - (0xA76A, 'M', 'ꝫ'), - (0xA76B, 'V'), - (0xA76C, 'M', 'ꝭ'), - (0xA76D, 'V'), - (0xA76E, 'M', 'ꝯ'), - (0xA76F, 'V'), - (0xA770, 'M', 'ꝯ'), - (0xA771, 'V'), - (0xA779, 'M', 'ꝺ'), - (0xA77A, 'V'), - (0xA77B, 'M', 'ꝼ'), - ] - -def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA77C, 'V'), - (0xA77D, 'M', 'ᵹ'), - (0xA77E, 'M', 'ꝿ'), - (0xA77F, 'V'), - (0xA780, 'M', 'ꞁ'), - (0xA781, 'V'), - (0xA782, 'M', 'ꞃ'), - (0xA783, 'V'), - (0xA784, 'M', 'ꞅ'), - (0xA785, 'V'), - (0xA786, 'M', 'ꞇ'), - (0xA787, 'V'), - (0xA78B, 'M', 'ꞌ'), - (0xA78C, 'V'), - (0xA78D, 'M', 'ɥ'), - (0xA78E, 'V'), - (0xA790, 'M', 'ꞑ'), - (0xA791, 'V'), - (0xA792, 'M', 'ꞓ'), - (0xA793, 'V'), - (0xA796, 'M', 'ꞗ'), - (0xA797, 'V'), - (0xA798, 'M', 'ꞙ'), - (0xA799, 'V'), - (0xA79A, 'M', 'ꞛ'), - (0xA79B, 'V'), - (0xA79C, 'M', 'ꞝ'), - (0xA79D, 'V'), - (0xA79E, 'M', 'ꞟ'), - (0xA79F, 'V'), - (0xA7A0, 'M', 'ꞡ'), - (0xA7A1, 'V'), - (0xA7A2, 'M', 'ꞣ'), - (0xA7A3, 'V'), - (0xA7A4, 'M', 'ꞥ'), - (0xA7A5, 'V'), - (0xA7A6, 'M', 'ꞧ'), - (0xA7A7, 'V'), - (0xA7A8, 'M', 'ꞩ'), - (0xA7A9, 'V'), - (0xA7AA, 'M', 'ɦ'), - (0xA7AB, 'M', 'ɜ'), - (0xA7AC, 'M', 'ɡ'), - (0xA7AD, 'M', 'ɬ'), - (0xA7AE, 'M', 'ɪ'), - (0xA7AF, 'V'), - (0xA7B0, 'M', 'ʞ'), - (0xA7B1, 'M', 'ʇ'), - (0xA7B2, 'M', 'ʝ'), - (0xA7B3, 'M', 'ꭓ'), - (0xA7B4, 'M', 'ꞵ'), - (0xA7B5, 'V'), - (0xA7B6, 'M', 'ꞷ'), - (0xA7B7, 'V'), - (0xA7B8, 'M', 'ꞹ'), - (0xA7B9, 'V'), - (0xA7BA, 'M', 'ꞻ'), - (0xA7BB, 'V'), - (0xA7BC, 'M', 'ꞽ'), - (0xA7BD, 'V'), - (0xA7BE, 'M', 'ꞿ'), - (0xA7BF, 'V'), - (0xA7C0, 'M', 'ꟁ'), - (0xA7C1, 'V'), - (0xA7C2, 'M', 'ꟃ'), - (0xA7C3, 'V'), - (0xA7C4, 'M', 'ꞔ'), - (0xA7C5, 'M', 'ʂ'), - (0xA7C6, 'M', 'ᶎ'), - (0xA7C7, 'M', 'ꟈ'), - (0xA7C8, 'V'), - (0xA7C9, 'M', 'ꟊ'), - (0xA7CA, 'V'), - (0xA7CB, 'X'), - (0xA7D0, 'M', 'ꟑ'), - (0xA7D1, 'V'), - (0xA7D2, 'X'), - (0xA7D3, 'V'), - (0xA7D4, 'X'), - (0xA7D5, 'V'), - (0xA7D6, 'M', 'ꟗ'), - (0xA7D7, 'V'), - (0xA7D8, 'M', 'ꟙ'), - (0xA7D9, 'V'), - (0xA7DA, 'X'), - (0xA7F2, 'M', 'c'), - (0xA7F3, 'M', 'f'), - (0xA7F4, 'M', 'q'), - (0xA7F5, 'M', 'ꟶ'), - (0xA7F6, 'V'), - (0xA7F8, 'M', 'ħ'), - (0xA7F9, 'M', 'œ'), - (0xA7FA, 'V'), - (0xA82D, 'X'), - (0xA830, 'V'), - (0xA83A, 'X'), - (0xA840, 'V'), - (0xA878, 'X'), - (0xA880, 'V'), - (0xA8C6, 'X'), - ] - -def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA8CE, 'V'), - (0xA8DA, 'X'), - (0xA8E0, 'V'), - (0xA954, 'X'), - (0xA95F, 'V'), - (0xA97D, 'X'), - (0xA980, 'V'), - (0xA9CE, 'X'), - (0xA9CF, 'V'), - (0xA9DA, 'X'), - (0xA9DE, 'V'), - (0xA9FF, 'X'), - (0xAA00, 'V'), - (0xAA37, 'X'), - (0xAA40, 'V'), - (0xAA4E, 'X'), - (0xAA50, 'V'), - (0xAA5A, 'X'), - (0xAA5C, 'V'), - (0xAAC3, 'X'), - (0xAADB, 'V'), - (0xAAF7, 'X'), - (0xAB01, 'V'), - (0xAB07, 'X'), - (0xAB09, 'V'), - (0xAB0F, 'X'), - (0xAB11, 'V'), - (0xAB17, 'X'), - (0xAB20, 'V'), - (0xAB27, 'X'), - (0xAB28, 'V'), - (0xAB2F, 'X'), - (0xAB30, 'V'), - (0xAB5C, 'M', 'ꜧ'), - (0xAB5D, 'M', 'ꬷ'), - (0xAB5E, 'M', 'ɫ'), - (0xAB5F, 'M', 'ꭒ'), - (0xAB60, 'V'), - (0xAB69, 'M', 'ʍ'), - (0xAB6A, 'V'), - (0xAB6C, 'X'), - (0xAB70, 'M', 'Ꭰ'), - (0xAB71, 'M', 'Ꭱ'), - (0xAB72, 'M', 'Ꭲ'), - (0xAB73, 'M', 'Ꭳ'), - (0xAB74, 'M', 'Ꭴ'), - (0xAB75, 'M', 'Ꭵ'), - (0xAB76, 'M', 'Ꭶ'), - 
(0xAB77, 'M', 'Ꭷ'), - (0xAB78, 'M', 'Ꭸ'), - (0xAB79, 'M', 'Ꭹ'), - (0xAB7A, 'M', 'Ꭺ'), - (0xAB7B, 'M', 'Ꭻ'), - (0xAB7C, 'M', 'Ꭼ'), - (0xAB7D, 'M', 'Ꭽ'), - (0xAB7E, 'M', 'Ꭾ'), - (0xAB7F, 'M', 'Ꭿ'), - (0xAB80, 'M', 'Ꮀ'), - (0xAB81, 'M', 'Ꮁ'), - (0xAB82, 'M', 'Ꮂ'), - (0xAB83, 'M', 'Ꮃ'), - (0xAB84, 'M', 'Ꮄ'), - (0xAB85, 'M', 'Ꮅ'), - (0xAB86, 'M', 'Ꮆ'), - (0xAB87, 'M', 'Ꮇ'), - (0xAB88, 'M', 'Ꮈ'), - (0xAB89, 'M', 'Ꮉ'), - (0xAB8A, 'M', 'Ꮊ'), - (0xAB8B, 'M', 'Ꮋ'), - (0xAB8C, 'M', 'Ꮌ'), - (0xAB8D, 'M', 'Ꮍ'), - (0xAB8E, 'M', 'Ꮎ'), - (0xAB8F, 'M', 'Ꮏ'), - (0xAB90, 'M', 'Ꮐ'), - (0xAB91, 'M', 'Ꮑ'), - (0xAB92, 'M', 'Ꮒ'), - (0xAB93, 'M', 'Ꮓ'), - (0xAB94, 'M', 'Ꮔ'), - (0xAB95, 'M', 'Ꮕ'), - (0xAB96, 'M', 'Ꮖ'), - (0xAB97, 'M', 'Ꮗ'), - (0xAB98, 'M', 'Ꮘ'), - (0xAB99, 'M', 'Ꮙ'), - (0xAB9A, 'M', 'Ꮚ'), - (0xAB9B, 'M', 'Ꮛ'), - (0xAB9C, 'M', 'Ꮜ'), - (0xAB9D, 'M', 'Ꮝ'), - (0xAB9E, 'M', 'Ꮞ'), - (0xAB9F, 'M', 'Ꮟ'), - (0xABA0, 'M', 'Ꮠ'), - (0xABA1, 'M', 'Ꮡ'), - (0xABA2, 'M', 'Ꮢ'), - (0xABA3, 'M', 'Ꮣ'), - (0xABA4, 'M', 'Ꮤ'), - (0xABA5, 'M', 'Ꮥ'), - (0xABA6, 'M', 'Ꮦ'), - (0xABA7, 'M', 'Ꮧ'), - (0xABA8, 'M', 'Ꮨ'), - (0xABA9, 'M', 'Ꮩ'), - (0xABAA, 'M', 'Ꮪ'), - ] - -def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xABAB, 'M', 'Ꮫ'), - (0xABAC, 'M', 'Ꮬ'), - (0xABAD, 'M', 'Ꮭ'), - (0xABAE, 'M', 'Ꮮ'), - (0xABAF, 'M', 'Ꮯ'), - (0xABB0, 'M', 'Ꮰ'), - (0xABB1, 'M', 'Ꮱ'), - (0xABB2, 'M', 'Ꮲ'), - (0xABB3, 'M', 'Ꮳ'), - (0xABB4, 'M', 'Ꮴ'), - (0xABB5, 'M', 'Ꮵ'), - (0xABB6, 'M', 'Ꮶ'), - (0xABB7, 'M', 'Ꮷ'), - (0xABB8, 'M', 'Ꮸ'), - (0xABB9, 'M', 'Ꮹ'), - (0xABBA, 'M', 'Ꮺ'), - (0xABBB, 'M', 'Ꮻ'), - (0xABBC, 'M', 'Ꮼ'), - (0xABBD, 'M', 'Ꮽ'), - (0xABBE, 'M', 'Ꮾ'), - (0xABBF, 'M', 'Ꮿ'), - (0xABC0, 'V'), - (0xABEE, 'X'), - (0xABF0, 'V'), - (0xABFA, 'X'), - (0xAC00, 'V'), - (0xD7A4, 'X'), - (0xD7B0, 'V'), - (0xD7C7, 'X'), - (0xD7CB, 'V'), - (0xD7FC, 'X'), - (0xF900, 'M', '豈'), - (0xF901, 'M', '更'), - (0xF902, 'M', '車'), - (0xF903, 'M', '賈'), - (0xF904, 'M', '滑'), - (0xF905, 'M', '串'), - (0xF906, 'M', '句'), - (0xF907, 'M', '龜'), - (0xF909, 'M', '契'), - (0xF90A, 'M', '金'), - (0xF90B, 'M', '喇'), - (0xF90C, 'M', '奈'), - (0xF90D, 'M', '懶'), - (0xF90E, 'M', '癩'), - (0xF90F, 'M', '羅'), - (0xF910, 'M', '蘿'), - (0xF911, 'M', '螺'), - (0xF912, 'M', '裸'), - (0xF913, 'M', '邏'), - (0xF914, 'M', '樂'), - (0xF915, 'M', '洛'), - (0xF916, 'M', '烙'), - (0xF917, 'M', '珞'), - (0xF918, 'M', '落'), - (0xF919, 'M', '酪'), - (0xF91A, 'M', '駱'), - (0xF91B, 'M', '亂'), - (0xF91C, 'M', '卵'), - (0xF91D, 'M', '欄'), - (0xF91E, 'M', '爛'), - (0xF91F, 'M', '蘭'), - (0xF920, 'M', '鸞'), - (0xF921, 'M', '嵐'), - (0xF922, 'M', '濫'), - (0xF923, 'M', '藍'), - (0xF924, 'M', '襤'), - (0xF925, 'M', '拉'), - (0xF926, 'M', '臘'), - (0xF927, 'M', '蠟'), - (0xF928, 'M', '廊'), - (0xF929, 'M', '朗'), - (0xF92A, 'M', '浪'), - (0xF92B, 'M', '狼'), - (0xF92C, 'M', '郎'), - (0xF92D, 'M', '來'), - (0xF92E, 'M', '冷'), - (0xF92F, 'M', '勞'), - (0xF930, 'M', '擄'), - (0xF931, 'M', '櫓'), - (0xF932, 'M', '爐'), - (0xF933, 'M', '盧'), - (0xF934, 'M', '老'), - (0xF935, 'M', '蘆'), - (0xF936, 'M', '虜'), - (0xF937, 'M', '路'), - (0xF938, 'M', '露'), - (0xF939, 'M', '魯'), - (0xF93A, 'M', '鷺'), - (0xF93B, 'M', '碌'), - (0xF93C, 'M', '祿'), - (0xF93D, 'M', '綠'), - (0xF93E, 'M', '菉'), - (0xF93F, 'M', '錄'), - (0xF940, 'M', '鹿'), - (0xF941, 'M', '論'), - (0xF942, 'M', '壟'), - (0xF943, 'M', '弄'), - (0xF944, 'M', '籠'), - (0xF945, 'M', '聾'), - ] - -def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF946, 'M', '牢'), - (0xF947, 'M', '磊'), - (0xF948, 'M', '賂'), - (0xF949, 'M', '雷'), 
- (0xF94A, 'M', '壘'), - (0xF94B, 'M', '屢'), - (0xF94C, 'M', '樓'), - (0xF94D, 'M', '淚'), - (0xF94E, 'M', '漏'), - (0xF94F, 'M', '累'), - (0xF950, 'M', '縷'), - (0xF951, 'M', '陋'), - (0xF952, 'M', '勒'), - (0xF953, 'M', '肋'), - (0xF954, 'M', '凜'), - (0xF955, 'M', '凌'), - (0xF956, 'M', '稜'), - (0xF957, 'M', '綾'), - (0xF958, 'M', '菱'), - (0xF959, 'M', '陵'), - (0xF95A, 'M', '讀'), - (0xF95B, 'M', '拏'), - (0xF95C, 'M', '樂'), - (0xF95D, 'M', '諾'), - (0xF95E, 'M', '丹'), - (0xF95F, 'M', '寧'), - (0xF960, 'M', '怒'), - (0xF961, 'M', '率'), - (0xF962, 'M', '異'), - (0xF963, 'M', '北'), - (0xF964, 'M', '磻'), - (0xF965, 'M', '便'), - (0xF966, 'M', '復'), - (0xF967, 'M', '不'), - (0xF968, 'M', '泌'), - (0xF969, 'M', '數'), - (0xF96A, 'M', '索'), - (0xF96B, 'M', '參'), - (0xF96C, 'M', '塞'), - (0xF96D, 'M', '省'), - (0xF96E, 'M', '葉'), - (0xF96F, 'M', '說'), - (0xF970, 'M', '殺'), - (0xF971, 'M', '辰'), - (0xF972, 'M', '沈'), - (0xF973, 'M', '拾'), - (0xF974, 'M', '若'), - (0xF975, 'M', '掠'), - (0xF976, 'M', '略'), - (0xF977, 'M', '亮'), - (0xF978, 'M', '兩'), - (0xF979, 'M', '凉'), - (0xF97A, 'M', '梁'), - (0xF97B, 'M', '糧'), - (0xF97C, 'M', '良'), - (0xF97D, 'M', '諒'), - (0xF97E, 'M', '量'), - (0xF97F, 'M', '勵'), - (0xF980, 'M', '呂'), - (0xF981, 'M', '女'), - (0xF982, 'M', '廬'), - (0xF983, 'M', '旅'), - (0xF984, 'M', '濾'), - (0xF985, 'M', '礪'), - (0xF986, 'M', '閭'), - (0xF987, 'M', '驪'), - (0xF988, 'M', '麗'), - (0xF989, 'M', '黎'), - (0xF98A, 'M', '力'), - (0xF98B, 'M', '曆'), - (0xF98C, 'M', '歷'), - (0xF98D, 'M', '轢'), - (0xF98E, 'M', '年'), - (0xF98F, 'M', '憐'), - (0xF990, 'M', '戀'), - (0xF991, 'M', '撚'), - (0xF992, 'M', '漣'), - (0xF993, 'M', '煉'), - (0xF994, 'M', '璉'), - (0xF995, 'M', '秊'), - (0xF996, 'M', '練'), - (0xF997, 'M', '聯'), - (0xF998, 'M', '輦'), - (0xF999, 'M', '蓮'), - (0xF99A, 'M', '連'), - (0xF99B, 'M', '鍊'), - (0xF99C, 'M', '列'), - (0xF99D, 'M', '劣'), - (0xF99E, 'M', '咽'), - (0xF99F, 'M', '烈'), - (0xF9A0, 'M', '裂'), - (0xF9A1, 'M', '說'), - (0xF9A2, 'M', '廉'), - (0xF9A3, 'M', '念'), - (0xF9A4, 'M', '捻'), - (0xF9A5, 'M', '殮'), - (0xF9A6, 'M', '簾'), - (0xF9A7, 'M', '獵'), - (0xF9A8, 'M', '令'), - (0xF9A9, 'M', '囹'), - ] - -def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF9AA, 'M', '寧'), - (0xF9AB, 'M', '嶺'), - (0xF9AC, 'M', '怜'), - (0xF9AD, 'M', '玲'), - (0xF9AE, 'M', '瑩'), - (0xF9AF, 'M', '羚'), - (0xF9B0, 'M', '聆'), - (0xF9B1, 'M', '鈴'), - (0xF9B2, 'M', '零'), - (0xF9B3, 'M', '靈'), - (0xF9B4, 'M', '領'), - (0xF9B5, 'M', '例'), - (0xF9B6, 'M', '禮'), - (0xF9B7, 'M', '醴'), - (0xF9B8, 'M', '隸'), - (0xF9B9, 'M', '惡'), - (0xF9BA, 'M', '了'), - (0xF9BB, 'M', '僚'), - (0xF9BC, 'M', '寮'), - (0xF9BD, 'M', '尿'), - (0xF9BE, 'M', '料'), - (0xF9BF, 'M', '樂'), - (0xF9C0, 'M', '燎'), - (0xF9C1, 'M', '療'), - (0xF9C2, 'M', '蓼'), - (0xF9C3, 'M', '遼'), - (0xF9C4, 'M', '龍'), - (0xF9C5, 'M', '暈'), - (0xF9C6, 'M', '阮'), - (0xF9C7, 'M', '劉'), - (0xF9C8, 'M', '杻'), - (0xF9C9, 'M', '柳'), - (0xF9CA, 'M', '流'), - (0xF9CB, 'M', '溜'), - (0xF9CC, 'M', '琉'), - (0xF9CD, 'M', '留'), - (0xF9CE, 'M', '硫'), - (0xF9CF, 'M', '紐'), - (0xF9D0, 'M', '類'), - (0xF9D1, 'M', '六'), - (0xF9D2, 'M', '戮'), - (0xF9D3, 'M', '陸'), - (0xF9D4, 'M', '倫'), - (0xF9D5, 'M', '崙'), - (0xF9D6, 'M', '淪'), - (0xF9D7, 'M', '輪'), - (0xF9D8, 'M', '律'), - (0xF9D9, 'M', '慄'), - (0xF9DA, 'M', '栗'), - (0xF9DB, 'M', '率'), - (0xF9DC, 'M', '隆'), - (0xF9DD, 'M', '利'), - (0xF9DE, 'M', '吏'), - (0xF9DF, 'M', '履'), - (0xF9E0, 'M', '易'), - (0xF9E1, 'M', '李'), - (0xF9E2, 'M', '梨'), - (0xF9E3, 'M', '泥'), - (0xF9E4, 'M', '理'), - (0xF9E5, 'M', '痢'), - (0xF9E6, 'M', '罹'), - (0xF9E7, 
'M', '裏'), - (0xF9E8, 'M', '裡'), - (0xF9E9, 'M', '里'), - (0xF9EA, 'M', '離'), - (0xF9EB, 'M', '匿'), - (0xF9EC, 'M', '溺'), - (0xF9ED, 'M', '吝'), - (0xF9EE, 'M', '燐'), - (0xF9EF, 'M', '璘'), - (0xF9F0, 'M', '藺'), - (0xF9F1, 'M', '隣'), - (0xF9F2, 'M', '鱗'), - (0xF9F3, 'M', '麟'), - (0xF9F4, 'M', '林'), - (0xF9F5, 'M', '淋'), - (0xF9F6, 'M', '臨'), - (0xF9F7, 'M', '立'), - (0xF9F8, 'M', '笠'), - (0xF9F9, 'M', '粒'), - (0xF9FA, 'M', '狀'), - (0xF9FB, 'M', '炙'), - (0xF9FC, 'M', '識'), - (0xF9FD, 'M', '什'), - (0xF9FE, 'M', '茶'), - (0xF9FF, 'M', '刺'), - (0xFA00, 'M', '切'), - (0xFA01, 'M', '度'), - (0xFA02, 'M', '拓'), - (0xFA03, 'M', '糖'), - (0xFA04, 'M', '宅'), - (0xFA05, 'M', '洞'), - (0xFA06, 'M', '暴'), - (0xFA07, 'M', '輻'), - (0xFA08, 'M', '行'), - (0xFA09, 'M', '降'), - (0xFA0A, 'M', '見'), - (0xFA0B, 'M', '廓'), - (0xFA0C, 'M', '兀'), - (0xFA0D, 'M', '嗀'), - ] - -def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA0E, 'V'), - (0xFA10, 'M', '塚'), - (0xFA11, 'V'), - (0xFA12, 'M', '晴'), - (0xFA13, 'V'), - (0xFA15, 'M', '凞'), - (0xFA16, 'M', '猪'), - (0xFA17, 'M', '益'), - (0xFA18, 'M', '礼'), - (0xFA19, 'M', '神'), - (0xFA1A, 'M', '祥'), - (0xFA1B, 'M', '福'), - (0xFA1C, 'M', '靖'), - (0xFA1D, 'M', '精'), - (0xFA1E, 'M', '羽'), - (0xFA1F, 'V'), - (0xFA20, 'M', '蘒'), - (0xFA21, 'V'), - (0xFA22, 'M', '諸'), - (0xFA23, 'V'), - (0xFA25, 'M', '逸'), - (0xFA26, 'M', '都'), - (0xFA27, 'V'), - (0xFA2A, 'M', '飯'), - (0xFA2B, 'M', '飼'), - (0xFA2C, 'M', '館'), - (0xFA2D, 'M', '鶴'), - (0xFA2E, 'M', '郞'), - (0xFA2F, 'M', '隷'), - (0xFA30, 'M', '侮'), - (0xFA31, 'M', '僧'), - (0xFA32, 'M', '免'), - (0xFA33, 'M', '勉'), - (0xFA34, 'M', '勤'), - (0xFA35, 'M', '卑'), - (0xFA36, 'M', '喝'), - (0xFA37, 'M', '嘆'), - (0xFA38, 'M', '器'), - (0xFA39, 'M', '塀'), - (0xFA3A, 'M', '墨'), - (0xFA3B, 'M', '層'), - (0xFA3C, 'M', '屮'), - (0xFA3D, 'M', '悔'), - (0xFA3E, 'M', '慨'), - (0xFA3F, 'M', '憎'), - (0xFA40, 'M', '懲'), - (0xFA41, 'M', '敏'), - (0xFA42, 'M', '既'), - (0xFA43, 'M', '暑'), - (0xFA44, 'M', '梅'), - (0xFA45, 'M', '海'), - (0xFA46, 'M', '渚'), - (0xFA47, 'M', '漢'), - (0xFA48, 'M', '煮'), - (0xFA49, 'M', '爫'), - (0xFA4A, 'M', '琢'), - (0xFA4B, 'M', '碑'), - (0xFA4C, 'M', '社'), - (0xFA4D, 'M', '祉'), - (0xFA4E, 'M', '祈'), - (0xFA4F, 'M', '祐'), - (0xFA50, 'M', '祖'), - (0xFA51, 'M', '祝'), - (0xFA52, 'M', '禍'), - (0xFA53, 'M', '禎'), - (0xFA54, 'M', '穀'), - (0xFA55, 'M', '突'), - (0xFA56, 'M', '節'), - (0xFA57, 'M', '練'), - (0xFA58, 'M', '縉'), - (0xFA59, 'M', '繁'), - (0xFA5A, 'M', '署'), - (0xFA5B, 'M', '者'), - (0xFA5C, 'M', '臭'), - (0xFA5D, 'M', '艹'), - (0xFA5F, 'M', '著'), - (0xFA60, 'M', '褐'), - (0xFA61, 'M', '視'), - (0xFA62, 'M', '謁'), - (0xFA63, 'M', '謹'), - (0xFA64, 'M', '賓'), - (0xFA65, 'M', '贈'), - (0xFA66, 'M', '辶'), - (0xFA67, 'M', '逸'), - (0xFA68, 'M', '難'), - (0xFA69, 'M', '響'), - (0xFA6A, 'M', '頻'), - (0xFA6B, 'M', '恵'), - (0xFA6C, 'M', '𤋮'), - (0xFA6D, 'M', '舘'), - (0xFA6E, 'X'), - (0xFA70, 'M', '並'), - (0xFA71, 'M', '况'), - (0xFA72, 'M', '全'), - (0xFA73, 'M', '侀'), - (0xFA74, 'M', '充'), - (0xFA75, 'M', '冀'), - (0xFA76, 'M', '勇'), - (0xFA77, 'M', '勺'), - (0xFA78, 'M', '喝'), - ] - -def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA79, 'M', '啕'), - (0xFA7A, 'M', '喙'), - (0xFA7B, 'M', '嗢'), - (0xFA7C, 'M', '塚'), - (0xFA7D, 'M', '墳'), - (0xFA7E, 'M', '奄'), - (0xFA7F, 'M', '奔'), - (0xFA80, 'M', '婢'), - (0xFA81, 'M', '嬨'), - (0xFA82, 'M', '廒'), - (0xFA83, 'M', '廙'), - (0xFA84, 'M', '彩'), - (0xFA85, 'M', '徭'), - (0xFA86, 'M', '惘'), - (0xFA87, 'M', '慎'), - (0xFA88, 'M', '愈'), - (0xFA89, 'M', '憎'), 
- (0xFA8A, 'M', '慠'), - (0xFA8B, 'M', '懲'), - (0xFA8C, 'M', '戴'), - (0xFA8D, 'M', '揄'), - (0xFA8E, 'M', '搜'), - (0xFA8F, 'M', '摒'), - (0xFA90, 'M', '敖'), - (0xFA91, 'M', '晴'), - (0xFA92, 'M', '朗'), - (0xFA93, 'M', '望'), - (0xFA94, 'M', '杖'), - (0xFA95, 'M', '歹'), - (0xFA96, 'M', '殺'), - (0xFA97, 'M', '流'), - (0xFA98, 'M', '滛'), - (0xFA99, 'M', '滋'), - (0xFA9A, 'M', '漢'), - (0xFA9B, 'M', '瀞'), - (0xFA9C, 'M', '煮'), - (0xFA9D, 'M', '瞧'), - (0xFA9E, 'M', '爵'), - (0xFA9F, 'M', '犯'), - (0xFAA0, 'M', '猪'), - (0xFAA1, 'M', '瑱'), - (0xFAA2, 'M', '甆'), - (0xFAA3, 'M', '画'), - (0xFAA4, 'M', '瘝'), - (0xFAA5, 'M', '瘟'), - (0xFAA6, 'M', '益'), - (0xFAA7, 'M', '盛'), - (0xFAA8, 'M', '直'), - (0xFAA9, 'M', '睊'), - (0xFAAA, 'M', '着'), - (0xFAAB, 'M', '磌'), - (0xFAAC, 'M', '窱'), - (0xFAAD, 'M', '節'), - (0xFAAE, 'M', '类'), - (0xFAAF, 'M', '絛'), - (0xFAB0, 'M', '練'), - (0xFAB1, 'M', '缾'), - (0xFAB2, 'M', '者'), - (0xFAB3, 'M', '荒'), - (0xFAB4, 'M', '華'), - (0xFAB5, 'M', '蝹'), - (0xFAB6, 'M', '襁'), - (0xFAB7, 'M', '覆'), - (0xFAB8, 'M', '視'), - (0xFAB9, 'M', '調'), - (0xFABA, 'M', '諸'), - (0xFABB, 'M', '請'), - (0xFABC, 'M', '謁'), - (0xFABD, 'M', '諾'), - (0xFABE, 'M', '諭'), - (0xFABF, 'M', '謹'), - (0xFAC0, 'M', '變'), - (0xFAC1, 'M', '贈'), - (0xFAC2, 'M', '輸'), - (0xFAC3, 'M', '遲'), - (0xFAC4, 'M', '醙'), - (0xFAC5, 'M', '鉶'), - (0xFAC6, 'M', '陼'), - (0xFAC7, 'M', '難'), - (0xFAC8, 'M', '靖'), - (0xFAC9, 'M', '韛'), - (0xFACA, 'M', '響'), - (0xFACB, 'M', '頋'), - (0xFACC, 'M', '頻'), - (0xFACD, 'M', '鬒'), - (0xFACE, 'M', '龜'), - (0xFACF, 'M', '𢡊'), - (0xFAD0, 'M', '𢡄'), - (0xFAD1, 'M', '𣏕'), - (0xFAD2, 'M', '㮝'), - (0xFAD3, 'M', '䀘'), - (0xFAD4, 'M', '䀹'), - (0xFAD5, 'M', '𥉉'), - (0xFAD6, 'M', '𥳐'), - (0xFAD7, 'M', '𧻓'), - (0xFAD8, 'M', '齃'), - (0xFAD9, 'M', '龎'), - (0xFADA, 'X'), - (0xFB00, 'M', 'ff'), - (0xFB01, 'M', 'fi'), - ] - -def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFB02, 'M', 'fl'), - (0xFB03, 'M', 'ffi'), - (0xFB04, 'M', 'ffl'), - (0xFB05, 'M', 'st'), - (0xFB07, 'X'), - (0xFB13, 'M', 'մն'), - (0xFB14, 'M', 'մե'), - (0xFB15, 'M', 'մի'), - (0xFB16, 'M', 'վն'), - (0xFB17, 'M', 'մխ'), - (0xFB18, 'X'), - (0xFB1D, 'M', 'יִ'), - (0xFB1E, 'V'), - (0xFB1F, 'M', 'ײַ'), - (0xFB20, 'M', 'ע'), - (0xFB21, 'M', 'א'), - (0xFB22, 'M', 'ד'), - (0xFB23, 'M', 'ה'), - (0xFB24, 'M', 'כ'), - (0xFB25, 'M', 'ל'), - (0xFB26, 'M', 'ם'), - (0xFB27, 'M', 'ר'), - (0xFB28, 'M', 'ת'), - (0xFB29, '3', '+'), - (0xFB2A, 'M', 'שׁ'), - (0xFB2B, 'M', 'שׂ'), - (0xFB2C, 'M', 'שּׁ'), - (0xFB2D, 'M', 'שּׂ'), - (0xFB2E, 'M', 'אַ'), - (0xFB2F, 'M', 'אָ'), - (0xFB30, 'M', 'אּ'), - (0xFB31, 'M', 'בּ'), - (0xFB32, 'M', 'גּ'), - (0xFB33, 'M', 'דּ'), - (0xFB34, 'M', 'הּ'), - (0xFB35, 'M', 'וּ'), - (0xFB36, 'M', 'זּ'), - (0xFB37, 'X'), - (0xFB38, 'M', 'טּ'), - (0xFB39, 'M', 'יּ'), - (0xFB3A, 'M', 'ךּ'), - (0xFB3B, 'M', 'כּ'), - (0xFB3C, 'M', 'לּ'), - (0xFB3D, 'X'), - (0xFB3E, 'M', 'מּ'), - (0xFB3F, 'X'), - (0xFB40, 'M', 'נּ'), - (0xFB41, 'M', 'סּ'), - (0xFB42, 'X'), - (0xFB43, 'M', 'ףּ'), - (0xFB44, 'M', 'פּ'), - (0xFB45, 'X'), - (0xFB46, 'M', 'צּ'), - (0xFB47, 'M', 'קּ'), - (0xFB48, 'M', 'רּ'), - (0xFB49, 'M', 'שּ'), - (0xFB4A, 'M', 'תּ'), - (0xFB4B, 'M', 'וֹ'), - (0xFB4C, 'M', 'בֿ'), - (0xFB4D, 'M', 'כֿ'), - (0xFB4E, 'M', 'פֿ'), - (0xFB4F, 'M', 'אל'), - (0xFB50, 'M', 'ٱ'), - (0xFB52, 'M', 'ٻ'), - (0xFB56, 'M', 'پ'), - (0xFB5A, 'M', 'ڀ'), - (0xFB5E, 'M', 'ٺ'), - (0xFB62, 'M', 'ٿ'), - (0xFB66, 'M', 'ٹ'), - (0xFB6A, 'M', 'ڤ'), - (0xFB6E, 'M', 'ڦ'), - (0xFB72, 'M', 'ڄ'), - (0xFB76, 'M', 'ڃ'), - (0xFB7A, 'M', 'چ'), - 
(0xFB7E, 'M', 'ڇ'), - (0xFB82, 'M', 'ڍ'), - (0xFB84, 'M', 'ڌ'), - (0xFB86, 'M', 'ڎ'), - (0xFB88, 'M', 'ڈ'), - (0xFB8A, 'M', 'ژ'), - (0xFB8C, 'M', 'ڑ'), - (0xFB8E, 'M', 'ک'), - (0xFB92, 'M', 'گ'), - (0xFB96, 'M', 'ڳ'), - (0xFB9A, 'M', 'ڱ'), - (0xFB9E, 'M', 'ں'), - (0xFBA0, 'M', 'ڻ'), - (0xFBA4, 'M', 'ۀ'), - (0xFBA6, 'M', 'ہ'), - (0xFBAA, 'M', 'ھ'), - (0xFBAE, 'M', 'ے'), - (0xFBB0, 'M', 'ۓ'), - (0xFBB2, 'V'), - (0xFBC3, 'X'), - (0xFBD3, 'M', 'ڭ'), - (0xFBD7, 'M', 'ۇ'), - (0xFBD9, 'M', 'ۆ'), - (0xFBDB, 'M', 'ۈ'), - (0xFBDD, 'M', 'ۇٴ'), - (0xFBDE, 'M', 'ۋ'), - ] - -def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFBE0, 'M', 'ۅ'), - (0xFBE2, 'M', 'ۉ'), - (0xFBE4, 'M', 'ې'), - (0xFBE8, 'M', 'ى'), - (0xFBEA, 'M', 'ئا'), - (0xFBEC, 'M', 'ئە'), - (0xFBEE, 'M', 'ئو'), - (0xFBF0, 'M', 'ئۇ'), - (0xFBF2, 'M', 'ئۆ'), - (0xFBF4, 'M', 'ئۈ'), - (0xFBF6, 'M', 'ئې'), - (0xFBF9, 'M', 'ئى'), - (0xFBFC, 'M', 'ی'), - (0xFC00, 'M', 'ئج'), - (0xFC01, 'M', 'ئح'), - (0xFC02, 'M', 'ئم'), - (0xFC03, 'M', 'ئى'), - (0xFC04, 'M', 'ئي'), - (0xFC05, 'M', 'بج'), - (0xFC06, 'M', 'بح'), - (0xFC07, 'M', 'بخ'), - (0xFC08, 'M', 'بم'), - (0xFC09, 'M', 'بى'), - (0xFC0A, 'M', 'بي'), - (0xFC0B, 'M', 'تج'), - (0xFC0C, 'M', 'تح'), - (0xFC0D, 'M', 'تخ'), - (0xFC0E, 'M', 'تم'), - (0xFC0F, 'M', 'تى'), - (0xFC10, 'M', 'تي'), - (0xFC11, 'M', 'ثج'), - (0xFC12, 'M', 'ثم'), - (0xFC13, 'M', 'ثى'), - (0xFC14, 'M', 'ثي'), - (0xFC15, 'M', 'جح'), - (0xFC16, 'M', 'جم'), - (0xFC17, 'M', 'حج'), - (0xFC18, 'M', 'حم'), - (0xFC19, 'M', 'خج'), - (0xFC1A, 'M', 'خح'), - (0xFC1B, 'M', 'خم'), - (0xFC1C, 'M', 'سج'), - (0xFC1D, 'M', 'سح'), - (0xFC1E, 'M', 'سخ'), - (0xFC1F, 'M', 'سم'), - (0xFC20, 'M', 'صح'), - (0xFC21, 'M', 'صم'), - (0xFC22, 'M', 'ضج'), - (0xFC23, 'M', 'ضح'), - (0xFC24, 'M', 'ضخ'), - (0xFC25, 'M', 'ضم'), - (0xFC26, 'M', 'طح'), - (0xFC27, 'M', 'طم'), - (0xFC28, 'M', 'ظم'), - (0xFC29, 'M', 'عج'), - (0xFC2A, 'M', 'عم'), - (0xFC2B, 'M', 'غج'), - (0xFC2C, 'M', 'غم'), - (0xFC2D, 'M', 'فج'), - (0xFC2E, 'M', 'فح'), - (0xFC2F, 'M', 'فخ'), - (0xFC30, 'M', 'فم'), - (0xFC31, 'M', 'فى'), - (0xFC32, 'M', 'في'), - (0xFC33, 'M', 'قح'), - (0xFC34, 'M', 'قم'), - (0xFC35, 'M', 'قى'), - (0xFC36, 'M', 'قي'), - (0xFC37, 'M', 'كا'), - (0xFC38, 'M', 'كج'), - (0xFC39, 'M', 'كح'), - (0xFC3A, 'M', 'كخ'), - (0xFC3B, 'M', 'كل'), - (0xFC3C, 'M', 'كم'), - (0xFC3D, 'M', 'كى'), - (0xFC3E, 'M', 'كي'), - (0xFC3F, 'M', 'لج'), - (0xFC40, 'M', 'لح'), - (0xFC41, 'M', 'لخ'), - (0xFC42, 'M', 'لم'), - (0xFC43, 'M', 'لى'), - (0xFC44, 'M', 'لي'), - (0xFC45, 'M', 'مج'), - (0xFC46, 'M', 'مح'), - (0xFC47, 'M', 'مخ'), - (0xFC48, 'M', 'مم'), - (0xFC49, 'M', 'مى'), - (0xFC4A, 'M', 'مي'), - (0xFC4B, 'M', 'نج'), - (0xFC4C, 'M', 'نح'), - (0xFC4D, 'M', 'نخ'), - (0xFC4E, 'M', 'نم'), - (0xFC4F, 'M', 'نى'), - (0xFC50, 'M', 'ني'), - (0xFC51, 'M', 'هج'), - (0xFC52, 'M', 'هم'), - (0xFC53, 'M', 'هى'), - (0xFC54, 'M', 'هي'), - (0xFC55, 'M', 'يج'), - (0xFC56, 'M', 'يح'), - ] - -def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFC57, 'M', 'يخ'), - (0xFC58, 'M', 'يم'), - (0xFC59, 'M', 'يى'), - (0xFC5A, 'M', 'يي'), - (0xFC5B, 'M', 'ذٰ'), - (0xFC5C, 'M', 'رٰ'), - (0xFC5D, 'M', 'ىٰ'), - (0xFC5E, '3', ' ٌّ'), - (0xFC5F, '3', ' ٍّ'), - (0xFC60, '3', ' َّ'), - (0xFC61, '3', ' ُّ'), - (0xFC62, '3', ' ِّ'), - (0xFC63, '3', ' ّٰ'), - (0xFC64, 'M', 'ئر'), - (0xFC65, 'M', 'ئز'), - (0xFC66, 'M', 'ئم'), - (0xFC67, 'M', 'ئن'), - (0xFC68, 'M', 'ئى'), - (0xFC69, 'M', 'ئي'), - (0xFC6A, 'M', 'بر'), - (0xFC6B, 'M', 'بز'), - (0xFC6C, 'M', 'بم'), - (0xFC6D, 
'M', 'بن'), - (0xFC6E, 'M', 'بى'), - (0xFC6F, 'M', 'بي'), - (0xFC70, 'M', 'تر'), - (0xFC71, 'M', 'تز'), - (0xFC72, 'M', 'تم'), - (0xFC73, 'M', 'تن'), - (0xFC74, 'M', 'تى'), - (0xFC75, 'M', 'تي'), - (0xFC76, 'M', 'ثر'), - (0xFC77, 'M', 'ثز'), - (0xFC78, 'M', 'ثم'), - (0xFC79, 'M', 'ثن'), - (0xFC7A, 'M', 'ثى'), - (0xFC7B, 'M', 'ثي'), - (0xFC7C, 'M', 'فى'), - (0xFC7D, 'M', 'في'), - (0xFC7E, 'M', 'قى'), - (0xFC7F, 'M', 'قي'), - (0xFC80, 'M', 'كا'), - (0xFC81, 'M', 'كل'), - (0xFC82, 'M', 'كم'), - (0xFC83, 'M', 'كى'), - (0xFC84, 'M', 'كي'), - (0xFC85, 'M', 'لم'), - (0xFC86, 'M', 'لى'), - (0xFC87, 'M', 'لي'), - (0xFC88, 'M', 'ما'), - (0xFC89, 'M', 'مم'), - (0xFC8A, 'M', 'نر'), - (0xFC8B, 'M', 'نز'), - (0xFC8C, 'M', 'نم'), - (0xFC8D, 'M', 'نن'), - (0xFC8E, 'M', 'نى'), - (0xFC8F, 'M', 'ني'), - (0xFC90, 'M', 'ىٰ'), - (0xFC91, 'M', 'ير'), - (0xFC92, 'M', 'يز'), - (0xFC93, 'M', 'يم'), - (0xFC94, 'M', 'ين'), - (0xFC95, 'M', 'يى'), - (0xFC96, 'M', 'يي'), - (0xFC97, 'M', 'ئج'), - (0xFC98, 'M', 'ئح'), - (0xFC99, 'M', 'ئخ'), - (0xFC9A, 'M', 'ئم'), - (0xFC9B, 'M', 'ئه'), - (0xFC9C, 'M', 'بج'), - (0xFC9D, 'M', 'بح'), - (0xFC9E, 'M', 'بخ'), - (0xFC9F, 'M', 'بم'), - (0xFCA0, 'M', 'به'), - (0xFCA1, 'M', 'تج'), - (0xFCA2, 'M', 'تح'), - (0xFCA3, 'M', 'تخ'), - (0xFCA4, 'M', 'تم'), - (0xFCA5, 'M', 'ته'), - (0xFCA6, 'M', 'ثم'), - (0xFCA7, 'M', 'جح'), - (0xFCA8, 'M', 'جم'), - (0xFCA9, 'M', 'حج'), - (0xFCAA, 'M', 'حم'), - (0xFCAB, 'M', 'خج'), - (0xFCAC, 'M', 'خم'), - (0xFCAD, 'M', 'سج'), - (0xFCAE, 'M', 'سح'), - (0xFCAF, 'M', 'سخ'), - (0xFCB0, 'M', 'سم'), - (0xFCB1, 'M', 'صح'), - (0xFCB2, 'M', 'صخ'), - (0xFCB3, 'M', 'صم'), - (0xFCB4, 'M', 'ضج'), - (0xFCB5, 'M', 'ضح'), - (0xFCB6, 'M', 'ضخ'), - (0xFCB7, 'M', 'ضم'), - (0xFCB8, 'M', 'طح'), - (0xFCB9, 'M', 'ظم'), - (0xFCBA, 'M', 'عج'), - ] - -def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFCBB, 'M', 'عم'), - (0xFCBC, 'M', 'غج'), - (0xFCBD, 'M', 'غم'), - (0xFCBE, 'M', 'فج'), - (0xFCBF, 'M', 'فح'), - (0xFCC0, 'M', 'فخ'), - (0xFCC1, 'M', 'فم'), - (0xFCC2, 'M', 'قح'), - (0xFCC3, 'M', 'قم'), - (0xFCC4, 'M', 'كج'), - (0xFCC5, 'M', 'كح'), - (0xFCC6, 'M', 'كخ'), - (0xFCC7, 'M', 'كل'), - (0xFCC8, 'M', 'كم'), - (0xFCC9, 'M', 'لج'), - (0xFCCA, 'M', 'لح'), - (0xFCCB, 'M', 'لخ'), - (0xFCCC, 'M', 'لم'), - (0xFCCD, 'M', 'له'), - (0xFCCE, 'M', 'مج'), - (0xFCCF, 'M', 'مح'), - (0xFCD0, 'M', 'مخ'), - (0xFCD1, 'M', 'مم'), - (0xFCD2, 'M', 'نج'), - (0xFCD3, 'M', 'نح'), - (0xFCD4, 'M', 'نخ'), - (0xFCD5, 'M', 'نم'), - (0xFCD6, 'M', 'نه'), - (0xFCD7, 'M', 'هج'), - (0xFCD8, 'M', 'هم'), - (0xFCD9, 'M', 'هٰ'), - (0xFCDA, 'M', 'يج'), - (0xFCDB, 'M', 'يح'), - (0xFCDC, 'M', 'يخ'), - (0xFCDD, 'M', 'يم'), - (0xFCDE, 'M', 'يه'), - (0xFCDF, 'M', 'ئم'), - (0xFCE0, 'M', 'ئه'), - (0xFCE1, 'M', 'بم'), - (0xFCE2, 'M', 'به'), - (0xFCE3, 'M', 'تم'), - (0xFCE4, 'M', 'ته'), - (0xFCE5, 'M', 'ثم'), - (0xFCE6, 'M', 'ثه'), - (0xFCE7, 'M', 'سم'), - (0xFCE8, 'M', 'سه'), - (0xFCE9, 'M', 'شم'), - (0xFCEA, 'M', 'شه'), - (0xFCEB, 'M', 'كل'), - (0xFCEC, 'M', 'كم'), - (0xFCED, 'M', 'لم'), - (0xFCEE, 'M', 'نم'), - (0xFCEF, 'M', 'نه'), - (0xFCF0, 'M', 'يم'), - (0xFCF1, 'M', 'يه'), - (0xFCF2, 'M', 'ـَّ'), - (0xFCF3, 'M', 'ـُّ'), - (0xFCF4, 'M', 'ـِّ'), - (0xFCF5, 'M', 'طى'), - (0xFCF6, 'M', 'طي'), - (0xFCF7, 'M', 'عى'), - (0xFCF8, 'M', 'عي'), - (0xFCF9, 'M', 'غى'), - (0xFCFA, 'M', 'غي'), - (0xFCFB, 'M', 'سى'), - (0xFCFC, 'M', 'سي'), - (0xFCFD, 'M', 'شى'), - (0xFCFE, 'M', 'شي'), - (0xFCFF, 'M', 'حى'), - (0xFD00, 'M', 'حي'), - (0xFD01, 'M', 'جى'), - (0xFD02, 'M', 'جي'), - (0xFD03, 'M', 'خى'), - 
(0xFD04, 'M', 'خي'), - (0xFD05, 'M', 'صى'), - (0xFD06, 'M', 'صي'), - (0xFD07, 'M', 'ضى'), - (0xFD08, 'M', 'ضي'), - (0xFD09, 'M', 'شج'), - (0xFD0A, 'M', 'شح'), - (0xFD0B, 'M', 'شخ'), - (0xFD0C, 'M', 'شم'), - (0xFD0D, 'M', 'شر'), - (0xFD0E, 'M', 'سر'), - (0xFD0F, 'M', 'صر'), - (0xFD10, 'M', 'ضر'), - (0xFD11, 'M', 'طى'), - (0xFD12, 'M', 'طي'), - (0xFD13, 'M', 'عى'), - (0xFD14, 'M', 'عي'), - (0xFD15, 'M', 'غى'), - (0xFD16, 'M', 'غي'), - (0xFD17, 'M', 'سى'), - (0xFD18, 'M', 'سي'), - (0xFD19, 'M', 'شى'), - (0xFD1A, 'M', 'شي'), - (0xFD1B, 'M', 'حى'), - (0xFD1C, 'M', 'حي'), - (0xFD1D, 'M', 'جى'), - (0xFD1E, 'M', 'جي'), - ] - -def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFD1F, 'M', 'خى'), - (0xFD20, 'M', 'خي'), - (0xFD21, 'M', 'صى'), - (0xFD22, 'M', 'صي'), - (0xFD23, 'M', 'ضى'), - (0xFD24, 'M', 'ضي'), - (0xFD25, 'M', 'شج'), - (0xFD26, 'M', 'شح'), - (0xFD27, 'M', 'شخ'), - (0xFD28, 'M', 'شم'), - (0xFD29, 'M', 'شر'), - (0xFD2A, 'M', 'سر'), - (0xFD2B, 'M', 'صر'), - (0xFD2C, 'M', 'ضر'), - (0xFD2D, 'M', 'شج'), - (0xFD2E, 'M', 'شح'), - (0xFD2F, 'M', 'شخ'), - (0xFD30, 'M', 'شم'), - (0xFD31, 'M', 'سه'), - (0xFD32, 'M', 'شه'), - (0xFD33, 'M', 'طم'), - (0xFD34, 'M', 'سج'), - (0xFD35, 'M', 'سح'), - (0xFD36, 'M', 'سخ'), - (0xFD37, 'M', 'شج'), - (0xFD38, 'M', 'شح'), - (0xFD39, 'M', 'شخ'), - (0xFD3A, 'M', 'طم'), - (0xFD3B, 'M', 'ظم'), - (0xFD3C, 'M', 'اً'), - (0xFD3E, 'V'), - (0xFD50, 'M', 'تجم'), - (0xFD51, 'M', 'تحج'), - (0xFD53, 'M', 'تحم'), - (0xFD54, 'M', 'تخم'), - (0xFD55, 'M', 'تمج'), - (0xFD56, 'M', 'تمح'), - (0xFD57, 'M', 'تمخ'), - (0xFD58, 'M', 'جمح'), - (0xFD5A, 'M', 'حمي'), - (0xFD5B, 'M', 'حمى'), - (0xFD5C, 'M', 'سحج'), - (0xFD5D, 'M', 'سجح'), - (0xFD5E, 'M', 'سجى'), - (0xFD5F, 'M', 'سمح'), - (0xFD61, 'M', 'سمج'), - (0xFD62, 'M', 'سمم'), - (0xFD64, 'M', 'صحح'), - (0xFD66, 'M', 'صمم'), - (0xFD67, 'M', 'شحم'), - (0xFD69, 'M', 'شجي'), - (0xFD6A, 'M', 'شمخ'), - (0xFD6C, 'M', 'شمم'), - (0xFD6E, 'M', 'ضحى'), - (0xFD6F, 'M', 'ضخم'), - (0xFD71, 'M', 'طمح'), - (0xFD73, 'M', 'طمم'), - (0xFD74, 'M', 'طمي'), - (0xFD75, 'M', 'عجم'), - (0xFD76, 'M', 'عمم'), - (0xFD78, 'M', 'عمى'), - (0xFD79, 'M', 'غمم'), - (0xFD7A, 'M', 'غمي'), - (0xFD7B, 'M', 'غمى'), - (0xFD7C, 'M', 'فخم'), - (0xFD7E, 'M', 'قمح'), - (0xFD7F, 'M', 'قمم'), - (0xFD80, 'M', 'لحم'), - (0xFD81, 'M', 'لحي'), - (0xFD82, 'M', 'لحى'), - (0xFD83, 'M', 'لجج'), - (0xFD85, 'M', 'لخم'), - (0xFD87, 'M', 'لمح'), - (0xFD89, 'M', 'محج'), - (0xFD8A, 'M', 'محم'), - (0xFD8B, 'M', 'محي'), - (0xFD8C, 'M', 'مجح'), - (0xFD8D, 'M', 'مجم'), - (0xFD8E, 'M', 'مخج'), - (0xFD8F, 'M', 'مخم'), - (0xFD90, 'X'), - (0xFD92, 'M', 'مجخ'), - (0xFD93, 'M', 'همج'), - (0xFD94, 'M', 'همم'), - (0xFD95, 'M', 'نحم'), - (0xFD96, 'M', 'نحى'), - (0xFD97, 'M', 'نجم'), - (0xFD99, 'M', 'نجى'), - (0xFD9A, 'M', 'نمي'), - (0xFD9B, 'M', 'نمى'), - (0xFD9C, 'M', 'يمم'), - (0xFD9E, 'M', 'بخي'), - (0xFD9F, 'M', 'تجي'), - (0xFDA0, 'M', 'تجى'), - (0xFDA1, 'M', 'تخي'), - (0xFDA2, 'M', 'تخى'), - (0xFDA3, 'M', 'تمي'), - (0xFDA4, 'M', 'تمى'), - (0xFDA5, 'M', 'جمي'), - (0xFDA6, 'M', 'جحى'), - ] - -def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFDA7, 'M', 'جمى'), - (0xFDA8, 'M', 'سخى'), - (0xFDA9, 'M', 'صحي'), - (0xFDAA, 'M', 'شحي'), - (0xFDAB, 'M', 'ضحي'), - (0xFDAC, 'M', 'لجي'), - (0xFDAD, 'M', 'لمي'), - (0xFDAE, 'M', 'يحي'), - (0xFDAF, 'M', 'يجي'), - (0xFDB0, 'M', 'يمي'), - (0xFDB1, 'M', 'ممي'), - (0xFDB2, 'M', 'قمي'), - (0xFDB3, 'M', 'نحي'), - (0xFDB4, 'M', 'قمح'), - (0xFDB5, 'M', 'لحم'), - (0xFDB6, 'M', 'عمي'), - (0xFDB7, 'M', 
'كمي'), - (0xFDB8, 'M', 'نجح'), - (0xFDB9, 'M', 'مخي'), - (0xFDBA, 'M', 'لجم'), - (0xFDBB, 'M', 'كمم'), - (0xFDBC, 'M', 'لجم'), - (0xFDBD, 'M', 'نجح'), - (0xFDBE, 'M', 'جحي'), - (0xFDBF, 'M', 'حجي'), - (0xFDC0, 'M', 'مجي'), - (0xFDC1, 'M', 'فمي'), - (0xFDC2, 'M', 'بحي'), - (0xFDC3, 'M', 'كمم'), - (0xFDC4, 'M', 'عجم'), - (0xFDC5, 'M', 'صمم'), - (0xFDC6, 'M', 'سخي'), - (0xFDC7, 'M', 'نجي'), - (0xFDC8, 'X'), - (0xFDCF, 'V'), - (0xFDD0, 'X'), - (0xFDF0, 'M', 'صلے'), - (0xFDF1, 'M', 'قلے'), - (0xFDF2, 'M', 'الله'), - (0xFDF3, 'M', 'اكبر'), - (0xFDF4, 'M', 'محمد'), - (0xFDF5, 'M', 'صلعم'), - (0xFDF6, 'M', 'رسول'), - (0xFDF7, 'M', 'عليه'), - (0xFDF8, 'M', 'وسلم'), - (0xFDF9, 'M', 'صلى'), - (0xFDFA, '3', 'صلى الله عليه وسلم'), - (0xFDFB, '3', 'جل جلاله'), - (0xFDFC, 'M', 'ریال'), - (0xFDFD, 'V'), - (0xFE00, 'I'), - (0xFE10, '3', ','), - (0xFE11, 'M', '、'), - (0xFE12, 'X'), - (0xFE13, '3', ':'), - (0xFE14, '3', ';'), - (0xFE15, '3', '!'), - (0xFE16, '3', '?'), - (0xFE17, 'M', '〖'), - (0xFE18, 'M', '〗'), - (0xFE19, 'X'), - (0xFE20, 'V'), - (0xFE30, 'X'), - (0xFE31, 'M', '—'), - (0xFE32, 'M', '–'), - (0xFE33, '3', '_'), - (0xFE35, '3', '('), - (0xFE36, '3', ')'), - (0xFE37, '3', '{'), - (0xFE38, '3', '}'), - (0xFE39, 'M', '〔'), - (0xFE3A, 'M', '〕'), - (0xFE3B, 'M', '【'), - (0xFE3C, 'M', '】'), - (0xFE3D, 'M', '《'), - (0xFE3E, 'M', '》'), - (0xFE3F, 'M', '〈'), - (0xFE40, 'M', '〉'), - (0xFE41, 'M', '「'), - (0xFE42, 'M', '」'), - (0xFE43, 'M', '『'), - (0xFE44, 'M', '』'), - (0xFE45, 'V'), - (0xFE47, '3', '['), - (0xFE48, '3', ']'), - (0xFE49, '3', ' ̅'), - (0xFE4D, '3', '_'), - (0xFE50, '3', ','), - (0xFE51, 'M', '、'), - (0xFE52, 'X'), - (0xFE54, '3', ';'), - (0xFE55, '3', ':'), - (0xFE56, '3', '?'), - (0xFE57, '3', '!'), - (0xFE58, 'M', '—'), - (0xFE59, '3', '('), - (0xFE5A, '3', ')'), - (0xFE5B, '3', '{'), - (0xFE5C, '3', '}'), - (0xFE5D, 'M', '〔'), - ] - -def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFE5E, 'M', '〕'), - (0xFE5F, '3', '#'), - (0xFE60, '3', '&'), - (0xFE61, '3', '*'), - (0xFE62, '3', '+'), - (0xFE63, 'M', '-'), - (0xFE64, '3', '<'), - (0xFE65, '3', '>'), - (0xFE66, '3', '='), - (0xFE67, 'X'), - (0xFE68, '3', '\\'), - (0xFE69, '3', '$'), - (0xFE6A, '3', '%'), - (0xFE6B, '3', '@'), - (0xFE6C, 'X'), - (0xFE70, '3', ' ً'), - (0xFE71, 'M', 'ـً'), - (0xFE72, '3', ' ٌ'), - (0xFE73, 'V'), - (0xFE74, '3', ' ٍ'), - (0xFE75, 'X'), - (0xFE76, '3', ' َ'), - (0xFE77, 'M', 'ـَ'), - (0xFE78, '3', ' ُ'), - (0xFE79, 'M', 'ـُ'), - (0xFE7A, '3', ' ِ'), - (0xFE7B, 'M', 'ـِ'), - (0xFE7C, '3', ' ّ'), - (0xFE7D, 'M', 'ـّ'), - (0xFE7E, '3', ' ْ'), - (0xFE7F, 'M', 'ـْ'), - (0xFE80, 'M', 'ء'), - (0xFE81, 'M', 'آ'), - (0xFE83, 'M', 'أ'), - (0xFE85, 'M', 'ؤ'), - (0xFE87, 'M', 'إ'), - (0xFE89, 'M', 'ئ'), - (0xFE8D, 'M', 'ا'), - (0xFE8F, 'M', 'ب'), - (0xFE93, 'M', 'ة'), - (0xFE95, 'M', 'ت'), - (0xFE99, 'M', 'ث'), - (0xFE9D, 'M', 'ج'), - (0xFEA1, 'M', 'ح'), - (0xFEA5, 'M', 'خ'), - (0xFEA9, 'M', 'د'), - (0xFEAB, 'M', 'ذ'), - (0xFEAD, 'M', 'ر'), - (0xFEAF, 'M', 'ز'), - (0xFEB1, 'M', 'س'), - (0xFEB5, 'M', 'ش'), - (0xFEB9, 'M', 'ص'), - (0xFEBD, 'M', 'ض'), - (0xFEC1, 'M', 'ط'), - (0xFEC5, 'M', 'ظ'), - (0xFEC9, 'M', 'ع'), - (0xFECD, 'M', 'غ'), - (0xFED1, 'M', 'ف'), - (0xFED5, 'M', 'ق'), - (0xFED9, 'M', 'ك'), - (0xFEDD, 'M', 'ل'), - (0xFEE1, 'M', 'م'), - (0xFEE5, 'M', 'ن'), - (0xFEE9, 'M', 'ه'), - (0xFEED, 'M', 'و'), - (0xFEEF, 'M', 'ى'), - (0xFEF1, 'M', 'ي'), - (0xFEF5, 'M', 'لآ'), - (0xFEF7, 'M', 'لأ'), - (0xFEF9, 'M', 'لإ'), - (0xFEFB, 'M', 'لا'), - (0xFEFD, 'X'), - (0xFEFF, 'I'), - 
(0xFF00, 'X'), - (0xFF01, '3', '!'), - (0xFF02, '3', '"'), - (0xFF03, '3', '#'), - (0xFF04, '3', '$'), - (0xFF05, '3', '%'), - (0xFF06, '3', '&'), - (0xFF07, '3', '\''), - (0xFF08, '3', '('), - (0xFF09, '3', ')'), - (0xFF0A, '3', '*'), - (0xFF0B, '3', '+'), - (0xFF0C, '3', ','), - (0xFF0D, 'M', '-'), - (0xFF0E, 'M', '.'), - (0xFF0F, '3', '/'), - (0xFF10, 'M', '0'), - (0xFF11, 'M', '1'), - (0xFF12, 'M', '2'), - (0xFF13, 'M', '3'), - (0xFF14, 'M', '4'), - (0xFF15, 'M', '5'), - (0xFF16, 'M', '6'), - (0xFF17, 'M', '7'), - (0xFF18, 'M', '8'), - (0xFF19, 'M', '9'), - (0xFF1A, '3', ':'), - ] - -def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF1B, '3', ';'), - (0xFF1C, '3', '<'), - (0xFF1D, '3', '='), - (0xFF1E, '3', '>'), - (0xFF1F, '3', '?'), - (0xFF20, '3', '@'), - (0xFF21, 'M', 'a'), - (0xFF22, 'M', 'b'), - (0xFF23, 'M', 'c'), - (0xFF24, 'M', 'd'), - (0xFF25, 'M', 'e'), - (0xFF26, 'M', 'f'), - (0xFF27, 'M', 'g'), - (0xFF28, 'M', 'h'), - (0xFF29, 'M', 'i'), - (0xFF2A, 'M', 'j'), - (0xFF2B, 'M', 'k'), - (0xFF2C, 'M', 'l'), - (0xFF2D, 'M', 'm'), - (0xFF2E, 'M', 'n'), - (0xFF2F, 'M', 'o'), - (0xFF30, 'M', 'p'), - (0xFF31, 'M', 'q'), - (0xFF32, 'M', 'r'), - (0xFF33, 'M', 's'), - (0xFF34, 'M', 't'), - (0xFF35, 'M', 'u'), - (0xFF36, 'M', 'v'), - (0xFF37, 'M', 'w'), - (0xFF38, 'M', 'x'), - (0xFF39, 'M', 'y'), - (0xFF3A, 'M', 'z'), - (0xFF3B, '3', '['), - (0xFF3C, '3', '\\'), - (0xFF3D, '3', ']'), - (0xFF3E, '3', '^'), - (0xFF3F, '3', '_'), - (0xFF40, '3', '`'), - (0xFF41, 'M', 'a'), - (0xFF42, 'M', 'b'), - (0xFF43, 'M', 'c'), - (0xFF44, 'M', 'd'), - (0xFF45, 'M', 'e'), - (0xFF46, 'M', 'f'), - (0xFF47, 'M', 'g'), - (0xFF48, 'M', 'h'), - (0xFF49, 'M', 'i'), - (0xFF4A, 'M', 'j'), - (0xFF4B, 'M', 'k'), - (0xFF4C, 'M', 'l'), - (0xFF4D, 'M', 'm'), - (0xFF4E, 'M', 'n'), - (0xFF4F, 'M', 'o'), - (0xFF50, 'M', 'p'), - (0xFF51, 'M', 'q'), - (0xFF52, 'M', 'r'), - (0xFF53, 'M', 's'), - (0xFF54, 'M', 't'), - (0xFF55, 'M', 'u'), - (0xFF56, 'M', 'v'), - (0xFF57, 'M', 'w'), - (0xFF58, 'M', 'x'), - (0xFF59, 'M', 'y'), - (0xFF5A, 'M', 'z'), - (0xFF5B, '3', '{'), - (0xFF5C, '3', '|'), - (0xFF5D, '3', '}'), - (0xFF5E, '3', '~'), - (0xFF5F, 'M', '⦅'), - (0xFF60, 'M', '⦆'), - (0xFF61, 'M', '.'), - (0xFF62, 'M', '「'), - (0xFF63, 'M', '」'), - (0xFF64, 'M', '、'), - (0xFF65, 'M', '・'), - (0xFF66, 'M', 'ヲ'), - (0xFF67, 'M', 'ァ'), - (0xFF68, 'M', 'ィ'), - (0xFF69, 'M', 'ゥ'), - (0xFF6A, 'M', 'ェ'), - (0xFF6B, 'M', 'ォ'), - (0xFF6C, 'M', 'ャ'), - (0xFF6D, 'M', 'ュ'), - (0xFF6E, 'M', 'ョ'), - (0xFF6F, 'M', 'ッ'), - (0xFF70, 'M', 'ー'), - (0xFF71, 'M', 'ア'), - (0xFF72, 'M', 'イ'), - (0xFF73, 'M', 'ウ'), - (0xFF74, 'M', 'エ'), - (0xFF75, 'M', 'オ'), - (0xFF76, 'M', 'カ'), - (0xFF77, 'M', 'キ'), - (0xFF78, 'M', 'ク'), - (0xFF79, 'M', 'ケ'), - (0xFF7A, 'M', 'コ'), - (0xFF7B, 'M', 'サ'), - (0xFF7C, 'M', 'シ'), - (0xFF7D, 'M', 'ス'), - (0xFF7E, 'M', 'セ'), - ] - -def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF7F, 'M', 'ソ'), - (0xFF80, 'M', 'タ'), - (0xFF81, 'M', 'チ'), - (0xFF82, 'M', 'ツ'), - (0xFF83, 'M', 'テ'), - (0xFF84, 'M', 'ト'), - (0xFF85, 'M', 'ナ'), - (0xFF86, 'M', 'ニ'), - (0xFF87, 'M', 'ヌ'), - (0xFF88, 'M', 'ネ'), - (0xFF89, 'M', 'ノ'), - (0xFF8A, 'M', 'ハ'), - (0xFF8B, 'M', 'ヒ'), - (0xFF8C, 'M', 'フ'), - (0xFF8D, 'M', 'ヘ'), - (0xFF8E, 'M', 'ホ'), - (0xFF8F, 'M', 'マ'), - (0xFF90, 'M', 'ミ'), - (0xFF91, 'M', 'ム'), - (0xFF92, 'M', 'メ'), - (0xFF93, 'M', 'モ'), - (0xFF94, 'M', 'ヤ'), - (0xFF95, 'M', 'ユ'), - (0xFF96, 'M', 'ヨ'), - (0xFF97, 'M', 'ラ'), - (0xFF98, 'M', 'リ'), - (0xFF99, 'M', 
'ル'), - (0xFF9A, 'M', 'レ'), - (0xFF9B, 'M', 'ロ'), - (0xFF9C, 'M', 'ワ'), - (0xFF9D, 'M', 'ン'), - (0xFF9E, 'M', '゙'), - (0xFF9F, 'M', '゚'), - (0xFFA0, 'X'), - (0xFFA1, 'M', 'ᄀ'), - (0xFFA2, 'M', 'ᄁ'), - (0xFFA3, 'M', 'ᆪ'), - (0xFFA4, 'M', 'ᄂ'), - (0xFFA5, 'M', 'ᆬ'), - (0xFFA6, 'M', 'ᆭ'), - (0xFFA7, 'M', 'ᄃ'), - (0xFFA8, 'M', 'ᄄ'), - (0xFFA9, 'M', 'ᄅ'), - (0xFFAA, 'M', 'ᆰ'), - (0xFFAB, 'M', 'ᆱ'), - (0xFFAC, 'M', 'ᆲ'), - (0xFFAD, 'M', 'ᆳ'), - (0xFFAE, 'M', 'ᆴ'), - (0xFFAF, 'M', 'ᆵ'), - (0xFFB0, 'M', 'ᄚ'), - (0xFFB1, 'M', 'ᄆ'), - (0xFFB2, 'M', 'ᄇ'), - (0xFFB3, 'M', 'ᄈ'), - (0xFFB4, 'M', 'ᄡ'), - (0xFFB5, 'M', 'ᄉ'), - (0xFFB6, 'M', 'ᄊ'), - (0xFFB7, 'M', 'ᄋ'), - (0xFFB8, 'M', 'ᄌ'), - (0xFFB9, 'M', 'ᄍ'), - (0xFFBA, 'M', 'ᄎ'), - (0xFFBB, 'M', 'ᄏ'), - (0xFFBC, 'M', 'ᄐ'), - (0xFFBD, 'M', 'ᄑ'), - (0xFFBE, 'M', 'ᄒ'), - (0xFFBF, 'X'), - (0xFFC2, 'M', 'ᅡ'), - (0xFFC3, 'M', 'ᅢ'), - (0xFFC4, 'M', 'ᅣ'), - (0xFFC5, 'M', 'ᅤ'), - (0xFFC6, 'M', 'ᅥ'), - (0xFFC7, 'M', 'ᅦ'), - (0xFFC8, 'X'), - (0xFFCA, 'M', 'ᅧ'), - (0xFFCB, 'M', 'ᅨ'), - (0xFFCC, 'M', 'ᅩ'), - (0xFFCD, 'M', 'ᅪ'), - (0xFFCE, 'M', 'ᅫ'), - (0xFFCF, 'M', 'ᅬ'), - (0xFFD0, 'X'), - (0xFFD2, 'M', 'ᅭ'), - (0xFFD3, 'M', 'ᅮ'), - (0xFFD4, 'M', 'ᅯ'), - (0xFFD5, 'M', 'ᅰ'), - (0xFFD6, 'M', 'ᅱ'), - (0xFFD7, 'M', 'ᅲ'), - (0xFFD8, 'X'), - (0xFFDA, 'M', 'ᅳ'), - (0xFFDB, 'M', 'ᅴ'), - (0xFFDC, 'M', 'ᅵ'), - (0xFFDD, 'X'), - (0xFFE0, 'M', '¢'), - (0xFFE1, 'M', '£'), - (0xFFE2, 'M', '¬'), - (0xFFE3, '3', ' ̄'), - (0xFFE4, 'M', '¦'), - (0xFFE5, 'M', '¥'), - (0xFFE6, 'M', '₩'), - (0xFFE7, 'X'), - (0xFFE8, 'M', '│'), - (0xFFE9, 'M', '←'), - ] - -def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFFEA, 'M', '↑'), - (0xFFEB, 'M', '→'), - (0xFFEC, 'M', '↓'), - (0xFFED, 'M', '■'), - (0xFFEE, 'M', '○'), - (0xFFEF, 'X'), - (0x10000, 'V'), - (0x1000C, 'X'), - (0x1000D, 'V'), - (0x10027, 'X'), - (0x10028, 'V'), - (0x1003B, 'X'), - (0x1003C, 'V'), - (0x1003E, 'X'), - (0x1003F, 'V'), - (0x1004E, 'X'), - (0x10050, 'V'), - (0x1005E, 'X'), - (0x10080, 'V'), - (0x100FB, 'X'), - (0x10100, 'V'), - (0x10103, 'X'), - (0x10107, 'V'), - (0x10134, 'X'), - (0x10137, 'V'), - (0x1018F, 'X'), - (0x10190, 'V'), - (0x1019D, 'X'), - (0x101A0, 'V'), - (0x101A1, 'X'), - (0x101D0, 'V'), - (0x101FE, 'X'), - (0x10280, 'V'), - (0x1029D, 'X'), - (0x102A0, 'V'), - (0x102D1, 'X'), - (0x102E0, 'V'), - (0x102FC, 'X'), - (0x10300, 'V'), - (0x10324, 'X'), - (0x1032D, 'V'), - (0x1034B, 'X'), - (0x10350, 'V'), - (0x1037B, 'X'), - (0x10380, 'V'), - (0x1039E, 'X'), - (0x1039F, 'V'), - (0x103C4, 'X'), - (0x103C8, 'V'), - (0x103D6, 'X'), - (0x10400, 'M', '𐐨'), - (0x10401, 'M', '𐐩'), - (0x10402, 'M', '𐐪'), - (0x10403, 'M', '𐐫'), - (0x10404, 'M', '𐐬'), - (0x10405, 'M', '𐐭'), - (0x10406, 'M', '𐐮'), - (0x10407, 'M', '𐐯'), - (0x10408, 'M', '𐐰'), - (0x10409, 'M', '𐐱'), - (0x1040A, 'M', '𐐲'), - (0x1040B, 'M', '𐐳'), - (0x1040C, 'M', '𐐴'), - (0x1040D, 'M', '𐐵'), - (0x1040E, 'M', '𐐶'), - (0x1040F, 'M', '𐐷'), - (0x10410, 'M', '𐐸'), - (0x10411, 'M', '𐐹'), - (0x10412, 'M', '𐐺'), - (0x10413, 'M', '𐐻'), - (0x10414, 'M', '𐐼'), - (0x10415, 'M', '𐐽'), - (0x10416, 'M', '𐐾'), - (0x10417, 'M', '𐐿'), - (0x10418, 'M', '𐑀'), - (0x10419, 'M', '𐑁'), - (0x1041A, 'M', '𐑂'), - (0x1041B, 'M', '𐑃'), - (0x1041C, 'M', '𐑄'), - (0x1041D, 'M', '𐑅'), - (0x1041E, 'M', '𐑆'), - (0x1041F, 'M', '𐑇'), - (0x10420, 'M', '𐑈'), - (0x10421, 'M', '𐑉'), - (0x10422, 'M', '𐑊'), - (0x10423, 'M', '𐑋'), - (0x10424, 'M', '𐑌'), - (0x10425, 'M', '𐑍'), - (0x10426, 'M', '𐑎'), - (0x10427, 'M', '𐑏'), - (0x10428, 'V'), - (0x1049E, 'X'), - (0x104A0, 
'V'), - (0x104AA, 'X'), - (0x104B0, 'M', '𐓘'), - (0x104B1, 'M', '𐓙'), - (0x104B2, 'M', '𐓚'), - (0x104B3, 'M', '𐓛'), - (0x104B4, 'M', '𐓜'), - (0x104B5, 'M', '𐓝'), - ] - -def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x104B6, 'M', '𐓞'), - (0x104B7, 'M', '𐓟'), - (0x104B8, 'M', '𐓠'), - (0x104B9, 'M', '𐓡'), - (0x104BA, 'M', '𐓢'), - (0x104BB, 'M', '𐓣'), - (0x104BC, 'M', '𐓤'), - (0x104BD, 'M', '𐓥'), - (0x104BE, 'M', '𐓦'), - (0x104BF, 'M', '𐓧'), - (0x104C0, 'M', '𐓨'), - (0x104C1, 'M', '𐓩'), - (0x104C2, 'M', '𐓪'), - (0x104C3, 'M', '𐓫'), - (0x104C4, 'M', '𐓬'), - (0x104C5, 'M', '𐓭'), - (0x104C6, 'M', '𐓮'), - (0x104C7, 'M', '𐓯'), - (0x104C8, 'M', '𐓰'), - (0x104C9, 'M', '𐓱'), - (0x104CA, 'M', '𐓲'), - (0x104CB, 'M', '𐓳'), - (0x104CC, 'M', '𐓴'), - (0x104CD, 'M', '𐓵'), - (0x104CE, 'M', '𐓶'), - (0x104CF, 'M', '𐓷'), - (0x104D0, 'M', '𐓸'), - (0x104D1, 'M', '𐓹'), - (0x104D2, 'M', '𐓺'), - (0x104D3, 'M', '𐓻'), - (0x104D4, 'X'), - (0x104D8, 'V'), - (0x104FC, 'X'), - (0x10500, 'V'), - (0x10528, 'X'), - (0x10530, 'V'), - (0x10564, 'X'), - (0x1056F, 'V'), - (0x10570, 'M', '𐖗'), - (0x10571, 'M', '𐖘'), - (0x10572, 'M', '𐖙'), - (0x10573, 'M', '𐖚'), - (0x10574, 'M', '𐖛'), - (0x10575, 'M', '𐖜'), - (0x10576, 'M', '𐖝'), - (0x10577, 'M', '𐖞'), - (0x10578, 'M', '𐖟'), - (0x10579, 'M', '𐖠'), - (0x1057A, 'M', '𐖡'), - (0x1057B, 'X'), - (0x1057C, 'M', '𐖣'), - (0x1057D, 'M', '𐖤'), - (0x1057E, 'M', '𐖥'), - (0x1057F, 'M', '𐖦'), - (0x10580, 'M', '𐖧'), - (0x10581, 'M', '𐖨'), - (0x10582, 'M', '𐖩'), - (0x10583, 'M', '𐖪'), - (0x10584, 'M', '𐖫'), - (0x10585, 'M', '𐖬'), - (0x10586, 'M', '𐖭'), - (0x10587, 'M', '𐖮'), - (0x10588, 'M', '𐖯'), - (0x10589, 'M', '𐖰'), - (0x1058A, 'M', '𐖱'), - (0x1058B, 'X'), - (0x1058C, 'M', '𐖳'), - (0x1058D, 'M', '𐖴'), - (0x1058E, 'M', '𐖵'), - (0x1058F, 'M', '𐖶'), - (0x10590, 'M', '𐖷'), - (0x10591, 'M', '𐖸'), - (0x10592, 'M', '𐖹'), - (0x10593, 'X'), - (0x10594, 'M', '𐖻'), - (0x10595, 'M', '𐖼'), - (0x10596, 'X'), - (0x10597, 'V'), - (0x105A2, 'X'), - (0x105A3, 'V'), - (0x105B2, 'X'), - (0x105B3, 'V'), - (0x105BA, 'X'), - (0x105BB, 'V'), - (0x105BD, 'X'), - (0x10600, 'V'), - (0x10737, 'X'), - (0x10740, 'V'), - (0x10756, 'X'), - (0x10760, 'V'), - (0x10768, 'X'), - (0x10780, 'V'), - (0x10781, 'M', 'ː'), - (0x10782, 'M', 'ˑ'), - (0x10783, 'M', 'æ'), - (0x10784, 'M', 'ʙ'), - (0x10785, 'M', 'ɓ'), - (0x10786, 'X'), - (0x10787, 'M', 'ʣ'), - (0x10788, 'M', 'ꭦ'), - ] - -def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10789, 'M', 'ʥ'), - (0x1078A, 'M', 'ʤ'), - (0x1078B, 'M', 'ɖ'), - (0x1078C, 'M', 'ɗ'), - (0x1078D, 'M', 'ᶑ'), - (0x1078E, 'M', 'ɘ'), - (0x1078F, 'M', 'ɞ'), - (0x10790, 'M', 'ʩ'), - (0x10791, 'M', 'ɤ'), - (0x10792, 'M', 'ɢ'), - (0x10793, 'M', 'ɠ'), - (0x10794, 'M', 'ʛ'), - (0x10795, 'M', 'ħ'), - (0x10796, 'M', 'ʜ'), - (0x10797, 'M', 'ɧ'), - (0x10798, 'M', 'ʄ'), - (0x10799, 'M', 'ʪ'), - (0x1079A, 'M', 'ʫ'), - (0x1079B, 'M', 'ɬ'), - (0x1079C, 'M', '𝼄'), - (0x1079D, 'M', 'ꞎ'), - (0x1079E, 'M', 'ɮ'), - (0x1079F, 'M', '𝼅'), - (0x107A0, 'M', 'ʎ'), - (0x107A1, 'M', '𝼆'), - (0x107A2, 'M', 'ø'), - (0x107A3, 'M', 'ɶ'), - (0x107A4, 'M', 'ɷ'), - (0x107A5, 'M', 'q'), - (0x107A6, 'M', 'ɺ'), - (0x107A7, 'M', '𝼈'), - (0x107A8, 'M', 'ɽ'), - (0x107A9, 'M', 'ɾ'), - (0x107AA, 'M', 'ʀ'), - (0x107AB, 'M', 'ʨ'), - (0x107AC, 'M', 'ʦ'), - (0x107AD, 'M', 'ꭧ'), - (0x107AE, 'M', 'ʧ'), - (0x107AF, 'M', 'ʈ'), - (0x107B0, 'M', 'ⱱ'), - (0x107B1, 'X'), - (0x107B2, 'M', 'ʏ'), - (0x107B3, 'M', 'ʡ'), - (0x107B4, 'M', 'ʢ'), - (0x107B5, 'M', 'ʘ'), - (0x107B6, 'M', 'ǀ'), - 
(0x107B7, 'M', 'ǁ'), - (0x107B8, 'M', 'ǂ'), - (0x107B9, 'M', '𝼊'), - (0x107BA, 'M', '𝼞'), - (0x107BB, 'X'), - (0x10800, 'V'), - (0x10806, 'X'), - (0x10808, 'V'), - (0x10809, 'X'), - (0x1080A, 'V'), - (0x10836, 'X'), - (0x10837, 'V'), - (0x10839, 'X'), - (0x1083C, 'V'), - (0x1083D, 'X'), - (0x1083F, 'V'), - (0x10856, 'X'), - (0x10857, 'V'), - (0x1089F, 'X'), - (0x108A7, 'V'), - (0x108B0, 'X'), - (0x108E0, 'V'), - (0x108F3, 'X'), - (0x108F4, 'V'), - (0x108F6, 'X'), - (0x108FB, 'V'), - (0x1091C, 'X'), - (0x1091F, 'V'), - (0x1093A, 'X'), - (0x1093F, 'V'), - (0x10940, 'X'), - (0x10980, 'V'), - (0x109B8, 'X'), - (0x109BC, 'V'), - (0x109D0, 'X'), - (0x109D2, 'V'), - (0x10A04, 'X'), - (0x10A05, 'V'), - (0x10A07, 'X'), - (0x10A0C, 'V'), - (0x10A14, 'X'), - (0x10A15, 'V'), - (0x10A18, 'X'), - (0x10A19, 'V'), - (0x10A36, 'X'), - (0x10A38, 'V'), - (0x10A3B, 'X'), - (0x10A3F, 'V'), - (0x10A49, 'X'), - (0x10A50, 'V'), - (0x10A59, 'X'), - (0x10A60, 'V'), - (0x10AA0, 'X'), - (0x10AC0, 'V'), - ] - -def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10AE7, 'X'), - (0x10AEB, 'V'), - (0x10AF7, 'X'), - (0x10B00, 'V'), - (0x10B36, 'X'), - (0x10B39, 'V'), - (0x10B56, 'X'), - (0x10B58, 'V'), - (0x10B73, 'X'), - (0x10B78, 'V'), - (0x10B92, 'X'), - (0x10B99, 'V'), - (0x10B9D, 'X'), - (0x10BA9, 'V'), - (0x10BB0, 'X'), - (0x10C00, 'V'), - (0x10C49, 'X'), - (0x10C80, 'M', '𐳀'), - (0x10C81, 'M', '𐳁'), - (0x10C82, 'M', '𐳂'), - (0x10C83, 'M', '𐳃'), - (0x10C84, 'M', '𐳄'), - (0x10C85, 'M', '𐳅'), - (0x10C86, 'M', '𐳆'), - (0x10C87, 'M', '𐳇'), - (0x10C88, 'M', '𐳈'), - (0x10C89, 'M', '𐳉'), - (0x10C8A, 'M', '𐳊'), - (0x10C8B, 'M', '𐳋'), - (0x10C8C, 'M', '𐳌'), - (0x10C8D, 'M', '𐳍'), - (0x10C8E, 'M', '𐳎'), - (0x10C8F, 'M', '𐳏'), - (0x10C90, 'M', '𐳐'), - (0x10C91, 'M', '𐳑'), - (0x10C92, 'M', '𐳒'), - (0x10C93, 'M', '𐳓'), - (0x10C94, 'M', '𐳔'), - (0x10C95, 'M', '𐳕'), - (0x10C96, 'M', '𐳖'), - (0x10C97, 'M', '𐳗'), - (0x10C98, 'M', '𐳘'), - (0x10C99, 'M', '𐳙'), - (0x10C9A, 'M', '𐳚'), - (0x10C9B, 'M', '𐳛'), - (0x10C9C, 'M', '𐳜'), - (0x10C9D, 'M', '𐳝'), - (0x10C9E, 'M', '𐳞'), - (0x10C9F, 'M', '𐳟'), - (0x10CA0, 'M', '𐳠'), - (0x10CA1, 'M', '𐳡'), - (0x10CA2, 'M', '𐳢'), - (0x10CA3, 'M', '𐳣'), - (0x10CA4, 'M', '𐳤'), - (0x10CA5, 'M', '𐳥'), - (0x10CA6, 'M', '𐳦'), - (0x10CA7, 'M', '𐳧'), - (0x10CA8, 'M', '𐳨'), - (0x10CA9, 'M', '𐳩'), - (0x10CAA, 'M', '𐳪'), - (0x10CAB, 'M', '𐳫'), - (0x10CAC, 'M', '𐳬'), - (0x10CAD, 'M', '𐳭'), - (0x10CAE, 'M', '𐳮'), - (0x10CAF, 'M', '𐳯'), - (0x10CB0, 'M', '𐳰'), - (0x10CB1, 'M', '𐳱'), - (0x10CB2, 'M', '𐳲'), - (0x10CB3, 'X'), - (0x10CC0, 'V'), - (0x10CF3, 'X'), - (0x10CFA, 'V'), - (0x10D28, 'X'), - (0x10D30, 'V'), - (0x10D3A, 'X'), - (0x10E60, 'V'), - (0x10E7F, 'X'), - (0x10E80, 'V'), - (0x10EAA, 'X'), - (0x10EAB, 'V'), - (0x10EAE, 'X'), - (0x10EB0, 'V'), - (0x10EB2, 'X'), - (0x10F00, 'V'), - (0x10F28, 'X'), - (0x10F30, 'V'), - (0x10F5A, 'X'), - (0x10F70, 'V'), - (0x10F8A, 'X'), - (0x10FB0, 'V'), - (0x10FCC, 'X'), - (0x10FE0, 'V'), - (0x10FF7, 'X'), - (0x11000, 'V'), - (0x1104E, 'X'), - (0x11052, 'V'), - (0x11076, 'X'), - (0x1107F, 'V'), - (0x110BD, 'X'), - (0x110BE, 'V'), - ] - -def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x110C3, 'X'), - (0x110D0, 'V'), - (0x110E9, 'X'), - (0x110F0, 'V'), - (0x110FA, 'X'), - (0x11100, 'V'), - (0x11135, 'X'), - (0x11136, 'V'), - (0x11148, 'X'), - (0x11150, 'V'), - (0x11177, 'X'), - (0x11180, 'V'), - (0x111E0, 'X'), - (0x111E1, 'V'), - (0x111F5, 'X'), - (0x11200, 'V'), - (0x11212, 'X'), - (0x11213, 'V'), - (0x1123F, 
'X'), - (0x11280, 'V'), - (0x11287, 'X'), - (0x11288, 'V'), - (0x11289, 'X'), - (0x1128A, 'V'), - (0x1128E, 'X'), - (0x1128F, 'V'), - (0x1129E, 'X'), - (0x1129F, 'V'), - (0x112AA, 'X'), - (0x112B0, 'V'), - (0x112EB, 'X'), - (0x112F0, 'V'), - (0x112FA, 'X'), - (0x11300, 'V'), - (0x11304, 'X'), - (0x11305, 'V'), - (0x1130D, 'X'), - (0x1130F, 'V'), - (0x11311, 'X'), - (0x11313, 'V'), - (0x11329, 'X'), - (0x1132A, 'V'), - (0x11331, 'X'), - (0x11332, 'V'), - (0x11334, 'X'), - (0x11335, 'V'), - (0x1133A, 'X'), - (0x1133B, 'V'), - (0x11345, 'X'), - (0x11347, 'V'), - (0x11349, 'X'), - (0x1134B, 'V'), - (0x1134E, 'X'), - (0x11350, 'V'), - (0x11351, 'X'), - (0x11357, 'V'), - (0x11358, 'X'), - (0x1135D, 'V'), - (0x11364, 'X'), - (0x11366, 'V'), - (0x1136D, 'X'), - (0x11370, 'V'), - (0x11375, 'X'), - (0x11400, 'V'), - (0x1145C, 'X'), - (0x1145D, 'V'), - (0x11462, 'X'), - (0x11480, 'V'), - (0x114C8, 'X'), - (0x114D0, 'V'), - (0x114DA, 'X'), - (0x11580, 'V'), - (0x115B6, 'X'), - (0x115B8, 'V'), - (0x115DE, 'X'), - (0x11600, 'V'), - (0x11645, 'X'), - (0x11650, 'V'), - (0x1165A, 'X'), - (0x11660, 'V'), - (0x1166D, 'X'), - (0x11680, 'V'), - (0x116BA, 'X'), - (0x116C0, 'V'), - (0x116CA, 'X'), - (0x11700, 'V'), - (0x1171B, 'X'), - (0x1171D, 'V'), - (0x1172C, 'X'), - (0x11730, 'V'), - (0x11747, 'X'), - (0x11800, 'V'), - (0x1183C, 'X'), - (0x118A0, 'M', '𑣀'), - (0x118A1, 'M', '𑣁'), - (0x118A2, 'M', '𑣂'), - (0x118A3, 'M', '𑣃'), - (0x118A4, 'M', '𑣄'), - (0x118A5, 'M', '𑣅'), - (0x118A6, 'M', '𑣆'), - ] - -def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x118A7, 'M', '𑣇'), - (0x118A8, 'M', '𑣈'), - (0x118A9, 'M', '𑣉'), - (0x118AA, 'M', '𑣊'), - (0x118AB, 'M', '𑣋'), - (0x118AC, 'M', '𑣌'), - (0x118AD, 'M', '𑣍'), - (0x118AE, 'M', '𑣎'), - (0x118AF, 'M', '𑣏'), - (0x118B0, 'M', '𑣐'), - (0x118B1, 'M', '𑣑'), - (0x118B2, 'M', '𑣒'), - (0x118B3, 'M', '𑣓'), - (0x118B4, 'M', '𑣔'), - (0x118B5, 'M', '𑣕'), - (0x118B6, 'M', '𑣖'), - (0x118B7, 'M', '𑣗'), - (0x118B8, 'M', '𑣘'), - (0x118B9, 'M', '𑣙'), - (0x118BA, 'M', '𑣚'), - (0x118BB, 'M', '𑣛'), - (0x118BC, 'M', '𑣜'), - (0x118BD, 'M', '𑣝'), - (0x118BE, 'M', '𑣞'), - (0x118BF, 'M', '𑣟'), - (0x118C0, 'V'), - (0x118F3, 'X'), - (0x118FF, 'V'), - (0x11907, 'X'), - (0x11909, 'V'), - (0x1190A, 'X'), - (0x1190C, 'V'), - (0x11914, 'X'), - (0x11915, 'V'), - (0x11917, 'X'), - (0x11918, 'V'), - (0x11936, 'X'), - (0x11937, 'V'), - (0x11939, 'X'), - (0x1193B, 'V'), - (0x11947, 'X'), - (0x11950, 'V'), - (0x1195A, 'X'), - (0x119A0, 'V'), - (0x119A8, 'X'), - (0x119AA, 'V'), - (0x119D8, 'X'), - (0x119DA, 'V'), - (0x119E5, 'X'), - (0x11A00, 'V'), - (0x11A48, 'X'), - (0x11A50, 'V'), - (0x11AA3, 'X'), - (0x11AB0, 'V'), - (0x11AF9, 'X'), - (0x11C00, 'V'), - (0x11C09, 'X'), - (0x11C0A, 'V'), - (0x11C37, 'X'), - (0x11C38, 'V'), - (0x11C46, 'X'), - (0x11C50, 'V'), - (0x11C6D, 'X'), - (0x11C70, 'V'), - (0x11C90, 'X'), - (0x11C92, 'V'), - (0x11CA8, 'X'), - (0x11CA9, 'V'), - (0x11CB7, 'X'), - (0x11D00, 'V'), - (0x11D07, 'X'), - (0x11D08, 'V'), - (0x11D0A, 'X'), - (0x11D0B, 'V'), - (0x11D37, 'X'), - (0x11D3A, 'V'), - (0x11D3B, 'X'), - (0x11D3C, 'V'), - (0x11D3E, 'X'), - (0x11D3F, 'V'), - (0x11D48, 'X'), - (0x11D50, 'V'), - (0x11D5A, 'X'), - (0x11D60, 'V'), - (0x11D66, 'X'), - (0x11D67, 'V'), - (0x11D69, 'X'), - (0x11D6A, 'V'), - (0x11D8F, 'X'), - (0x11D90, 'V'), - (0x11D92, 'X'), - (0x11D93, 'V'), - (0x11D99, 'X'), - (0x11DA0, 'V'), - (0x11DAA, 'X'), - (0x11EE0, 'V'), - (0x11EF9, 'X'), - (0x11FB0, 'V'), - (0x11FB1, 'X'), - (0x11FC0, 'V'), - ] - -def _seg_59() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x11FF2, 'X'), - (0x11FFF, 'V'), - (0x1239A, 'X'), - (0x12400, 'V'), - (0x1246F, 'X'), - (0x12470, 'V'), - (0x12475, 'X'), - (0x12480, 'V'), - (0x12544, 'X'), - (0x12F90, 'V'), - (0x12FF3, 'X'), - (0x13000, 'V'), - (0x1342F, 'X'), - (0x14400, 'V'), - (0x14647, 'X'), - (0x16800, 'V'), - (0x16A39, 'X'), - (0x16A40, 'V'), - (0x16A5F, 'X'), - (0x16A60, 'V'), - (0x16A6A, 'X'), - (0x16A6E, 'V'), - (0x16ABF, 'X'), - (0x16AC0, 'V'), - (0x16ACA, 'X'), - (0x16AD0, 'V'), - (0x16AEE, 'X'), - (0x16AF0, 'V'), - (0x16AF6, 'X'), - (0x16B00, 'V'), - (0x16B46, 'X'), - (0x16B50, 'V'), - (0x16B5A, 'X'), - (0x16B5B, 'V'), - (0x16B62, 'X'), - (0x16B63, 'V'), - (0x16B78, 'X'), - (0x16B7D, 'V'), - (0x16B90, 'X'), - (0x16E40, 'M', '𖹠'), - (0x16E41, 'M', '𖹡'), - (0x16E42, 'M', '𖹢'), - (0x16E43, 'M', '𖹣'), - (0x16E44, 'M', '𖹤'), - (0x16E45, 'M', '𖹥'), - (0x16E46, 'M', '𖹦'), - (0x16E47, 'M', '𖹧'), - (0x16E48, 'M', '𖹨'), - (0x16E49, 'M', '𖹩'), - (0x16E4A, 'M', '𖹪'), - (0x16E4B, 'M', '𖹫'), - (0x16E4C, 'M', '𖹬'), - (0x16E4D, 'M', '𖹭'), - (0x16E4E, 'M', '𖹮'), - (0x16E4F, 'M', '𖹯'), - (0x16E50, 'M', '𖹰'), - (0x16E51, 'M', '𖹱'), - (0x16E52, 'M', '𖹲'), - (0x16E53, 'M', '𖹳'), - (0x16E54, 'M', '𖹴'), - (0x16E55, 'M', '𖹵'), - (0x16E56, 'M', '𖹶'), - (0x16E57, 'M', '𖹷'), - (0x16E58, 'M', '𖹸'), - (0x16E59, 'M', '𖹹'), - (0x16E5A, 'M', '𖹺'), - (0x16E5B, 'M', '𖹻'), - (0x16E5C, 'M', '𖹼'), - (0x16E5D, 'M', '𖹽'), - (0x16E5E, 'M', '𖹾'), - (0x16E5F, 'M', '𖹿'), - (0x16E60, 'V'), - (0x16E9B, 'X'), - (0x16F00, 'V'), - (0x16F4B, 'X'), - (0x16F4F, 'V'), - (0x16F88, 'X'), - (0x16F8F, 'V'), - (0x16FA0, 'X'), - (0x16FE0, 'V'), - (0x16FE5, 'X'), - (0x16FF0, 'V'), - (0x16FF2, 'X'), - (0x17000, 'V'), - (0x187F8, 'X'), - (0x18800, 'V'), - (0x18CD6, 'X'), - (0x18D00, 'V'), - (0x18D09, 'X'), - (0x1AFF0, 'V'), - (0x1AFF4, 'X'), - (0x1AFF5, 'V'), - (0x1AFFC, 'X'), - (0x1AFFD, 'V'), - (0x1AFFF, 'X'), - (0x1B000, 'V'), - (0x1B123, 'X'), - (0x1B150, 'V'), - (0x1B153, 'X'), - (0x1B164, 'V'), - ] - -def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1B168, 'X'), - (0x1B170, 'V'), - (0x1B2FC, 'X'), - (0x1BC00, 'V'), - (0x1BC6B, 'X'), - (0x1BC70, 'V'), - (0x1BC7D, 'X'), - (0x1BC80, 'V'), - (0x1BC89, 'X'), - (0x1BC90, 'V'), - (0x1BC9A, 'X'), - (0x1BC9C, 'V'), - (0x1BCA0, 'I'), - (0x1BCA4, 'X'), - (0x1CF00, 'V'), - (0x1CF2E, 'X'), - (0x1CF30, 'V'), - (0x1CF47, 'X'), - (0x1CF50, 'V'), - (0x1CFC4, 'X'), - (0x1D000, 'V'), - (0x1D0F6, 'X'), - (0x1D100, 'V'), - (0x1D127, 'X'), - (0x1D129, 'V'), - (0x1D15E, 'M', '𝅗𝅥'), - (0x1D15F, 'M', '𝅘𝅥'), - (0x1D160, 'M', '𝅘𝅥𝅮'), - (0x1D161, 'M', '𝅘𝅥𝅯'), - (0x1D162, 'M', '𝅘𝅥𝅰'), - (0x1D163, 'M', '𝅘𝅥𝅱'), - (0x1D164, 'M', '𝅘𝅥𝅲'), - (0x1D165, 'V'), - (0x1D173, 'X'), - (0x1D17B, 'V'), - (0x1D1BB, 'M', '𝆹𝅥'), - (0x1D1BC, 'M', '𝆺𝅥'), - (0x1D1BD, 'M', '𝆹𝅥𝅮'), - (0x1D1BE, 'M', '𝆺𝅥𝅮'), - (0x1D1BF, 'M', '𝆹𝅥𝅯'), - (0x1D1C0, 'M', '𝆺𝅥𝅯'), - (0x1D1C1, 'V'), - (0x1D1EB, 'X'), - (0x1D200, 'V'), - (0x1D246, 'X'), - (0x1D2E0, 'V'), - (0x1D2F4, 'X'), - (0x1D300, 'V'), - (0x1D357, 'X'), - (0x1D360, 'V'), - (0x1D379, 'X'), - (0x1D400, 'M', 'a'), - (0x1D401, 'M', 'b'), - (0x1D402, 'M', 'c'), - (0x1D403, 'M', 'd'), - (0x1D404, 'M', 'e'), - (0x1D405, 'M', 'f'), - (0x1D406, 'M', 'g'), - (0x1D407, 'M', 'h'), - (0x1D408, 'M', 'i'), - (0x1D409, 'M', 'j'), - (0x1D40A, 'M', 'k'), - (0x1D40B, 'M', 'l'), - (0x1D40C, 'M', 'm'), - (0x1D40D, 'M', 'n'), - (0x1D40E, 'M', 'o'), - (0x1D40F, 'M', 'p'), - (0x1D410, 'M', 'q'), - (0x1D411, 'M', 'r'), - (0x1D412, 'M', 's'), - 
(0x1D413, 'M', 't'), - (0x1D414, 'M', 'u'), - (0x1D415, 'M', 'v'), - (0x1D416, 'M', 'w'), - (0x1D417, 'M', 'x'), - (0x1D418, 'M', 'y'), - (0x1D419, 'M', 'z'), - (0x1D41A, 'M', 'a'), - (0x1D41B, 'M', 'b'), - (0x1D41C, 'M', 'c'), - (0x1D41D, 'M', 'd'), - (0x1D41E, 'M', 'e'), - (0x1D41F, 'M', 'f'), - (0x1D420, 'M', 'g'), - (0x1D421, 'M', 'h'), - (0x1D422, 'M', 'i'), - (0x1D423, 'M', 'j'), - (0x1D424, 'M', 'k'), - (0x1D425, 'M', 'l'), - (0x1D426, 'M', 'm'), - (0x1D427, 'M', 'n'), - (0x1D428, 'M', 'o'), - (0x1D429, 'M', 'p'), - (0x1D42A, 'M', 'q'), - (0x1D42B, 'M', 'r'), - (0x1D42C, 'M', 's'), - (0x1D42D, 'M', 't'), - (0x1D42E, 'M', 'u'), - (0x1D42F, 'M', 'v'), - (0x1D430, 'M', 'w'), - ] - -def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D431, 'M', 'x'), - (0x1D432, 'M', 'y'), - (0x1D433, 'M', 'z'), - (0x1D434, 'M', 'a'), - (0x1D435, 'M', 'b'), - (0x1D436, 'M', 'c'), - (0x1D437, 'M', 'd'), - (0x1D438, 'M', 'e'), - (0x1D439, 'M', 'f'), - (0x1D43A, 'M', 'g'), - (0x1D43B, 'M', 'h'), - (0x1D43C, 'M', 'i'), - (0x1D43D, 'M', 'j'), - (0x1D43E, 'M', 'k'), - (0x1D43F, 'M', 'l'), - (0x1D440, 'M', 'm'), - (0x1D441, 'M', 'n'), - (0x1D442, 'M', 'o'), - (0x1D443, 'M', 'p'), - (0x1D444, 'M', 'q'), - (0x1D445, 'M', 'r'), - (0x1D446, 'M', 's'), - (0x1D447, 'M', 't'), - (0x1D448, 'M', 'u'), - (0x1D449, 'M', 'v'), - (0x1D44A, 'M', 'w'), - (0x1D44B, 'M', 'x'), - (0x1D44C, 'M', 'y'), - (0x1D44D, 'M', 'z'), - (0x1D44E, 'M', 'a'), - (0x1D44F, 'M', 'b'), - (0x1D450, 'M', 'c'), - (0x1D451, 'M', 'd'), - (0x1D452, 'M', 'e'), - (0x1D453, 'M', 'f'), - (0x1D454, 'M', 'g'), - (0x1D455, 'X'), - (0x1D456, 'M', 'i'), - (0x1D457, 'M', 'j'), - (0x1D458, 'M', 'k'), - (0x1D459, 'M', 'l'), - (0x1D45A, 'M', 'm'), - (0x1D45B, 'M', 'n'), - (0x1D45C, 'M', 'o'), - (0x1D45D, 'M', 'p'), - (0x1D45E, 'M', 'q'), - (0x1D45F, 'M', 'r'), - (0x1D460, 'M', 's'), - (0x1D461, 'M', 't'), - (0x1D462, 'M', 'u'), - (0x1D463, 'M', 'v'), - (0x1D464, 'M', 'w'), - (0x1D465, 'M', 'x'), - (0x1D466, 'M', 'y'), - (0x1D467, 'M', 'z'), - (0x1D468, 'M', 'a'), - (0x1D469, 'M', 'b'), - (0x1D46A, 'M', 'c'), - (0x1D46B, 'M', 'd'), - (0x1D46C, 'M', 'e'), - (0x1D46D, 'M', 'f'), - (0x1D46E, 'M', 'g'), - (0x1D46F, 'M', 'h'), - (0x1D470, 'M', 'i'), - (0x1D471, 'M', 'j'), - (0x1D472, 'M', 'k'), - (0x1D473, 'M', 'l'), - (0x1D474, 'M', 'm'), - (0x1D475, 'M', 'n'), - (0x1D476, 'M', 'o'), - (0x1D477, 'M', 'p'), - (0x1D478, 'M', 'q'), - (0x1D479, 'M', 'r'), - (0x1D47A, 'M', 's'), - (0x1D47B, 'M', 't'), - (0x1D47C, 'M', 'u'), - (0x1D47D, 'M', 'v'), - (0x1D47E, 'M', 'w'), - (0x1D47F, 'M', 'x'), - (0x1D480, 'M', 'y'), - (0x1D481, 'M', 'z'), - (0x1D482, 'M', 'a'), - (0x1D483, 'M', 'b'), - (0x1D484, 'M', 'c'), - (0x1D485, 'M', 'd'), - (0x1D486, 'M', 'e'), - (0x1D487, 'M', 'f'), - (0x1D488, 'M', 'g'), - (0x1D489, 'M', 'h'), - (0x1D48A, 'M', 'i'), - (0x1D48B, 'M', 'j'), - (0x1D48C, 'M', 'k'), - (0x1D48D, 'M', 'l'), - (0x1D48E, 'M', 'm'), - (0x1D48F, 'M', 'n'), - (0x1D490, 'M', 'o'), - (0x1D491, 'M', 'p'), - (0x1D492, 'M', 'q'), - (0x1D493, 'M', 'r'), - (0x1D494, 'M', 's'), - ] - -def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D495, 'M', 't'), - (0x1D496, 'M', 'u'), - (0x1D497, 'M', 'v'), - (0x1D498, 'M', 'w'), - (0x1D499, 'M', 'x'), - (0x1D49A, 'M', 'y'), - (0x1D49B, 'M', 'z'), - (0x1D49C, 'M', 'a'), - (0x1D49D, 'X'), - (0x1D49E, 'M', 'c'), - (0x1D49F, 'M', 'd'), - (0x1D4A0, 'X'), - (0x1D4A2, 'M', 'g'), - (0x1D4A3, 'X'), - (0x1D4A5, 'M', 'j'), - (0x1D4A6, 'M', 'k'), - (0x1D4A7, 'X'), - (0x1D4A9, 'M', 'n'), - 
(0x1D4AA, 'M', 'o'), - (0x1D4AB, 'M', 'p'), - (0x1D4AC, 'M', 'q'), - (0x1D4AD, 'X'), - (0x1D4AE, 'M', 's'), - (0x1D4AF, 'M', 't'), - (0x1D4B0, 'M', 'u'), - (0x1D4B1, 'M', 'v'), - (0x1D4B2, 'M', 'w'), - (0x1D4B3, 'M', 'x'), - (0x1D4B4, 'M', 'y'), - (0x1D4B5, 'M', 'z'), - (0x1D4B6, 'M', 'a'), - (0x1D4B7, 'M', 'b'), - (0x1D4B8, 'M', 'c'), - (0x1D4B9, 'M', 'd'), - (0x1D4BA, 'X'), - (0x1D4BB, 'M', 'f'), - (0x1D4BC, 'X'), - (0x1D4BD, 'M', 'h'), - (0x1D4BE, 'M', 'i'), - (0x1D4BF, 'M', 'j'), - (0x1D4C0, 'M', 'k'), - (0x1D4C1, 'M', 'l'), - (0x1D4C2, 'M', 'm'), - (0x1D4C3, 'M', 'n'), - (0x1D4C4, 'X'), - (0x1D4C5, 'M', 'p'), - (0x1D4C6, 'M', 'q'), - (0x1D4C7, 'M', 'r'), - (0x1D4C8, 'M', 's'), - (0x1D4C9, 'M', 't'), - (0x1D4CA, 'M', 'u'), - (0x1D4CB, 'M', 'v'), - (0x1D4CC, 'M', 'w'), - (0x1D4CD, 'M', 'x'), - (0x1D4CE, 'M', 'y'), - (0x1D4CF, 'M', 'z'), - (0x1D4D0, 'M', 'a'), - (0x1D4D1, 'M', 'b'), - (0x1D4D2, 'M', 'c'), - (0x1D4D3, 'M', 'd'), - (0x1D4D4, 'M', 'e'), - (0x1D4D5, 'M', 'f'), - (0x1D4D6, 'M', 'g'), - (0x1D4D7, 'M', 'h'), - (0x1D4D8, 'M', 'i'), - (0x1D4D9, 'M', 'j'), - (0x1D4DA, 'M', 'k'), - (0x1D4DB, 'M', 'l'), - (0x1D4DC, 'M', 'm'), - (0x1D4DD, 'M', 'n'), - (0x1D4DE, 'M', 'o'), - (0x1D4DF, 'M', 'p'), - (0x1D4E0, 'M', 'q'), - (0x1D4E1, 'M', 'r'), - (0x1D4E2, 'M', 's'), - (0x1D4E3, 'M', 't'), - (0x1D4E4, 'M', 'u'), - (0x1D4E5, 'M', 'v'), - (0x1D4E6, 'M', 'w'), - (0x1D4E7, 'M', 'x'), - (0x1D4E8, 'M', 'y'), - (0x1D4E9, 'M', 'z'), - (0x1D4EA, 'M', 'a'), - (0x1D4EB, 'M', 'b'), - (0x1D4EC, 'M', 'c'), - (0x1D4ED, 'M', 'd'), - (0x1D4EE, 'M', 'e'), - (0x1D4EF, 'M', 'f'), - (0x1D4F0, 'M', 'g'), - (0x1D4F1, 'M', 'h'), - (0x1D4F2, 'M', 'i'), - (0x1D4F3, 'M', 'j'), - (0x1D4F4, 'M', 'k'), - (0x1D4F5, 'M', 'l'), - (0x1D4F6, 'M', 'm'), - (0x1D4F7, 'M', 'n'), - (0x1D4F8, 'M', 'o'), - (0x1D4F9, 'M', 'p'), - (0x1D4FA, 'M', 'q'), - (0x1D4FB, 'M', 'r'), - ] - -def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D4FC, 'M', 's'), - (0x1D4FD, 'M', 't'), - (0x1D4FE, 'M', 'u'), - (0x1D4FF, 'M', 'v'), - (0x1D500, 'M', 'w'), - (0x1D501, 'M', 'x'), - (0x1D502, 'M', 'y'), - (0x1D503, 'M', 'z'), - (0x1D504, 'M', 'a'), - (0x1D505, 'M', 'b'), - (0x1D506, 'X'), - (0x1D507, 'M', 'd'), - (0x1D508, 'M', 'e'), - (0x1D509, 'M', 'f'), - (0x1D50A, 'M', 'g'), - (0x1D50B, 'X'), - (0x1D50D, 'M', 'j'), - (0x1D50E, 'M', 'k'), - (0x1D50F, 'M', 'l'), - (0x1D510, 'M', 'm'), - (0x1D511, 'M', 'n'), - (0x1D512, 'M', 'o'), - (0x1D513, 'M', 'p'), - (0x1D514, 'M', 'q'), - (0x1D515, 'X'), - (0x1D516, 'M', 's'), - (0x1D517, 'M', 't'), - (0x1D518, 'M', 'u'), - (0x1D519, 'M', 'v'), - (0x1D51A, 'M', 'w'), - (0x1D51B, 'M', 'x'), - (0x1D51C, 'M', 'y'), - (0x1D51D, 'X'), - (0x1D51E, 'M', 'a'), - (0x1D51F, 'M', 'b'), - (0x1D520, 'M', 'c'), - (0x1D521, 'M', 'd'), - (0x1D522, 'M', 'e'), - (0x1D523, 'M', 'f'), - (0x1D524, 'M', 'g'), - (0x1D525, 'M', 'h'), - (0x1D526, 'M', 'i'), - (0x1D527, 'M', 'j'), - (0x1D528, 'M', 'k'), - (0x1D529, 'M', 'l'), - (0x1D52A, 'M', 'm'), - (0x1D52B, 'M', 'n'), - (0x1D52C, 'M', 'o'), - (0x1D52D, 'M', 'p'), - (0x1D52E, 'M', 'q'), - (0x1D52F, 'M', 'r'), - (0x1D530, 'M', 's'), - (0x1D531, 'M', 't'), - (0x1D532, 'M', 'u'), - (0x1D533, 'M', 'v'), - (0x1D534, 'M', 'w'), - (0x1D535, 'M', 'x'), - (0x1D536, 'M', 'y'), - (0x1D537, 'M', 'z'), - (0x1D538, 'M', 'a'), - (0x1D539, 'M', 'b'), - (0x1D53A, 'X'), - (0x1D53B, 'M', 'd'), - (0x1D53C, 'M', 'e'), - (0x1D53D, 'M', 'f'), - (0x1D53E, 'M', 'g'), - (0x1D53F, 'X'), - (0x1D540, 'M', 'i'), - (0x1D541, 'M', 'j'), - (0x1D542, 'M', 'k'), - (0x1D543, 'M', 'l'), 
- (0x1D544, 'M', 'm'), - (0x1D545, 'X'), - (0x1D546, 'M', 'o'), - (0x1D547, 'X'), - (0x1D54A, 'M', 's'), - (0x1D54B, 'M', 't'), - (0x1D54C, 'M', 'u'), - (0x1D54D, 'M', 'v'), - (0x1D54E, 'M', 'w'), - (0x1D54F, 'M', 'x'), - (0x1D550, 'M', 'y'), - (0x1D551, 'X'), - (0x1D552, 'M', 'a'), - (0x1D553, 'M', 'b'), - (0x1D554, 'M', 'c'), - (0x1D555, 'M', 'd'), - (0x1D556, 'M', 'e'), - (0x1D557, 'M', 'f'), - (0x1D558, 'M', 'g'), - (0x1D559, 'M', 'h'), - (0x1D55A, 'M', 'i'), - (0x1D55B, 'M', 'j'), - (0x1D55C, 'M', 'k'), - (0x1D55D, 'M', 'l'), - (0x1D55E, 'M', 'm'), - (0x1D55F, 'M', 'n'), - (0x1D560, 'M', 'o'), - (0x1D561, 'M', 'p'), - (0x1D562, 'M', 'q'), - ] - -def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D563, 'M', 'r'), - (0x1D564, 'M', 's'), - (0x1D565, 'M', 't'), - (0x1D566, 'M', 'u'), - (0x1D567, 'M', 'v'), - (0x1D568, 'M', 'w'), - (0x1D569, 'M', 'x'), - (0x1D56A, 'M', 'y'), - (0x1D56B, 'M', 'z'), - (0x1D56C, 'M', 'a'), - (0x1D56D, 'M', 'b'), - (0x1D56E, 'M', 'c'), - (0x1D56F, 'M', 'd'), - (0x1D570, 'M', 'e'), - (0x1D571, 'M', 'f'), - (0x1D572, 'M', 'g'), - (0x1D573, 'M', 'h'), - (0x1D574, 'M', 'i'), - (0x1D575, 'M', 'j'), - (0x1D576, 'M', 'k'), - (0x1D577, 'M', 'l'), - (0x1D578, 'M', 'm'), - (0x1D579, 'M', 'n'), - (0x1D57A, 'M', 'o'), - (0x1D57B, 'M', 'p'), - (0x1D57C, 'M', 'q'), - (0x1D57D, 'M', 'r'), - (0x1D57E, 'M', 's'), - (0x1D57F, 'M', 't'), - (0x1D580, 'M', 'u'), - (0x1D581, 'M', 'v'), - (0x1D582, 'M', 'w'), - (0x1D583, 'M', 'x'), - (0x1D584, 'M', 'y'), - (0x1D585, 'M', 'z'), - (0x1D586, 'M', 'a'), - (0x1D587, 'M', 'b'), - (0x1D588, 'M', 'c'), - (0x1D589, 'M', 'd'), - (0x1D58A, 'M', 'e'), - (0x1D58B, 'M', 'f'), - (0x1D58C, 'M', 'g'), - (0x1D58D, 'M', 'h'), - (0x1D58E, 'M', 'i'), - (0x1D58F, 'M', 'j'), - (0x1D590, 'M', 'k'), - (0x1D591, 'M', 'l'), - (0x1D592, 'M', 'm'), - (0x1D593, 'M', 'n'), - (0x1D594, 'M', 'o'), - (0x1D595, 'M', 'p'), - (0x1D596, 'M', 'q'), - (0x1D597, 'M', 'r'), - (0x1D598, 'M', 's'), - (0x1D599, 'M', 't'), - (0x1D59A, 'M', 'u'), - (0x1D59B, 'M', 'v'), - (0x1D59C, 'M', 'w'), - (0x1D59D, 'M', 'x'), - (0x1D59E, 'M', 'y'), - (0x1D59F, 'M', 'z'), - (0x1D5A0, 'M', 'a'), - (0x1D5A1, 'M', 'b'), - (0x1D5A2, 'M', 'c'), - (0x1D5A3, 'M', 'd'), - (0x1D5A4, 'M', 'e'), - (0x1D5A5, 'M', 'f'), - (0x1D5A6, 'M', 'g'), - (0x1D5A7, 'M', 'h'), - (0x1D5A8, 'M', 'i'), - (0x1D5A9, 'M', 'j'), - (0x1D5AA, 'M', 'k'), - (0x1D5AB, 'M', 'l'), - (0x1D5AC, 'M', 'm'), - (0x1D5AD, 'M', 'n'), - (0x1D5AE, 'M', 'o'), - (0x1D5AF, 'M', 'p'), - (0x1D5B0, 'M', 'q'), - (0x1D5B1, 'M', 'r'), - (0x1D5B2, 'M', 's'), - (0x1D5B3, 'M', 't'), - (0x1D5B4, 'M', 'u'), - (0x1D5B5, 'M', 'v'), - (0x1D5B6, 'M', 'w'), - (0x1D5B7, 'M', 'x'), - (0x1D5B8, 'M', 'y'), - (0x1D5B9, 'M', 'z'), - (0x1D5BA, 'M', 'a'), - (0x1D5BB, 'M', 'b'), - (0x1D5BC, 'M', 'c'), - (0x1D5BD, 'M', 'd'), - (0x1D5BE, 'M', 'e'), - (0x1D5BF, 'M', 'f'), - (0x1D5C0, 'M', 'g'), - (0x1D5C1, 'M', 'h'), - (0x1D5C2, 'M', 'i'), - (0x1D5C3, 'M', 'j'), - (0x1D5C4, 'M', 'k'), - (0x1D5C5, 'M', 'l'), - (0x1D5C6, 'M', 'm'), - ] - -def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D5C7, 'M', 'n'), - (0x1D5C8, 'M', 'o'), - (0x1D5C9, 'M', 'p'), - (0x1D5CA, 'M', 'q'), - (0x1D5CB, 'M', 'r'), - (0x1D5CC, 'M', 's'), - (0x1D5CD, 'M', 't'), - (0x1D5CE, 'M', 'u'), - (0x1D5CF, 'M', 'v'), - (0x1D5D0, 'M', 'w'), - (0x1D5D1, 'M', 'x'), - (0x1D5D2, 'M', 'y'), - (0x1D5D3, 'M', 'z'), - (0x1D5D4, 'M', 'a'), - (0x1D5D5, 'M', 'b'), - (0x1D5D6, 'M', 'c'), - (0x1D5D7, 'M', 'd'), - (0x1D5D8, 'M', 'e'), - (0x1D5D9, 
'M', 'f'), - (0x1D5DA, 'M', 'g'), - (0x1D5DB, 'M', 'h'), - (0x1D5DC, 'M', 'i'), - (0x1D5DD, 'M', 'j'), - (0x1D5DE, 'M', 'k'), - (0x1D5DF, 'M', 'l'), - (0x1D5E0, 'M', 'm'), - (0x1D5E1, 'M', 'n'), - (0x1D5E2, 'M', 'o'), - (0x1D5E3, 'M', 'p'), - (0x1D5E4, 'M', 'q'), - (0x1D5E5, 'M', 'r'), - (0x1D5E6, 'M', 's'), - (0x1D5E7, 'M', 't'), - (0x1D5E8, 'M', 'u'), - (0x1D5E9, 'M', 'v'), - (0x1D5EA, 'M', 'w'), - (0x1D5EB, 'M', 'x'), - (0x1D5EC, 'M', 'y'), - (0x1D5ED, 'M', 'z'), - (0x1D5EE, 'M', 'a'), - (0x1D5EF, 'M', 'b'), - (0x1D5F0, 'M', 'c'), - (0x1D5F1, 'M', 'd'), - (0x1D5F2, 'M', 'e'), - (0x1D5F3, 'M', 'f'), - (0x1D5F4, 'M', 'g'), - (0x1D5F5, 'M', 'h'), - (0x1D5F6, 'M', 'i'), - (0x1D5F7, 'M', 'j'), - (0x1D5F8, 'M', 'k'), - (0x1D5F9, 'M', 'l'), - (0x1D5FA, 'M', 'm'), - (0x1D5FB, 'M', 'n'), - (0x1D5FC, 'M', 'o'), - (0x1D5FD, 'M', 'p'), - (0x1D5FE, 'M', 'q'), - (0x1D5FF, 'M', 'r'), - (0x1D600, 'M', 's'), - (0x1D601, 'M', 't'), - (0x1D602, 'M', 'u'), - (0x1D603, 'M', 'v'), - (0x1D604, 'M', 'w'), - (0x1D605, 'M', 'x'), - (0x1D606, 'M', 'y'), - (0x1D607, 'M', 'z'), - (0x1D608, 'M', 'a'), - (0x1D609, 'M', 'b'), - (0x1D60A, 'M', 'c'), - (0x1D60B, 'M', 'd'), - (0x1D60C, 'M', 'e'), - (0x1D60D, 'M', 'f'), - (0x1D60E, 'M', 'g'), - (0x1D60F, 'M', 'h'), - (0x1D610, 'M', 'i'), - (0x1D611, 'M', 'j'), - (0x1D612, 'M', 'k'), - (0x1D613, 'M', 'l'), - (0x1D614, 'M', 'm'), - (0x1D615, 'M', 'n'), - (0x1D616, 'M', 'o'), - (0x1D617, 'M', 'p'), - (0x1D618, 'M', 'q'), - (0x1D619, 'M', 'r'), - (0x1D61A, 'M', 's'), - (0x1D61B, 'M', 't'), - (0x1D61C, 'M', 'u'), - (0x1D61D, 'M', 'v'), - (0x1D61E, 'M', 'w'), - (0x1D61F, 'M', 'x'), - (0x1D620, 'M', 'y'), - (0x1D621, 'M', 'z'), - (0x1D622, 'M', 'a'), - (0x1D623, 'M', 'b'), - (0x1D624, 'M', 'c'), - (0x1D625, 'M', 'd'), - (0x1D626, 'M', 'e'), - (0x1D627, 'M', 'f'), - (0x1D628, 'M', 'g'), - (0x1D629, 'M', 'h'), - (0x1D62A, 'M', 'i'), - ] - -def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D62B, 'M', 'j'), - (0x1D62C, 'M', 'k'), - (0x1D62D, 'M', 'l'), - (0x1D62E, 'M', 'm'), - (0x1D62F, 'M', 'n'), - (0x1D630, 'M', 'o'), - (0x1D631, 'M', 'p'), - (0x1D632, 'M', 'q'), - (0x1D633, 'M', 'r'), - (0x1D634, 'M', 's'), - (0x1D635, 'M', 't'), - (0x1D636, 'M', 'u'), - (0x1D637, 'M', 'v'), - (0x1D638, 'M', 'w'), - (0x1D639, 'M', 'x'), - (0x1D63A, 'M', 'y'), - (0x1D63B, 'M', 'z'), - (0x1D63C, 'M', 'a'), - (0x1D63D, 'M', 'b'), - (0x1D63E, 'M', 'c'), - (0x1D63F, 'M', 'd'), - (0x1D640, 'M', 'e'), - (0x1D641, 'M', 'f'), - (0x1D642, 'M', 'g'), - (0x1D643, 'M', 'h'), - (0x1D644, 'M', 'i'), - (0x1D645, 'M', 'j'), - (0x1D646, 'M', 'k'), - (0x1D647, 'M', 'l'), - (0x1D648, 'M', 'm'), - (0x1D649, 'M', 'n'), - (0x1D64A, 'M', 'o'), - (0x1D64B, 'M', 'p'), - (0x1D64C, 'M', 'q'), - (0x1D64D, 'M', 'r'), - (0x1D64E, 'M', 's'), - (0x1D64F, 'M', 't'), - (0x1D650, 'M', 'u'), - (0x1D651, 'M', 'v'), - (0x1D652, 'M', 'w'), - (0x1D653, 'M', 'x'), - (0x1D654, 'M', 'y'), - (0x1D655, 'M', 'z'), - (0x1D656, 'M', 'a'), - (0x1D657, 'M', 'b'), - (0x1D658, 'M', 'c'), - (0x1D659, 'M', 'd'), - (0x1D65A, 'M', 'e'), - (0x1D65B, 'M', 'f'), - (0x1D65C, 'M', 'g'), - (0x1D65D, 'M', 'h'), - (0x1D65E, 'M', 'i'), - (0x1D65F, 'M', 'j'), - (0x1D660, 'M', 'k'), - (0x1D661, 'M', 'l'), - (0x1D662, 'M', 'm'), - (0x1D663, 'M', 'n'), - (0x1D664, 'M', 'o'), - (0x1D665, 'M', 'p'), - (0x1D666, 'M', 'q'), - (0x1D667, 'M', 'r'), - (0x1D668, 'M', 's'), - (0x1D669, 'M', 't'), - (0x1D66A, 'M', 'u'), - (0x1D66B, 'M', 'v'), - (0x1D66C, 'M', 'w'), - (0x1D66D, 'M', 'x'), - (0x1D66E, 'M', 'y'), - (0x1D66F, 'M', 'z'), - 
(0x1D670, 'M', 'a'), - (0x1D671, 'M', 'b'), - (0x1D672, 'M', 'c'), - (0x1D673, 'M', 'd'), - (0x1D674, 'M', 'e'), - (0x1D675, 'M', 'f'), - (0x1D676, 'M', 'g'), - (0x1D677, 'M', 'h'), - (0x1D678, 'M', 'i'), - (0x1D679, 'M', 'j'), - (0x1D67A, 'M', 'k'), - (0x1D67B, 'M', 'l'), - (0x1D67C, 'M', 'm'), - (0x1D67D, 'M', 'n'), - (0x1D67E, 'M', 'o'), - (0x1D67F, 'M', 'p'), - (0x1D680, 'M', 'q'), - (0x1D681, 'M', 'r'), - (0x1D682, 'M', 's'), - (0x1D683, 'M', 't'), - (0x1D684, 'M', 'u'), - (0x1D685, 'M', 'v'), - (0x1D686, 'M', 'w'), - (0x1D687, 'M', 'x'), - (0x1D688, 'M', 'y'), - (0x1D689, 'M', 'z'), - (0x1D68A, 'M', 'a'), - (0x1D68B, 'M', 'b'), - (0x1D68C, 'M', 'c'), - (0x1D68D, 'M', 'd'), - (0x1D68E, 'M', 'e'), - ] - -def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D68F, 'M', 'f'), - (0x1D690, 'M', 'g'), - (0x1D691, 'M', 'h'), - (0x1D692, 'M', 'i'), - (0x1D693, 'M', 'j'), - (0x1D694, 'M', 'k'), - (0x1D695, 'M', 'l'), - (0x1D696, 'M', 'm'), - (0x1D697, 'M', 'n'), - (0x1D698, 'M', 'o'), - (0x1D699, 'M', 'p'), - (0x1D69A, 'M', 'q'), - (0x1D69B, 'M', 'r'), - (0x1D69C, 'M', 's'), - (0x1D69D, 'M', 't'), - (0x1D69E, 'M', 'u'), - (0x1D69F, 'M', 'v'), - (0x1D6A0, 'M', 'w'), - (0x1D6A1, 'M', 'x'), - (0x1D6A2, 'M', 'y'), - (0x1D6A3, 'M', 'z'), - (0x1D6A4, 'M', 'ı'), - (0x1D6A5, 'M', 'ȷ'), - (0x1D6A6, 'X'), - (0x1D6A8, 'M', 'α'), - (0x1D6A9, 'M', 'β'), - (0x1D6AA, 'M', 'γ'), - (0x1D6AB, 'M', 'δ'), - (0x1D6AC, 'M', 'ε'), - (0x1D6AD, 'M', 'ζ'), - (0x1D6AE, 'M', 'η'), - (0x1D6AF, 'M', 'θ'), - (0x1D6B0, 'M', 'ι'), - (0x1D6B1, 'M', 'κ'), - (0x1D6B2, 'M', 'λ'), - (0x1D6B3, 'M', 'μ'), - (0x1D6B4, 'M', 'ν'), - (0x1D6B5, 'M', 'ξ'), - (0x1D6B6, 'M', 'ο'), - (0x1D6B7, 'M', 'π'), - (0x1D6B8, 'M', 'ρ'), - (0x1D6B9, 'M', 'θ'), - (0x1D6BA, 'M', 'σ'), - (0x1D6BB, 'M', 'τ'), - (0x1D6BC, 'M', 'υ'), - (0x1D6BD, 'M', 'φ'), - (0x1D6BE, 'M', 'χ'), - (0x1D6BF, 'M', 'ψ'), - (0x1D6C0, 'M', 'ω'), - (0x1D6C1, 'M', '∇'), - (0x1D6C2, 'M', 'α'), - (0x1D6C3, 'M', 'β'), - (0x1D6C4, 'M', 'γ'), - (0x1D6C5, 'M', 'δ'), - (0x1D6C6, 'M', 'ε'), - (0x1D6C7, 'M', 'ζ'), - (0x1D6C8, 'M', 'η'), - (0x1D6C9, 'M', 'θ'), - (0x1D6CA, 'M', 'ι'), - (0x1D6CB, 'M', 'κ'), - (0x1D6CC, 'M', 'λ'), - (0x1D6CD, 'M', 'μ'), - (0x1D6CE, 'M', 'ν'), - (0x1D6CF, 'M', 'ξ'), - (0x1D6D0, 'M', 'ο'), - (0x1D6D1, 'M', 'π'), - (0x1D6D2, 'M', 'ρ'), - (0x1D6D3, 'M', 'σ'), - (0x1D6D5, 'M', 'τ'), - (0x1D6D6, 'M', 'υ'), - (0x1D6D7, 'M', 'φ'), - (0x1D6D8, 'M', 'χ'), - (0x1D6D9, 'M', 'ψ'), - (0x1D6DA, 'M', 'ω'), - (0x1D6DB, 'M', '∂'), - (0x1D6DC, 'M', 'ε'), - (0x1D6DD, 'M', 'θ'), - (0x1D6DE, 'M', 'κ'), - (0x1D6DF, 'M', 'φ'), - (0x1D6E0, 'M', 'ρ'), - (0x1D6E1, 'M', 'π'), - (0x1D6E2, 'M', 'α'), - (0x1D6E3, 'M', 'β'), - (0x1D6E4, 'M', 'γ'), - (0x1D6E5, 'M', 'δ'), - (0x1D6E6, 'M', 'ε'), - (0x1D6E7, 'M', 'ζ'), - (0x1D6E8, 'M', 'η'), - (0x1D6E9, 'M', 'θ'), - (0x1D6EA, 'M', 'ι'), - (0x1D6EB, 'M', 'κ'), - (0x1D6EC, 'M', 'λ'), - (0x1D6ED, 'M', 'μ'), - (0x1D6EE, 'M', 'ν'), - (0x1D6EF, 'M', 'ξ'), - (0x1D6F0, 'M', 'ο'), - (0x1D6F1, 'M', 'π'), - (0x1D6F2, 'M', 'ρ'), - (0x1D6F3, 'M', 'θ'), - (0x1D6F4, 'M', 'σ'), - ] - -def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D6F5, 'M', 'τ'), - (0x1D6F6, 'M', 'υ'), - (0x1D6F7, 'M', 'φ'), - (0x1D6F8, 'M', 'χ'), - (0x1D6F9, 'M', 'ψ'), - (0x1D6FA, 'M', 'ω'), - (0x1D6FB, 'M', '∇'), - (0x1D6FC, 'M', 'α'), - (0x1D6FD, 'M', 'β'), - (0x1D6FE, 'M', 'γ'), - (0x1D6FF, 'M', 'δ'), - (0x1D700, 'M', 'ε'), - (0x1D701, 'M', 'ζ'), - (0x1D702, 'M', 'η'), - (0x1D703, 'M', 'θ'), - (0x1D704, 'M', 'ι'), - 
(0x1D705, 'M', 'κ'), - (0x1D706, 'M', 'λ'), - (0x1D707, 'M', 'μ'), - (0x1D708, 'M', 'ν'), - (0x1D709, 'M', 'ξ'), - (0x1D70A, 'M', 'ο'), - (0x1D70B, 'M', 'π'), - (0x1D70C, 'M', 'ρ'), - (0x1D70D, 'M', 'σ'), - (0x1D70F, 'M', 'τ'), - (0x1D710, 'M', 'υ'), - (0x1D711, 'M', 'φ'), - (0x1D712, 'M', 'χ'), - (0x1D713, 'M', 'ψ'), - (0x1D714, 'M', 'ω'), - (0x1D715, 'M', '∂'), - (0x1D716, 'M', 'ε'), - (0x1D717, 'M', 'θ'), - (0x1D718, 'M', 'κ'), - (0x1D719, 'M', 'φ'), - (0x1D71A, 'M', 'ρ'), - (0x1D71B, 'M', 'π'), - (0x1D71C, 'M', 'α'), - (0x1D71D, 'M', 'β'), - (0x1D71E, 'M', 'γ'), - (0x1D71F, 'M', 'δ'), - (0x1D720, 'M', 'ε'), - (0x1D721, 'M', 'ζ'), - (0x1D722, 'M', 'η'), - (0x1D723, 'M', 'θ'), - (0x1D724, 'M', 'ι'), - (0x1D725, 'M', 'κ'), - (0x1D726, 'M', 'λ'), - (0x1D727, 'M', 'μ'), - (0x1D728, 'M', 'ν'), - (0x1D729, 'M', 'ξ'), - (0x1D72A, 'M', 'ο'), - (0x1D72B, 'M', 'π'), - (0x1D72C, 'M', 'ρ'), - (0x1D72D, 'M', 'θ'), - (0x1D72E, 'M', 'σ'), - (0x1D72F, 'M', 'τ'), - (0x1D730, 'M', 'υ'), - (0x1D731, 'M', 'φ'), - (0x1D732, 'M', 'χ'), - (0x1D733, 'M', 'ψ'), - (0x1D734, 'M', 'ω'), - (0x1D735, 'M', '∇'), - (0x1D736, 'M', 'α'), - (0x1D737, 'M', 'β'), - (0x1D738, 'M', 'γ'), - (0x1D739, 'M', 'δ'), - (0x1D73A, 'M', 'ε'), - (0x1D73B, 'M', 'ζ'), - (0x1D73C, 'M', 'η'), - (0x1D73D, 'M', 'θ'), - (0x1D73E, 'M', 'ι'), - (0x1D73F, 'M', 'κ'), - (0x1D740, 'M', 'λ'), - (0x1D741, 'M', 'μ'), - (0x1D742, 'M', 'ν'), - (0x1D743, 'M', 'ξ'), - (0x1D744, 'M', 'ο'), - (0x1D745, 'M', 'π'), - (0x1D746, 'M', 'ρ'), - (0x1D747, 'M', 'σ'), - (0x1D749, 'M', 'τ'), - (0x1D74A, 'M', 'υ'), - (0x1D74B, 'M', 'φ'), - (0x1D74C, 'M', 'χ'), - (0x1D74D, 'M', 'ψ'), - (0x1D74E, 'M', 'ω'), - (0x1D74F, 'M', '∂'), - (0x1D750, 'M', 'ε'), - (0x1D751, 'M', 'θ'), - (0x1D752, 'M', 'κ'), - (0x1D753, 'M', 'φ'), - (0x1D754, 'M', 'ρ'), - (0x1D755, 'M', 'π'), - (0x1D756, 'M', 'α'), - (0x1D757, 'M', 'β'), - (0x1D758, 'M', 'γ'), - (0x1D759, 'M', 'δ'), - (0x1D75A, 'M', 'ε'), - ] - -def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D75B, 'M', 'ζ'), - (0x1D75C, 'M', 'η'), - (0x1D75D, 'M', 'θ'), - (0x1D75E, 'M', 'ι'), - (0x1D75F, 'M', 'κ'), - (0x1D760, 'M', 'λ'), - (0x1D761, 'M', 'μ'), - (0x1D762, 'M', 'ν'), - (0x1D763, 'M', 'ξ'), - (0x1D764, 'M', 'ο'), - (0x1D765, 'M', 'π'), - (0x1D766, 'M', 'ρ'), - (0x1D767, 'M', 'θ'), - (0x1D768, 'M', 'σ'), - (0x1D769, 'M', 'τ'), - (0x1D76A, 'M', 'υ'), - (0x1D76B, 'M', 'φ'), - (0x1D76C, 'M', 'χ'), - (0x1D76D, 'M', 'ψ'), - (0x1D76E, 'M', 'ω'), - (0x1D76F, 'M', '∇'), - (0x1D770, 'M', 'α'), - (0x1D771, 'M', 'β'), - (0x1D772, 'M', 'γ'), - (0x1D773, 'M', 'δ'), - (0x1D774, 'M', 'ε'), - (0x1D775, 'M', 'ζ'), - (0x1D776, 'M', 'η'), - (0x1D777, 'M', 'θ'), - (0x1D778, 'M', 'ι'), - (0x1D779, 'M', 'κ'), - (0x1D77A, 'M', 'λ'), - (0x1D77B, 'M', 'μ'), - (0x1D77C, 'M', 'ν'), - (0x1D77D, 'M', 'ξ'), - (0x1D77E, 'M', 'ο'), - (0x1D77F, 'M', 'π'), - (0x1D780, 'M', 'ρ'), - (0x1D781, 'M', 'σ'), - (0x1D783, 'M', 'τ'), - (0x1D784, 'M', 'υ'), - (0x1D785, 'M', 'φ'), - (0x1D786, 'M', 'χ'), - (0x1D787, 'M', 'ψ'), - (0x1D788, 'M', 'ω'), - (0x1D789, 'M', '∂'), - (0x1D78A, 'M', 'ε'), - (0x1D78B, 'M', 'θ'), - (0x1D78C, 'M', 'κ'), - (0x1D78D, 'M', 'φ'), - (0x1D78E, 'M', 'ρ'), - (0x1D78F, 'M', 'π'), - (0x1D790, 'M', 'α'), - (0x1D791, 'M', 'β'), - (0x1D792, 'M', 'γ'), - (0x1D793, 'M', 'δ'), - (0x1D794, 'M', 'ε'), - (0x1D795, 'M', 'ζ'), - (0x1D796, 'M', 'η'), - (0x1D797, 'M', 'θ'), - (0x1D798, 'M', 'ι'), - (0x1D799, 'M', 'κ'), - (0x1D79A, 'M', 'λ'), - (0x1D79B, 'M', 'μ'), - (0x1D79C, 'M', 'ν'), - (0x1D79D, 'M', 'ξ'), - (0x1D79E, 'M', 
'ο'), - (0x1D79F, 'M', 'π'), - (0x1D7A0, 'M', 'ρ'), - (0x1D7A1, 'M', 'θ'), - (0x1D7A2, 'M', 'σ'), - (0x1D7A3, 'M', 'τ'), - (0x1D7A4, 'M', 'υ'), - (0x1D7A5, 'M', 'φ'), - (0x1D7A6, 'M', 'χ'), - (0x1D7A7, 'M', 'ψ'), - (0x1D7A8, 'M', 'ω'), - (0x1D7A9, 'M', '∇'), - (0x1D7AA, 'M', 'α'), - (0x1D7AB, 'M', 'β'), - (0x1D7AC, 'M', 'γ'), - (0x1D7AD, 'M', 'δ'), - (0x1D7AE, 'M', 'ε'), - (0x1D7AF, 'M', 'ζ'), - (0x1D7B0, 'M', 'η'), - (0x1D7B1, 'M', 'θ'), - (0x1D7B2, 'M', 'ι'), - (0x1D7B3, 'M', 'κ'), - (0x1D7B4, 'M', 'λ'), - (0x1D7B5, 'M', 'μ'), - (0x1D7B6, 'M', 'ν'), - (0x1D7B7, 'M', 'ξ'), - (0x1D7B8, 'M', 'ο'), - (0x1D7B9, 'M', 'π'), - (0x1D7BA, 'M', 'ρ'), - (0x1D7BB, 'M', 'σ'), - (0x1D7BD, 'M', 'τ'), - (0x1D7BE, 'M', 'υ'), - (0x1D7BF, 'M', 'φ'), - (0x1D7C0, 'M', 'χ'), - ] - -def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D7C1, 'M', 'ψ'), - (0x1D7C2, 'M', 'ω'), - (0x1D7C3, 'M', '∂'), - (0x1D7C4, 'M', 'ε'), - (0x1D7C5, 'M', 'θ'), - (0x1D7C6, 'M', 'κ'), - (0x1D7C7, 'M', 'φ'), - (0x1D7C8, 'M', 'ρ'), - (0x1D7C9, 'M', 'π'), - (0x1D7CA, 'M', 'ϝ'), - (0x1D7CC, 'X'), - (0x1D7CE, 'M', '0'), - (0x1D7CF, 'M', '1'), - (0x1D7D0, 'M', '2'), - (0x1D7D1, 'M', '3'), - (0x1D7D2, 'M', '4'), - (0x1D7D3, 'M', '5'), - (0x1D7D4, 'M', '6'), - (0x1D7D5, 'M', '7'), - (0x1D7D6, 'M', '8'), - (0x1D7D7, 'M', '9'), - (0x1D7D8, 'M', '0'), - (0x1D7D9, 'M', '1'), - (0x1D7DA, 'M', '2'), - (0x1D7DB, 'M', '3'), - (0x1D7DC, 'M', '4'), - (0x1D7DD, 'M', '5'), - (0x1D7DE, 'M', '6'), - (0x1D7DF, 'M', '7'), - (0x1D7E0, 'M', '8'), - (0x1D7E1, 'M', '9'), - (0x1D7E2, 'M', '0'), - (0x1D7E3, 'M', '1'), - (0x1D7E4, 'M', '2'), - (0x1D7E5, 'M', '3'), - (0x1D7E6, 'M', '4'), - (0x1D7E7, 'M', '5'), - (0x1D7E8, 'M', '6'), - (0x1D7E9, 'M', '7'), - (0x1D7EA, 'M', '8'), - (0x1D7EB, 'M', '9'), - (0x1D7EC, 'M', '0'), - (0x1D7ED, 'M', '1'), - (0x1D7EE, 'M', '2'), - (0x1D7EF, 'M', '3'), - (0x1D7F0, 'M', '4'), - (0x1D7F1, 'M', '5'), - (0x1D7F2, 'M', '6'), - (0x1D7F3, 'M', '7'), - (0x1D7F4, 'M', '8'), - (0x1D7F5, 'M', '9'), - (0x1D7F6, 'M', '0'), - (0x1D7F7, 'M', '1'), - (0x1D7F8, 'M', '2'), - (0x1D7F9, 'M', '3'), - (0x1D7FA, 'M', '4'), - (0x1D7FB, 'M', '5'), - (0x1D7FC, 'M', '6'), - (0x1D7FD, 'M', '7'), - (0x1D7FE, 'M', '8'), - (0x1D7FF, 'M', '9'), - (0x1D800, 'V'), - (0x1DA8C, 'X'), - (0x1DA9B, 'V'), - (0x1DAA0, 'X'), - (0x1DAA1, 'V'), - (0x1DAB0, 'X'), - (0x1DF00, 'V'), - (0x1DF1F, 'X'), - (0x1E000, 'V'), - (0x1E007, 'X'), - (0x1E008, 'V'), - (0x1E019, 'X'), - (0x1E01B, 'V'), - (0x1E022, 'X'), - (0x1E023, 'V'), - (0x1E025, 'X'), - (0x1E026, 'V'), - (0x1E02B, 'X'), - (0x1E100, 'V'), - (0x1E12D, 'X'), - (0x1E130, 'V'), - (0x1E13E, 'X'), - (0x1E140, 'V'), - (0x1E14A, 'X'), - (0x1E14E, 'V'), - (0x1E150, 'X'), - (0x1E290, 'V'), - (0x1E2AF, 'X'), - (0x1E2C0, 'V'), - (0x1E2FA, 'X'), - (0x1E2FF, 'V'), - (0x1E300, 'X'), - (0x1E7E0, 'V'), - (0x1E7E7, 'X'), - (0x1E7E8, 'V'), - (0x1E7EC, 'X'), - (0x1E7ED, 'V'), - (0x1E7EF, 'X'), - (0x1E7F0, 'V'), - ] - -def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E7FF, 'X'), - (0x1E800, 'V'), - (0x1E8C5, 'X'), - (0x1E8C7, 'V'), - (0x1E8D7, 'X'), - (0x1E900, 'M', '𞤢'), - (0x1E901, 'M', '𞤣'), - (0x1E902, 'M', '𞤤'), - (0x1E903, 'M', '𞤥'), - (0x1E904, 'M', '𞤦'), - (0x1E905, 'M', '𞤧'), - (0x1E906, 'M', '𞤨'), - (0x1E907, 'M', '𞤩'), - (0x1E908, 'M', '𞤪'), - (0x1E909, 'M', '𞤫'), - (0x1E90A, 'M', '𞤬'), - (0x1E90B, 'M', '𞤭'), - (0x1E90C, 'M', '𞤮'), - (0x1E90D, 'M', '𞤯'), - (0x1E90E, 'M', '𞤰'), - (0x1E90F, 'M', '𞤱'), - (0x1E910, 'M', '𞤲'), - (0x1E911, 'M', '𞤳'), - (0x1E912, 
'M', '𞤴'), - (0x1E913, 'M', '𞤵'), - (0x1E914, 'M', '𞤶'), - (0x1E915, 'M', '𞤷'), - (0x1E916, 'M', '𞤸'), - (0x1E917, 'M', '𞤹'), - (0x1E918, 'M', '𞤺'), - (0x1E919, 'M', '𞤻'), - (0x1E91A, 'M', '𞤼'), - (0x1E91B, 'M', '𞤽'), - (0x1E91C, 'M', '𞤾'), - (0x1E91D, 'M', '𞤿'), - (0x1E91E, 'M', '𞥀'), - (0x1E91F, 'M', '𞥁'), - (0x1E920, 'M', '𞥂'), - (0x1E921, 'M', '𞥃'), - (0x1E922, 'V'), - (0x1E94C, 'X'), - (0x1E950, 'V'), - (0x1E95A, 'X'), - (0x1E95E, 'V'), - (0x1E960, 'X'), - (0x1EC71, 'V'), - (0x1ECB5, 'X'), - (0x1ED01, 'V'), - (0x1ED3E, 'X'), - (0x1EE00, 'M', 'ا'), - (0x1EE01, 'M', 'ب'), - (0x1EE02, 'M', 'ج'), - (0x1EE03, 'M', 'د'), - (0x1EE04, 'X'), - (0x1EE05, 'M', 'و'), - (0x1EE06, 'M', 'ز'), - (0x1EE07, 'M', 'ح'), - (0x1EE08, 'M', 'ط'), - (0x1EE09, 'M', 'ي'), - (0x1EE0A, 'M', 'ك'), - (0x1EE0B, 'M', 'ل'), - (0x1EE0C, 'M', 'م'), - (0x1EE0D, 'M', 'ن'), - (0x1EE0E, 'M', 'س'), - (0x1EE0F, 'M', 'ع'), - (0x1EE10, 'M', 'ف'), - (0x1EE11, 'M', 'ص'), - (0x1EE12, 'M', 'ق'), - (0x1EE13, 'M', 'ر'), - (0x1EE14, 'M', 'ش'), - (0x1EE15, 'M', 'ت'), - (0x1EE16, 'M', 'ث'), - (0x1EE17, 'M', 'خ'), - (0x1EE18, 'M', 'ذ'), - (0x1EE19, 'M', 'ض'), - (0x1EE1A, 'M', 'ظ'), - (0x1EE1B, 'M', 'غ'), - (0x1EE1C, 'M', 'ٮ'), - (0x1EE1D, 'M', 'ں'), - (0x1EE1E, 'M', 'ڡ'), - (0x1EE1F, 'M', 'ٯ'), - (0x1EE20, 'X'), - (0x1EE21, 'M', 'ب'), - (0x1EE22, 'M', 'ج'), - (0x1EE23, 'X'), - (0x1EE24, 'M', 'ه'), - (0x1EE25, 'X'), - (0x1EE27, 'M', 'ح'), - (0x1EE28, 'X'), - (0x1EE29, 'M', 'ي'), - (0x1EE2A, 'M', 'ك'), - (0x1EE2B, 'M', 'ل'), - (0x1EE2C, 'M', 'م'), - (0x1EE2D, 'M', 'ن'), - (0x1EE2E, 'M', 'س'), - (0x1EE2F, 'M', 'ع'), - (0x1EE30, 'M', 'ف'), - (0x1EE31, 'M', 'ص'), - (0x1EE32, 'M', 'ق'), - (0x1EE33, 'X'), - ] - -def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EE34, 'M', 'ش'), - (0x1EE35, 'M', 'ت'), - (0x1EE36, 'M', 'ث'), - (0x1EE37, 'M', 'خ'), - (0x1EE38, 'X'), - (0x1EE39, 'M', 'ض'), - (0x1EE3A, 'X'), - (0x1EE3B, 'M', 'غ'), - (0x1EE3C, 'X'), - (0x1EE42, 'M', 'ج'), - (0x1EE43, 'X'), - (0x1EE47, 'M', 'ح'), - (0x1EE48, 'X'), - (0x1EE49, 'M', 'ي'), - (0x1EE4A, 'X'), - (0x1EE4B, 'M', 'ل'), - (0x1EE4C, 'X'), - (0x1EE4D, 'M', 'ن'), - (0x1EE4E, 'M', 'س'), - (0x1EE4F, 'M', 'ع'), - (0x1EE50, 'X'), - (0x1EE51, 'M', 'ص'), - (0x1EE52, 'M', 'ق'), - (0x1EE53, 'X'), - (0x1EE54, 'M', 'ش'), - (0x1EE55, 'X'), - (0x1EE57, 'M', 'خ'), - (0x1EE58, 'X'), - (0x1EE59, 'M', 'ض'), - (0x1EE5A, 'X'), - (0x1EE5B, 'M', 'غ'), - (0x1EE5C, 'X'), - (0x1EE5D, 'M', 'ں'), - (0x1EE5E, 'X'), - (0x1EE5F, 'M', 'ٯ'), - (0x1EE60, 'X'), - (0x1EE61, 'M', 'ب'), - (0x1EE62, 'M', 'ج'), - (0x1EE63, 'X'), - (0x1EE64, 'M', 'ه'), - (0x1EE65, 'X'), - (0x1EE67, 'M', 'ح'), - (0x1EE68, 'M', 'ط'), - (0x1EE69, 'M', 'ي'), - (0x1EE6A, 'M', 'ك'), - (0x1EE6B, 'X'), - (0x1EE6C, 'M', 'م'), - (0x1EE6D, 'M', 'ن'), - (0x1EE6E, 'M', 'س'), - (0x1EE6F, 'M', 'ع'), - (0x1EE70, 'M', 'ف'), - (0x1EE71, 'M', 'ص'), - (0x1EE72, 'M', 'ق'), - (0x1EE73, 'X'), - (0x1EE74, 'M', 'ش'), - (0x1EE75, 'M', 'ت'), - (0x1EE76, 'M', 'ث'), - (0x1EE77, 'M', 'خ'), - (0x1EE78, 'X'), - (0x1EE79, 'M', 'ض'), - (0x1EE7A, 'M', 'ظ'), - (0x1EE7B, 'M', 'غ'), - (0x1EE7C, 'M', 'ٮ'), - (0x1EE7D, 'X'), - (0x1EE7E, 'M', 'ڡ'), - (0x1EE7F, 'X'), - (0x1EE80, 'M', 'ا'), - (0x1EE81, 'M', 'ب'), - (0x1EE82, 'M', 'ج'), - (0x1EE83, 'M', 'د'), - (0x1EE84, 'M', 'ه'), - (0x1EE85, 'M', 'و'), - (0x1EE86, 'M', 'ز'), - (0x1EE87, 'M', 'ح'), - (0x1EE88, 'M', 'ط'), - (0x1EE89, 'M', 'ي'), - (0x1EE8A, 'X'), - (0x1EE8B, 'M', 'ل'), - (0x1EE8C, 'M', 'م'), - (0x1EE8D, 'M', 'ن'), - (0x1EE8E, 'M', 'س'), - (0x1EE8F, 'M', 'ع'), - (0x1EE90, 'M', 
'ف'), - (0x1EE91, 'M', 'ص'), - (0x1EE92, 'M', 'ق'), - (0x1EE93, 'M', 'ر'), - (0x1EE94, 'M', 'ش'), - (0x1EE95, 'M', 'ت'), - (0x1EE96, 'M', 'ث'), - (0x1EE97, 'M', 'خ'), - (0x1EE98, 'M', 'ذ'), - (0x1EE99, 'M', 'ض'), - (0x1EE9A, 'M', 'ظ'), - (0x1EE9B, 'M', 'غ'), - (0x1EE9C, 'X'), - (0x1EEA1, 'M', 'ب'), - (0x1EEA2, 'M', 'ج'), - (0x1EEA3, 'M', 'د'), - (0x1EEA4, 'X'), - (0x1EEA5, 'M', 'و'), - ] - -def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EEA6, 'M', 'ز'), - (0x1EEA7, 'M', 'ح'), - (0x1EEA8, 'M', 'ط'), - (0x1EEA9, 'M', 'ي'), - (0x1EEAA, 'X'), - (0x1EEAB, 'M', 'ل'), - (0x1EEAC, 'M', 'م'), - (0x1EEAD, 'M', 'ن'), - (0x1EEAE, 'M', 'س'), - (0x1EEAF, 'M', 'ع'), - (0x1EEB0, 'M', 'ف'), - (0x1EEB1, 'M', 'ص'), - (0x1EEB2, 'M', 'ق'), - (0x1EEB3, 'M', 'ر'), - (0x1EEB4, 'M', 'ش'), - (0x1EEB5, 'M', 'ت'), - (0x1EEB6, 'M', 'ث'), - (0x1EEB7, 'M', 'خ'), - (0x1EEB8, 'M', 'ذ'), - (0x1EEB9, 'M', 'ض'), - (0x1EEBA, 'M', 'ظ'), - (0x1EEBB, 'M', 'غ'), - (0x1EEBC, 'X'), - (0x1EEF0, 'V'), - (0x1EEF2, 'X'), - (0x1F000, 'V'), - (0x1F02C, 'X'), - (0x1F030, 'V'), - (0x1F094, 'X'), - (0x1F0A0, 'V'), - (0x1F0AF, 'X'), - (0x1F0B1, 'V'), - (0x1F0C0, 'X'), - (0x1F0C1, 'V'), - (0x1F0D0, 'X'), - (0x1F0D1, 'V'), - (0x1F0F6, 'X'), - (0x1F101, '3', '0,'), - (0x1F102, '3', '1,'), - (0x1F103, '3', '2,'), - (0x1F104, '3', '3,'), - (0x1F105, '3', '4,'), - (0x1F106, '3', '5,'), - (0x1F107, '3', '6,'), - (0x1F108, '3', '7,'), - (0x1F109, '3', '8,'), - (0x1F10A, '3', '9,'), - (0x1F10B, 'V'), - (0x1F110, '3', '(a)'), - (0x1F111, '3', '(b)'), - (0x1F112, '3', '(c)'), - (0x1F113, '3', '(d)'), - (0x1F114, '3', '(e)'), - (0x1F115, '3', '(f)'), - (0x1F116, '3', '(g)'), - (0x1F117, '3', '(h)'), - (0x1F118, '3', '(i)'), - (0x1F119, '3', '(j)'), - (0x1F11A, '3', '(k)'), - (0x1F11B, '3', '(l)'), - (0x1F11C, '3', '(m)'), - (0x1F11D, '3', '(n)'), - (0x1F11E, '3', '(o)'), - (0x1F11F, '3', '(p)'), - (0x1F120, '3', '(q)'), - (0x1F121, '3', '(r)'), - (0x1F122, '3', '(s)'), - (0x1F123, '3', '(t)'), - (0x1F124, '3', '(u)'), - (0x1F125, '3', '(v)'), - (0x1F126, '3', '(w)'), - (0x1F127, '3', '(x)'), - (0x1F128, '3', '(y)'), - (0x1F129, '3', '(z)'), - (0x1F12A, 'M', '〔s〕'), - (0x1F12B, 'M', 'c'), - (0x1F12C, 'M', 'r'), - (0x1F12D, 'M', 'cd'), - (0x1F12E, 'M', 'wz'), - (0x1F12F, 'V'), - (0x1F130, 'M', 'a'), - (0x1F131, 'M', 'b'), - (0x1F132, 'M', 'c'), - (0x1F133, 'M', 'd'), - (0x1F134, 'M', 'e'), - (0x1F135, 'M', 'f'), - (0x1F136, 'M', 'g'), - (0x1F137, 'M', 'h'), - (0x1F138, 'M', 'i'), - (0x1F139, 'M', 'j'), - (0x1F13A, 'M', 'k'), - (0x1F13B, 'M', 'l'), - (0x1F13C, 'M', 'm'), - (0x1F13D, 'M', 'n'), - (0x1F13E, 'M', 'o'), - (0x1F13F, 'M', 'p'), - (0x1F140, 'M', 'q'), - (0x1F141, 'M', 'r'), - (0x1F142, 'M', 's'), - (0x1F143, 'M', 't'), - ] - -def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F144, 'M', 'u'), - (0x1F145, 'M', 'v'), - (0x1F146, 'M', 'w'), - (0x1F147, 'M', 'x'), - (0x1F148, 'M', 'y'), - (0x1F149, 'M', 'z'), - (0x1F14A, 'M', 'hv'), - (0x1F14B, 'M', 'mv'), - (0x1F14C, 'M', 'sd'), - (0x1F14D, 'M', 'ss'), - (0x1F14E, 'M', 'ppv'), - (0x1F14F, 'M', 'wc'), - (0x1F150, 'V'), - (0x1F16A, 'M', 'mc'), - (0x1F16B, 'M', 'md'), - (0x1F16C, 'M', 'mr'), - (0x1F16D, 'V'), - (0x1F190, 'M', 'dj'), - (0x1F191, 'V'), - (0x1F1AE, 'X'), - (0x1F1E6, 'V'), - (0x1F200, 'M', 'ほか'), - (0x1F201, 'M', 'ココ'), - (0x1F202, 'M', 'サ'), - (0x1F203, 'X'), - (0x1F210, 'M', '手'), - (0x1F211, 'M', '字'), - (0x1F212, 'M', '双'), - (0x1F213, 'M', 'デ'), - (0x1F214, 'M', '二'), - (0x1F215, 'M', '多'), - (0x1F216, 'M', 
'解'), - (0x1F217, 'M', '天'), - (0x1F218, 'M', '交'), - (0x1F219, 'M', '映'), - (0x1F21A, 'M', '無'), - (0x1F21B, 'M', '料'), - (0x1F21C, 'M', '前'), - (0x1F21D, 'M', '後'), - (0x1F21E, 'M', '再'), - (0x1F21F, 'M', '新'), - (0x1F220, 'M', '初'), - (0x1F221, 'M', '終'), - (0x1F222, 'M', '生'), - (0x1F223, 'M', '販'), - (0x1F224, 'M', '声'), - (0x1F225, 'M', '吹'), - (0x1F226, 'M', '演'), - (0x1F227, 'M', '投'), - (0x1F228, 'M', '捕'), - (0x1F229, 'M', '一'), - (0x1F22A, 'M', '三'), - (0x1F22B, 'M', '遊'), - (0x1F22C, 'M', '左'), - (0x1F22D, 'M', '中'), - (0x1F22E, 'M', '右'), - (0x1F22F, 'M', '指'), - (0x1F230, 'M', '走'), - (0x1F231, 'M', '打'), - (0x1F232, 'M', '禁'), - (0x1F233, 'M', '空'), - (0x1F234, 'M', '合'), - (0x1F235, 'M', '満'), - (0x1F236, 'M', '有'), - (0x1F237, 'M', '月'), - (0x1F238, 'M', '申'), - (0x1F239, 'M', '割'), - (0x1F23A, 'M', '営'), - (0x1F23B, 'M', '配'), - (0x1F23C, 'X'), - (0x1F240, 'M', '〔本〕'), - (0x1F241, 'M', '〔三〕'), - (0x1F242, 'M', '〔二〕'), - (0x1F243, 'M', '〔安〕'), - (0x1F244, 'M', '〔点〕'), - (0x1F245, 'M', '〔打〕'), - (0x1F246, 'M', '〔盗〕'), - (0x1F247, 'M', '〔勝〕'), - (0x1F248, 'M', '〔敗〕'), - (0x1F249, 'X'), - (0x1F250, 'M', '得'), - (0x1F251, 'M', '可'), - (0x1F252, 'X'), - (0x1F260, 'V'), - (0x1F266, 'X'), - (0x1F300, 'V'), - (0x1F6D8, 'X'), - (0x1F6DD, 'V'), - (0x1F6ED, 'X'), - (0x1F6F0, 'V'), - (0x1F6FD, 'X'), - (0x1F700, 'V'), - (0x1F774, 'X'), - (0x1F780, 'V'), - (0x1F7D9, 'X'), - (0x1F7E0, 'V'), - (0x1F7EC, 'X'), - (0x1F7F0, 'V'), - (0x1F7F1, 'X'), - (0x1F800, 'V'), - ] - -def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F80C, 'X'), - (0x1F810, 'V'), - (0x1F848, 'X'), - (0x1F850, 'V'), - (0x1F85A, 'X'), - (0x1F860, 'V'), - (0x1F888, 'X'), - (0x1F890, 'V'), - (0x1F8AE, 'X'), - (0x1F8B0, 'V'), - (0x1F8B2, 'X'), - (0x1F900, 'V'), - (0x1FA54, 'X'), - (0x1FA60, 'V'), - (0x1FA6E, 'X'), - (0x1FA70, 'V'), - (0x1FA75, 'X'), - (0x1FA78, 'V'), - (0x1FA7D, 'X'), - (0x1FA80, 'V'), - (0x1FA87, 'X'), - (0x1FA90, 'V'), - (0x1FAAD, 'X'), - (0x1FAB0, 'V'), - (0x1FABB, 'X'), - (0x1FAC0, 'V'), - (0x1FAC6, 'X'), - (0x1FAD0, 'V'), - (0x1FADA, 'X'), - (0x1FAE0, 'V'), - (0x1FAE8, 'X'), - (0x1FAF0, 'V'), - (0x1FAF7, 'X'), - (0x1FB00, 'V'), - (0x1FB93, 'X'), - (0x1FB94, 'V'), - (0x1FBCB, 'X'), - (0x1FBF0, 'M', '0'), - (0x1FBF1, 'M', '1'), - (0x1FBF2, 'M', '2'), - (0x1FBF3, 'M', '3'), - (0x1FBF4, 'M', '4'), - (0x1FBF5, 'M', '5'), - (0x1FBF6, 'M', '6'), - (0x1FBF7, 'M', '7'), - (0x1FBF8, 'M', '8'), - (0x1FBF9, 'M', '9'), - (0x1FBFA, 'X'), - (0x20000, 'V'), - (0x2A6E0, 'X'), - (0x2A700, 'V'), - (0x2B739, 'X'), - (0x2B740, 'V'), - (0x2B81E, 'X'), - (0x2B820, 'V'), - (0x2CEA2, 'X'), - (0x2CEB0, 'V'), - (0x2EBE1, 'X'), - (0x2F800, 'M', '丽'), - (0x2F801, 'M', '丸'), - (0x2F802, 'M', '乁'), - (0x2F803, 'M', '𠄢'), - (0x2F804, 'M', '你'), - (0x2F805, 'M', '侮'), - (0x2F806, 'M', '侻'), - (0x2F807, 'M', '倂'), - (0x2F808, 'M', '偺'), - (0x2F809, 'M', '備'), - (0x2F80A, 'M', '僧'), - (0x2F80B, 'M', '像'), - (0x2F80C, 'M', '㒞'), - (0x2F80D, 'M', '𠘺'), - (0x2F80E, 'M', '免'), - (0x2F80F, 'M', '兔'), - (0x2F810, 'M', '兤'), - (0x2F811, 'M', '具'), - (0x2F812, 'M', '𠔜'), - (0x2F813, 'M', '㒹'), - (0x2F814, 'M', '內'), - (0x2F815, 'M', '再'), - (0x2F816, 'M', '𠕋'), - (0x2F817, 'M', '冗'), - (0x2F818, 'M', '冤'), - (0x2F819, 'M', '仌'), - (0x2F81A, 'M', '冬'), - (0x2F81B, 'M', '况'), - (0x2F81C, 'M', '𩇟'), - (0x2F81D, 'M', '凵'), - (0x2F81E, 'M', '刃'), - (0x2F81F, 'M', '㓟'), - (0x2F820, 'M', '刻'), - (0x2F821, 'M', '剆'), - (0x2F822, 'M', '割'), - (0x2F823, 'M', '剷'), - (0x2F824, 'M', '㔕'), - (0x2F825, 'M', '勇'), - (0x2F826, 
'M', '勉'), - (0x2F827, 'M', '勤'), - (0x2F828, 'M', '勺'), - (0x2F829, 'M', '包'), - ] - -def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F82A, 'M', '匆'), - (0x2F82B, 'M', '北'), - (0x2F82C, 'M', '卉'), - (0x2F82D, 'M', '卑'), - (0x2F82E, 'M', '博'), - (0x2F82F, 'M', '即'), - (0x2F830, 'M', '卽'), - (0x2F831, 'M', '卿'), - (0x2F834, 'M', '𠨬'), - (0x2F835, 'M', '灰'), - (0x2F836, 'M', '及'), - (0x2F837, 'M', '叟'), - (0x2F838, 'M', '𠭣'), - (0x2F839, 'M', '叫'), - (0x2F83A, 'M', '叱'), - (0x2F83B, 'M', '吆'), - (0x2F83C, 'M', '咞'), - (0x2F83D, 'M', '吸'), - (0x2F83E, 'M', '呈'), - (0x2F83F, 'M', '周'), - (0x2F840, 'M', '咢'), - (0x2F841, 'M', '哶'), - (0x2F842, 'M', '唐'), - (0x2F843, 'M', '啓'), - (0x2F844, 'M', '啣'), - (0x2F845, 'M', '善'), - (0x2F847, 'M', '喙'), - (0x2F848, 'M', '喫'), - (0x2F849, 'M', '喳'), - (0x2F84A, 'M', '嗂'), - (0x2F84B, 'M', '圖'), - (0x2F84C, 'M', '嘆'), - (0x2F84D, 'M', '圗'), - (0x2F84E, 'M', '噑'), - (0x2F84F, 'M', '噴'), - (0x2F850, 'M', '切'), - (0x2F851, 'M', '壮'), - (0x2F852, 'M', '城'), - (0x2F853, 'M', '埴'), - (0x2F854, 'M', '堍'), - (0x2F855, 'M', '型'), - (0x2F856, 'M', '堲'), - (0x2F857, 'M', '報'), - (0x2F858, 'M', '墬'), - (0x2F859, 'M', '𡓤'), - (0x2F85A, 'M', '売'), - (0x2F85B, 'M', '壷'), - (0x2F85C, 'M', '夆'), - (0x2F85D, 'M', '多'), - (0x2F85E, 'M', '夢'), - (0x2F85F, 'M', '奢'), - (0x2F860, 'M', '𡚨'), - (0x2F861, 'M', '𡛪'), - (0x2F862, 'M', '姬'), - (0x2F863, 'M', '娛'), - (0x2F864, 'M', '娧'), - (0x2F865, 'M', '姘'), - (0x2F866, 'M', '婦'), - (0x2F867, 'M', '㛮'), - (0x2F868, 'X'), - (0x2F869, 'M', '嬈'), - (0x2F86A, 'M', '嬾'), - (0x2F86C, 'M', '𡧈'), - (0x2F86D, 'M', '寃'), - (0x2F86E, 'M', '寘'), - (0x2F86F, 'M', '寧'), - (0x2F870, 'M', '寳'), - (0x2F871, 'M', '𡬘'), - (0x2F872, 'M', '寿'), - (0x2F873, 'M', '将'), - (0x2F874, 'X'), - (0x2F875, 'M', '尢'), - (0x2F876, 'M', '㞁'), - (0x2F877, 'M', '屠'), - (0x2F878, 'M', '屮'), - (0x2F879, 'M', '峀'), - (0x2F87A, 'M', '岍'), - (0x2F87B, 'M', '𡷤'), - (0x2F87C, 'M', '嵃'), - (0x2F87D, 'M', '𡷦'), - (0x2F87E, 'M', '嵮'), - (0x2F87F, 'M', '嵫'), - (0x2F880, 'M', '嵼'), - (0x2F881, 'M', '巡'), - (0x2F882, 'M', '巢'), - (0x2F883, 'M', '㠯'), - (0x2F884, 'M', '巽'), - (0x2F885, 'M', '帨'), - (0x2F886, 'M', '帽'), - (0x2F887, 'M', '幩'), - (0x2F888, 'M', '㡢'), - (0x2F889, 'M', '𢆃'), - (0x2F88A, 'M', '㡼'), - (0x2F88B, 'M', '庰'), - (0x2F88C, 'M', '庳'), - (0x2F88D, 'M', '庶'), - (0x2F88E, 'M', '廊'), - (0x2F88F, 'M', '𪎒'), - (0x2F890, 'M', '廾'), - (0x2F891, 'M', '𢌱'), - ] - -def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F893, 'M', '舁'), - (0x2F894, 'M', '弢'), - (0x2F896, 'M', '㣇'), - (0x2F897, 'M', '𣊸'), - (0x2F898, 'M', '𦇚'), - (0x2F899, 'M', '形'), - (0x2F89A, 'M', '彫'), - (0x2F89B, 'M', '㣣'), - (0x2F89C, 'M', '徚'), - (0x2F89D, 'M', '忍'), - (0x2F89E, 'M', '志'), - (0x2F89F, 'M', '忹'), - (0x2F8A0, 'M', '悁'), - (0x2F8A1, 'M', '㤺'), - (0x2F8A2, 'M', '㤜'), - (0x2F8A3, 'M', '悔'), - (0x2F8A4, 'M', '𢛔'), - (0x2F8A5, 'M', '惇'), - (0x2F8A6, 'M', '慈'), - (0x2F8A7, 'M', '慌'), - (0x2F8A8, 'M', '慎'), - (0x2F8A9, 'M', '慌'), - (0x2F8AA, 'M', '慺'), - (0x2F8AB, 'M', '憎'), - (0x2F8AC, 'M', '憲'), - (0x2F8AD, 'M', '憤'), - (0x2F8AE, 'M', '憯'), - (0x2F8AF, 'M', '懞'), - (0x2F8B0, 'M', '懲'), - (0x2F8B1, 'M', '懶'), - (0x2F8B2, 'M', '成'), - (0x2F8B3, 'M', '戛'), - (0x2F8B4, 'M', '扝'), - (0x2F8B5, 'M', '抱'), - (0x2F8B6, 'M', '拔'), - (0x2F8B7, 'M', '捐'), - (0x2F8B8, 'M', '𢬌'), - (0x2F8B9, 'M', '挽'), - (0x2F8BA, 'M', '拼'), - (0x2F8BB, 'M', '捨'), - (0x2F8BC, 'M', '掃'), - (0x2F8BD, 'M', '揤'), - (0x2F8BE, 'M', '𢯱'), - (0x2F8BF, 'M', 
'搢'), - (0x2F8C0, 'M', '揅'), - (0x2F8C1, 'M', '掩'), - (0x2F8C2, 'M', '㨮'), - (0x2F8C3, 'M', '摩'), - (0x2F8C4, 'M', '摾'), - (0x2F8C5, 'M', '撝'), - (0x2F8C6, 'M', '摷'), - (0x2F8C7, 'M', '㩬'), - (0x2F8C8, 'M', '敏'), - (0x2F8C9, 'M', '敬'), - (0x2F8CA, 'M', '𣀊'), - (0x2F8CB, 'M', '旣'), - (0x2F8CC, 'M', '書'), - (0x2F8CD, 'M', '晉'), - (0x2F8CE, 'M', '㬙'), - (0x2F8CF, 'M', '暑'), - (0x2F8D0, 'M', '㬈'), - (0x2F8D1, 'M', '㫤'), - (0x2F8D2, 'M', '冒'), - (0x2F8D3, 'M', '冕'), - (0x2F8D4, 'M', '最'), - (0x2F8D5, 'M', '暜'), - (0x2F8D6, 'M', '肭'), - (0x2F8D7, 'M', '䏙'), - (0x2F8D8, 'M', '朗'), - (0x2F8D9, 'M', '望'), - (0x2F8DA, 'M', '朡'), - (0x2F8DB, 'M', '杞'), - (0x2F8DC, 'M', '杓'), - (0x2F8DD, 'M', '𣏃'), - (0x2F8DE, 'M', '㭉'), - (0x2F8DF, 'M', '柺'), - (0x2F8E0, 'M', '枅'), - (0x2F8E1, 'M', '桒'), - (0x2F8E2, 'M', '梅'), - (0x2F8E3, 'M', '𣑭'), - (0x2F8E4, 'M', '梎'), - (0x2F8E5, 'M', '栟'), - (0x2F8E6, 'M', '椔'), - (0x2F8E7, 'M', '㮝'), - (0x2F8E8, 'M', '楂'), - (0x2F8E9, 'M', '榣'), - (0x2F8EA, 'M', '槪'), - (0x2F8EB, 'M', '檨'), - (0x2F8EC, 'M', '𣚣'), - (0x2F8ED, 'M', '櫛'), - (0x2F8EE, 'M', '㰘'), - (0x2F8EF, 'M', '次'), - (0x2F8F0, 'M', '𣢧'), - (0x2F8F1, 'M', '歔'), - (0x2F8F2, 'M', '㱎'), - (0x2F8F3, 'M', '歲'), - (0x2F8F4, 'M', '殟'), - (0x2F8F5, 'M', '殺'), - (0x2F8F6, 'M', '殻'), - (0x2F8F7, 'M', '𣪍'), - ] - -def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F8F8, 'M', '𡴋'), - (0x2F8F9, 'M', '𣫺'), - (0x2F8FA, 'M', '汎'), - (0x2F8FB, 'M', '𣲼'), - (0x2F8FC, 'M', '沿'), - (0x2F8FD, 'M', '泍'), - (0x2F8FE, 'M', '汧'), - (0x2F8FF, 'M', '洖'), - (0x2F900, 'M', '派'), - (0x2F901, 'M', '海'), - (0x2F902, 'M', '流'), - (0x2F903, 'M', '浩'), - (0x2F904, 'M', '浸'), - (0x2F905, 'M', '涅'), - (0x2F906, 'M', '𣴞'), - (0x2F907, 'M', '洴'), - (0x2F908, 'M', '港'), - (0x2F909, 'M', '湮'), - (0x2F90A, 'M', '㴳'), - (0x2F90B, 'M', '滋'), - (0x2F90C, 'M', '滇'), - (0x2F90D, 'M', '𣻑'), - (0x2F90E, 'M', '淹'), - (0x2F90F, 'M', '潮'), - (0x2F910, 'M', '𣽞'), - (0x2F911, 'M', '𣾎'), - (0x2F912, 'M', '濆'), - (0x2F913, 'M', '瀹'), - (0x2F914, 'M', '瀞'), - (0x2F915, 'M', '瀛'), - (0x2F916, 'M', '㶖'), - (0x2F917, 'M', '灊'), - (0x2F918, 'M', '災'), - (0x2F919, 'M', '灷'), - (0x2F91A, 'M', '炭'), - (0x2F91B, 'M', '𠔥'), - (0x2F91C, 'M', '煅'), - (0x2F91D, 'M', '𤉣'), - (0x2F91E, 'M', '熜'), - (0x2F91F, 'X'), - (0x2F920, 'M', '爨'), - (0x2F921, 'M', '爵'), - (0x2F922, 'M', '牐'), - (0x2F923, 'M', '𤘈'), - (0x2F924, 'M', '犀'), - (0x2F925, 'M', '犕'), - (0x2F926, 'M', '𤜵'), - (0x2F927, 'M', '𤠔'), - (0x2F928, 'M', '獺'), - (0x2F929, 'M', '王'), - (0x2F92A, 'M', '㺬'), - (0x2F92B, 'M', '玥'), - (0x2F92C, 'M', '㺸'), - (0x2F92E, 'M', '瑇'), - (0x2F92F, 'M', '瑜'), - (0x2F930, 'M', '瑱'), - (0x2F931, 'M', '璅'), - (0x2F932, 'M', '瓊'), - (0x2F933, 'M', '㼛'), - (0x2F934, 'M', '甤'), - (0x2F935, 'M', '𤰶'), - (0x2F936, 'M', '甾'), - (0x2F937, 'M', '𤲒'), - (0x2F938, 'M', '異'), - (0x2F939, 'M', '𢆟'), - (0x2F93A, 'M', '瘐'), - (0x2F93B, 'M', '𤾡'), - (0x2F93C, 'M', '𤾸'), - (0x2F93D, 'M', '𥁄'), - (0x2F93E, 'M', '㿼'), - (0x2F93F, 'M', '䀈'), - (0x2F940, 'M', '直'), - (0x2F941, 'M', '𥃳'), - (0x2F942, 'M', '𥃲'), - (0x2F943, 'M', '𥄙'), - (0x2F944, 'M', '𥄳'), - (0x2F945, 'M', '眞'), - (0x2F946, 'M', '真'), - (0x2F948, 'M', '睊'), - (0x2F949, 'M', '䀹'), - (0x2F94A, 'M', '瞋'), - (0x2F94B, 'M', '䁆'), - (0x2F94C, 'M', '䂖'), - (0x2F94D, 'M', '𥐝'), - (0x2F94E, 'M', '硎'), - (0x2F94F, 'M', '碌'), - (0x2F950, 'M', '磌'), - (0x2F951, 'M', '䃣'), - (0x2F952, 'M', '𥘦'), - (0x2F953, 'M', '祖'), - (0x2F954, 'M', '𥚚'), - (0x2F955, 'M', '𥛅'), - (0x2F956, 'M', '福'), - (0x2F957, 'M', '秫'), - (0x2F958, 'M', 
'䄯'), - (0x2F959, 'M', '穀'), - (0x2F95A, 'M', '穊'), - (0x2F95B, 'M', '穏'), - (0x2F95C, 'M', '𥥼'), - (0x2F95D, 'M', '𥪧'), - ] - -def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F95F, 'X'), - (0x2F960, 'M', '䈂'), - (0x2F961, 'M', '𥮫'), - (0x2F962, 'M', '篆'), - (0x2F963, 'M', '築'), - (0x2F964, 'M', '䈧'), - (0x2F965, 'M', '𥲀'), - (0x2F966, 'M', '糒'), - (0x2F967, 'M', '䊠'), - (0x2F968, 'M', '糨'), - (0x2F969, 'M', '糣'), - (0x2F96A, 'M', '紀'), - (0x2F96B, 'M', '𥾆'), - (0x2F96C, 'M', '絣'), - (0x2F96D, 'M', '䌁'), - (0x2F96E, 'M', '緇'), - (0x2F96F, 'M', '縂'), - (0x2F970, 'M', '繅'), - (0x2F971, 'M', '䌴'), - (0x2F972, 'M', '𦈨'), - (0x2F973, 'M', '𦉇'), - (0x2F974, 'M', '䍙'), - (0x2F975, 'M', '𦋙'), - (0x2F976, 'M', '罺'), - (0x2F977, 'M', '𦌾'), - (0x2F978, 'M', '羕'), - (0x2F979, 'M', '翺'), - (0x2F97A, 'M', '者'), - (0x2F97B, 'M', '𦓚'), - (0x2F97C, 'M', '𦔣'), - (0x2F97D, 'M', '聠'), - (0x2F97E, 'M', '𦖨'), - (0x2F97F, 'M', '聰'), - (0x2F980, 'M', '𣍟'), - (0x2F981, 'M', '䏕'), - (0x2F982, 'M', '育'), - (0x2F983, 'M', '脃'), - (0x2F984, 'M', '䐋'), - (0x2F985, 'M', '脾'), - (0x2F986, 'M', '媵'), - (0x2F987, 'M', '𦞧'), - (0x2F988, 'M', '𦞵'), - (0x2F989, 'M', '𣎓'), - (0x2F98A, 'M', '𣎜'), - (0x2F98B, 'M', '舁'), - (0x2F98C, 'M', '舄'), - (0x2F98D, 'M', '辞'), - (0x2F98E, 'M', '䑫'), - (0x2F98F, 'M', '芑'), - (0x2F990, 'M', '芋'), - (0x2F991, 'M', '芝'), - (0x2F992, 'M', '劳'), - (0x2F993, 'M', '花'), - (0x2F994, 'M', '芳'), - (0x2F995, 'M', '芽'), - (0x2F996, 'M', '苦'), - (0x2F997, 'M', '𦬼'), - (0x2F998, 'M', '若'), - (0x2F999, 'M', '茝'), - (0x2F99A, 'M', '荣'), - (0x2F99B, 'M', '莭'), - (0x2F99C, 'M', '茣'), - (0x2F99D, 'M', '莽'), - (0x2F99E, 'M', '菧'), - (0x2F99F, 'M', '著'), - (0x2F9A0, 'M', '荓'), - (0x2F9A1, 'M', '菊'), - (0x2F9A2, 'M', '菌'), - (0x2F9A3, 'M', '菜'), - (0x2F9A4, 'M', '𦰶'), - (0x2F9A5, 'M', '𦵫'), - (0x2F9A6, 'M', '𦳕'), - (0x2F9A7, 'M', '䔫'), - (0x2F9A8, 'M', '蓱'), - (0x2F9A9, 'M', '蓳'), - (0x2F9AA, 'M', '蔖'), - (0x2F9AB, 'M', '𧏊'), - (0x2F9AC, 'M', '蕤'), - (0x2F9AD, 'M', '𦼬'), - (0x2F9AE, 'M', '䕝'), - (0x2F9AF, 'M', '䕡'), - (0x2F9B0, 'M', '𦾱'), - (0x2F9B1, 'M', '𧃒'), - (0x2F9B2, 'M', '䕫'), - (0x2F9B3, 'M', '虐'), - (0x2F9B4, 'M', '虜'), - (0x2F9B5, 'M', '虧'), - (0x2F9B6, 'M', '虩'), - (0x2F9B7, 'M', '蚩'), - (0x2F9B8, 'M', '蚈'), - (0x2F9B9, 'M', '蜎'), - (0x2F9BA, 'M', '蛢'), - (0x2F9BB, 'M', '蝹'), - (0x2F9BC, 'M', '蜨'), - (0x2F9BD, 'M', '蝫'), - (0x2F9BE, 'M', '螆'), - (0x2F9BF, 'X'), - (0x2F9C0, 'M', '蟡'), - (0x2F9C1, 'M', '蠁'), - (0x2F9C2, 'M', '䗹'), - ] - -def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F9C3, 'M', '衠'), - (0x2F9C4, 'M', '衣'), - (0x2F9C5, 'M', '𧙧'), - (0x2F9C6, 'M', '裗'), - (0x2F9C7, 'M', '裞'), - (0x2F9C8, 'M', '䘵'), - (0x2F9C9, 'M', '裺'), - (0x2F9CA, 'M', '㒻'), - (0x2F9CB, 'M', '𧢮'), - (0x2F9CC, 'M', '𧥦'), - (0x2F9CD, 'M', '䚾'), - (0x2F9CE, 'M', '䛇'), - (0x2F9CF, 'M', '誠'), - (0x2F9D0, 'M', '諭'), - (0x2F9D1, 'M', '變'), - (0x2F9D2, 'M', '豕'), - (0x2F9D3, 'M', '𧲨'), - (0x2F9D4, 'M', '貫'), - (0x2F9D5, 'M', '賁'), - (0x2F9D6, 'M', '贛'), - (0x2F9D7, 'M', '起'), - (0x2F9D8, 'M', '𧼯'), - (0x2F9D9, 'M', '𠠄'), - (0x2F9DA, 'M', '跋'), - (0x2F9DB, 'M', '趼'), - (0x2F9DC, 'M', '跰'), - (0x2F9DD, 'M', '𠣞'), - (0x2F9DE, 'M', '軔'), - (0x2F9DF, 'M', '輸'), - (0x2F9E0, 'M', '𨗒'), - (0x2F9E1, 'M', '𨗭'), - (0x2F9E2, 'M', '邔'), - (0x2F9E3, 'M', '郱'), - (0x2F9E4, 'M', '鄑'), - (0x2F9E5, 'M', '𨜮'), - (0x2F9E6, 'M', '鄛'), - (0x2F9E7, 'M', '鈸'), - (0x2F9E8, 'M', '鋗'), - (0x2F9E9, 'M', '鋘'), - (0x2F9EA, 'M', '鉼'), - (0x2F9EB, 'M', '鏹'), - (0x2F9EC, 'M', '鐕'), - 
(0x2F9ED, 'M', '𨯺'), - (0x2F9EE, 'M', '開'), - (0x2F9EF, 'M', '䦕'), - (0x2F9F0, 'M', '閷'), - (0x2F9F1, 'M', '𨵷'), - (0x2F9F2, 'M', '䧦'), - (0x2F9F3, 'M', '雃'), - (0x2F9F4, 'M', '嶲'), - (0x2F9F5, 'M', '霣'), - (0x2F9F6, 'M', '𩅅'), - (0x2F9F7, 'M', '𩈚'), - (0x2F9F8, 'M', '䩮'), - (0x2F9F9, 'M', '䩶'), - (0x2F9FA, 'M', '韠'), - (0x2F9FB, 'M', '𩐊'), - (0x2F9FC, 'M', '䪲'), - (0x2F9FD, 'M', '𩒖'), - (0x2F9FE, 'M', '頋'), - (0x2FA00, 'M', '頩'), - (0x2FA01, 'M', '𩖶'), - (0x2FA02, 'M', '飢'), - (0x2FA03, 'M', '䬳'), - (0x2FA04, 'M', '餩'), - (0x2FA05, 'M', '馧'), - (0x2FA06, 'M', '駂'), - (0x2FA07, 'M', '駾'), - (0x2FA08, 'M', '䯎'), - (0x2FA09, 'M', '𩬰'), - (0x2FA0A, 'M', '鬒'), - (0x2FA0B, 'M', '鱀'), - (0x2FA0C, 'M', '鳽'), - (0x2FA0D, 'M', '䳎'), - (0x2FA0E, 'M', '䳭'), - (0x2FA0F, 'M', '鵧'), - (0x2FA10, 'M', '𪃎'), - (0x2FA11, 'M', '䳸'), - (0x2FA12, 'M', '𪄅'), - (0x2FA13, 'M', '𪈎'), - (0x2FA14, 'M', '𪊑'), - (0x2FA15, 'M', '麻'), - (0x2FA16, 'M', '䵖'), - (0x2FA17, 'M', '黹'), - (0x2FA18, 'M', '黾'), - (0x2FA19, 'M', '鼅'), - (0x2FA1A, 'M', '鼏'), - (0x2FA1B, 'M', '鼖'), - (0x2FA1C, 'M', '鼻'), - (0x2FA1D, 'M', '𪘀'), - (0x2FA1E, 'X'), - (0x30000, 'V'), - (0x3134B, 'X'), - (0xE0100, 'I'), - (0xE01F0, 'X'), - ] - -uts46data = tuple( - _seg_0() - + _seg_1() - + _seg_2() - + _seg_3() - + _seg_4() - + _seg_5() - + _seg_6() - + _seg_7() - + _seg_8() - + _seg_9() - + _seg_10() - + _seg_11() - + _seg_12() - + _seg_13() - + _seg_14() - + _seg_15() - + _seg_16() - + _seg_17() - + _seg_18() - + _seg_19() - + _seg_20() - + _seg_21() - + _seg_22() - + _seg_23() - + _seg_24() - + _seg_25() - + _seg_26() - + _seg_27() - + _seg_28() - + _seg_29() - + _seg_30() - + _seg_31() - + _seg_32() - + _seg_33() - + _seg_34() - + _seg_35() - + _seg_36() - + _seg_37() - + _seg_38() - + _seg_39() - + _seg_40() - + _seg_41() - + _seg_42() - + _seg_43() - + _seg_44() - + _seg_45() - + _seg_46() - + _seg_47() - + _seg_48() - + _seg_49() - + _seg_50() - + _seg_51() - + _seg_52() - + _seg_53() - + _seg_54() - + _seg_55() - + _seg_56() - + _seg_57() - + _seg_58() - + _seg_59() - + _seg_60() - + _seg_61() - + _seg_62() - + _seg_63() - + _seg_64() - + _seg_65() - + _seg_66() - + _seg_67() - + _seg_68() - + _seg_69() - + _seg_70() - + _seg_71() - + _seg_72() - + _seg_73() - + _seg_74() - + _seg_75() - + _seg_76() - + _seg_77() - + _seg_78() - + _seg_79() - + _seg_80() -) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/jupyter.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/jupyter.py deleted file mode 100644 index 22f4d716ac9764ee18005b9b852946d614152375..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/jupyter.py +++ /dev/null @@ -1,101 +0,0 @@ -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Sequence - -if TYPE_CHECKING: - from pip._vendor.rich.console import ConsoleRenderable - -from . import get_console -from .segment import Segment -from .terminal_theme import DEFAULT_TERMINAL_THEME - -if TYPE_CHECKING: - from pip._vendor.rich.console import ConsoleRenderable - -JUPYTER_HTML_FORMAT = """\ -
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
        -""" - - -class JupyterRenderable: - """A shim to write html to Jupyter notebook.""" - - def __init__(self, html: str, text: str) -> None: - self.html = html - self.text = text - - def _repr_mimebundle_( - self, include: Sequence[str], exclude: Sequence[str], **kwargs: Any - ) -> Dict[str, str]: - data = {"text/plain": self.text, "text/html": self.html} - if include: - data = {k: v for (k, v) in data.items() if k in include} - if exclude: - data = {k: v for (k, v) in data.items() if k not in exclude} - return data - - -class JupyterMixin: - """Add to an Rich renderable to make it render in Jupyter notebook.""" - - __slots__ = () - - def _repr_mimebundle_( - self: "ConsoleRenderable", - include: Sequence[str], - exclude: Sequence[str], - **kwargs: Any, - ) -> Dict[str, str]: - console = get_console() - segments = list(console.render(self, console.options)) - html = _render_segments(segments) - text = console._render_buffer(segments) - data = {"text/plain": text, "text/html": html} - if include: - data = {k: v for (k, v) in data.items() if k in include} - if exclude: - data = {k: v for (k, v) in data.items() if k not in exclude} - return data - - -def _render_segments(segments: Iterable[Segment]) -> str: - def escape(text: str) -> str: - """Escape html.""" - return text.replace("&", "&").replace("<", "<").replace(">", ">") - - fragments: List[str] = [] - append_fragment = fragments.append - theme = DEFAULT_TERMINAL_THEME - for text, style, control in Segment.simplify(segments): - if control: - continue - text = escape(text) - if style: - rule = style.get_html_style(theme) - text = f'{text}' if rule else text - if style.link: - text = f'{text}' - append_fragment(text) - - code = "".join(fragments) - html = JUPYTER_HTML_FORMAT.format(code=code) - - return html - - -def display(segments: Iterable[Segment], text: str) -> None: - """Render segments to Jupyter.""" - html = _render_segments(segments) - jupyter_renderable = JupyterRenderable(html, text) - try: - from IPython.display import display as ipython_display - - ipython_display(jupyter_renderable) - except ModuleNotFoundError: - # Handle the case where the Console has force_jupyter=True, - # but IPython is not installed. - pass - - -def print(*args: Any, **kwargs: Any) -> None: - """Proxy for Console print.""" - console = get_console() - return console.print(*args, **kwargs) diff --git a/spaces/tracinginsights/api/accelerations.py b/spaces/tracinginsights/api/accelerations.py deleted file mode 100644 index ca03f648d59506a20f92dc1f238eeca84ef9012e..0000000000000000000000000000000000000000 --- a/spaces/tracinginsights/api/accelerations.py +++ /dev/null @@ -1,151 +0,0 @@ -import math - -import numpy as np - - -def smooth_derivative(t_in, v_in): - # - # Function to compute a smooth estimation of a derivative. - # [REF: http://holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/] - # - - # Configuration - # - # Derivative method: two options: 'smooth' or 'centered'. Smooth is more conservative - # but helps to supress the very noisy signals. 
'centered' is more aggressive but more noisy - method = "smooth" - - t = t_in.copy() - v = v_in.copy() - - # (0) Prepare inputs - - # (0.1) Time needs to be transformed to seconds - try: - for i in range(0, t.size): - t.iloc[i] = t.iloc[i].total_seconds() - except: - pass - - t = np.array(t) - v = np.array(v) - - # (0.2) Assert they have the same size - assert t.size == v.size - - # (0.3) Initialize output - dvdt = np.zeros(t.size) - - # (1) Manually compute points out of the stencil - - # (1.1) First point - dvdt[0] = (v[1] - v[0]) / (t[1] - t[0]) - - # (1.2) Second point - dvdt[1] = (v[2] - v[0]) / (t[2] - t[0]) - - # (1.3) Third point - dvdt[2] = (v[3] - v[1]) / (t[3] - t[1]) - - # (1.4) Last points - n = t.size - dvdt[n - 1] = (v[n - 1] - v[n - 2]) / (t[n - 1] - t[n - 2]) - dvdt[n - 2] = (v[n - 1] - v[n - 3]) / (t[n - 1] - t[n - 3]) - dvdt[n - 3] = (v[n - 2] - v[n - 4]) / (t[n - 2] - t[n - 4]) - - # (2) Compute the rest of the points - if method == "smooth": - c = [5.0 / 32.0, 4.0 / 32.0, 1.0 / 32.0] - for i in range(3, t.size - 3): - for j in range(1, 4): - if (t[i + j] - t[i - j]) == 0: - dvdt[i] += 0 - else: - dvdt[i] += ( - 2 * j * c[j - 1] * (v[i + j] - v[i - j]) / (t[i + j] - t[i - j]) - ) - elif method == "centered": - for i in range(3, t.size - 2): - for j in range(1, 4): - if (t[i + j] - t[i - j]) == 0: - dvdt[i] += 0 - else: - dvdt[i] = (v[i + 1] - v[i - 1]) / (t[i + 1] - t[i - 1]) - - return dvdt - - -def truncated_remainder(dividend, divisor): - divided_number = dividend / divisor - divided_number = ( - -int(-divided_number) if divided_number < 0 else int(divided_number) - ) - - remainder = dividend - divisor * divided_number - - return remainder - - -def transform_to_pipi(input_angle): - pi = math.pi - revolutions = int((input_angle + np.sign(input_angle) * pi) / (2 * pi)) - - p1 = truncated_remainder(input_angle + np.sign(input_angle) * pi, 2 * pi) - p2 = ( - np.sign( - np.sign(input_angle) - + 2 - * ( - np.sign( - math.fabs( - (truncated_remainder(input_angle + pi, 2 * pi)) / (2 * pi) - ) - ) - - 1 - ) - ) - ) * pi - - output_angle = p1 - p2 - - return output_angle, revolutions - - -def remove_acceleration_outliers(acc): - acc_threshold_g = 7.5 - if math.fabs(acc[0]) > acc_threshold_g: - acc[0] = 0.0 - - for i in range(1, acc.size - 1): - if math.fabs(acc[i]) > acc_threshold_g: - acc[i] = acc[i - 1] - - if math.fabs(acc[-1]) > acc_threshold_g: - acc[-1] = acc[-2] - - return acc - - -def compute_accelerations(telemetry): - v = np.array(telemetry["Speed"]) / 3.6 - lon_acc = smooth_derivative(telemetry["Time"], v) / 9.81 - - dx = smooth_derivative(telemetry["Distance"], telemetry["X"]) - dy = smooth_derivative(telemetry["Distance"], telemetry["Y"]) - - theta = np.zeros(dx.size) - theta[0] = math.atan2(dy[0], dx[0]) - for i in range(0, dx.size): - theta[i] = ( - theta[i - 1] + transform_to_pipi(math.atan2(dy[i], dx[i]) - theta[i - 1])[0] - ) - - kappa = smooth_derivative(telemetry["Distance"], theta) - lat_acc = v * v * kappa / 9.81 - - # Remove outliers - lon_acc = remove_acceleration_outliers(lon_acc) - lat_acc = remove_acceleration_outliers(lat_acc) - - return np.round(lon_acc, 2), np.round(lat_acc, 2) diff --git a/spaces/uSerNameDDHL/bingo/src/components/theme-toggle.tsx b/spaces/uSerNameDDHL/bingo/src/components/theme-toggle.tsx deleted file mode 100644 index 67d3f1a2c163ccbeb52c40a7e42f107190237154..0000000000000000000000000000000000000000 --- a/spaces/uSerNameDDHL/bingo/src/components/theme-toggle.tsx +++ /dev/null @@ -1,31 +0,0 @@ -'use client' - -import * as React 
from 'react' -import { useTheme } from 'next-themes' - -import { Button } from '@/components/ui/button' -import { IconMoon, IconSun } from '@/components/ui/icons' - -export function ThemeToggle() { - const { setTheme, theme } = useTheme() - const [_, startTransition] = React.useTransition() - - return ( - - ) -} diff --git a/spaces/ucanbaklava/stablediffusionapi-disney-pixar-cartoon/app.py b/spaces/ucanbaklava/stablediffusionapi-disney-pixar-cartoon/app.py deleted file mode 100644 index 27fd27bfffeb59d210fd2c7769378680cb81844c..0000000000000000000000000000000000000000 --- a/spaces/ucanbaklava/stablediffusionapi-disney-pixar-cartoon/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stablediffusionapi/disney-pixar-cartoon").launch() \ No newline at end of file diff --git a/spaces/ulysses115/diffsvc_test/modules/parallel_wavegan/optimizers/radam.py b/spaces/ulysses115/diffsvc_test/modules/parallel_wavegan/optimizers/radam.py deleted file mode 100644 index e805d7e34921bee436e1e7fd9e1f753c7609186b..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/diffsvc_test/modules/parallel_wavegan/optimizers/radam.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- - -"""RAdam optimizer. - -This code is derived from https://github.com/LiyuanLucasLiu/RAdam. -""" - -import math -import torch - -from torch.optim.optimizer import Optimizer - - -class RAdam(Optimizer): - """Rectified Adam optimizer.""" - - def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): - """Initialize RAdam optimizer.""" - defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) - self.buffer = [[None, None, None] for ind in range(10)] - super(RAdam, self).__init__(params, defaults) - - def __setstate__(self, state): - """Set state.""" - super(RAdam, self).__setstate__(state) - - def step(self, closure=None): - """Run one step.""" - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data.float() - if grad.is_sparse: - raise RuntimeError('RAdam does not support sparse gradients') - - p_data_fp32 = p.data.float() - - state = self.state[p] - - if len(state) == 0: - state['step'] = 0 - state['exp_avg'] = torch.zeros_like(p_data_fp32) - state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) - else: - state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) - state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) - - exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] - beta1, beta2 = group['betas'] - - exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) - exp_avg.mul_(beta1).add_(1 - beta1, grad) - - state['step'] += 1 - buffered = self.buffer[int(state['step'] % 10)] - if state['step'] == buffered[0]: - N_sma, step_size = buffered[1], buffered[2] - else: - buffered[0] = state['step'] - beta2_t = beta2 ** state['step'] - N_sma_max = 2 / (1 - beta2) - 1 - N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) - buffered[1] = N_sma - - # more conservative since it's an approximated value - if N_sma >= 5: - step_size = math.sqrt( - (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) # NOQA - else: - step_size = 1.0 / (1 - beta1 ** state['step']) - buffered[2] = step_size - - if group['weight_decay'] != 0: - p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) - - # more conservative since it's an approximated 
value - if N_sma >= 5: - denom = exp_avg_sq.sqrt().add_(group['eps']) - p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) - else: - p_data_fp32.add_(-step_size * group['lr'], exp_avg) - - p.data.copy_(p_data_fp32) - - return loss diff --git a/spaces/unidiffuser-testing/unidiffuser-testing/unidiffuser/sample_v1_test.py b/spaces/unidiffuser-testing/unidiffuser-testing/unidiffuser/sample_v1_test.py deleted file mode 100644 index b6dfd72f310e1fe25392f42a9006bfc5967ea831..0000000000000000000000000000000000000000 --- a/spaces/unidiffuser-testing/unidiffuser-testing/unidiffuser/sample_v1_test.py +++ /dev/null @@ -1,780 +0,0 @@ -import ml_collections -import torch -import random -import utils -from dpm_solver_pp import NoiseScheduleVP, DPM_Solver -from absl import logging -import einops -import libs.autoencoder -import libs.clip -from torchvision.utils import save_image, make_grid -import torchvision.transforms as standard_transforms -import numpy as np -import clip -from PIL import Image -import time - -from typing import Optional, Union, List, Tuple - -from torch import nn -from transformers import ( - CLIPFeatureExtractor, - CLIPProcessor, - CLIPTextModel, - CLIPTokenizer, - CLIPVisionModel, - GPT2LMHeadModel, - GPT2Tokenizer, -) - -from libs.autoencoder import Encoder, Decoder -from libs.clip import AbstractEncoder -from libs.caption_decoder import generate2, generate_beam - - -# ----Define Testing Versions of Classes---- - - -class TestAutoencoderKL(nn.Module): - def __init__(self, ddconfig, embed_dim, pretrained_path, scale_factor=0.18215): - super().__init__() - print(f'Create autoencoder with scale_factor={scale_factor}') - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - assert ddconfig["double_z"] - self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - self.embed_dim = embed_dim - self.scale_factor = scale_factor - m, u = self.load_state_dict(torch.load(pretrained_path, map_location='cpu')) - assert len(m) == 0 and len(u) == 0 - self.eval() - self.requires_grad_(False) - - def encode_moments(self, x): - h = self.encoder(x) - moments = self.quant_conv(h) - return moments - - def sample(self, moments, noise=None, generator=None, device="cuda"): - mean, logvar = torch.chunk(moments, 2, dim=1) - if noise is None: - noise = randn_tensor(mean.shape, generator=generator) - noise = noise.to(device) - logvar = torch.clamp(logvar, -30.0, 20.0) - std = torch.exp(0.5 * logvar) - z = mean + std * noise - z = self.scale_factor * z - return z - - def get_moment_params(self, moments): - mean, logvar = torch.chunk(moments, 2, dim=1) - return mean, logvar - - def encode(self, x): - moments = self.encode_moments(x) - # z = self.sample(moments) - # Instead of sampling from the diagonal gaussian, return its mode (mean) - mean, logvar = self.get_moment_params(moments) - return mean - - def decode(self, z): - z = (1. 
/ self.scale_factor) * z - z = self.post_quant_conv(z) - dec = self.decoder(z) - return dec - - def forward(self, inputs, fn): - if fn == 'encode_moments': - return self.encode_moments(inputs) - elif fn == 'encode': - return self.encode(inputs) - elif fn == 'decode': - return self.decode(inputs) - else: - raise NotImplementedError - - def freeze(self): - self.eval() - self.requires_grad_(False) - - -# ----Define Testing Utility Functions---- - - -def get_test_autoencoder(pretrained_path, scale_factor=0.18215): - ddconfig = dict( - double_z=True, - z_channels=4, - resolution=256, - in_channels=3, - out_ch=3, - ch=128, - ch_mult=[1, 2, 4, 4], - num_res_blocks=2, - attn_resolutions=[], - dropout=0.0 - ) - vae_scale_factor = 2 ** (len(ddconfig['ch_mult']) - 1) - return TestAutoencoderKL(ddconfig, 4, pretrained_path, scale_factor), vae_scale_factor - - -# Modified from diffusers.utils.randn_tensor -def randn_tensor( - shape: Union[Tuple, List], - generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None, - device: Optional["torch.device"] = None, - dtype: Optional["torch.dtype"] = None, - layout: Optional["torch.layout"] = None, -): - """This is a helper function that allows creating random tensors on the desired `device` with the desired `dtype`. When - passing a list of generators one can seed each batched size individually. If CPU generators are passed the tensor - will always be created on CPU. - """ - # device on which tensor is created defaults to device - rand_device = device - batch_size = shape[0] - - layout = layout or torch.strided - device = device or torch.device("cpu") - - if generator is not None: - gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type - if gen_device_type != device.type and gen_device_type == "cpu": - rand_device = "cpu" - if device != "mps": - logging.info( - f"The passed generator was created on 'cpu' even though a tensor on {device} was expected." - f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably" - f" slightly speed up this function by passing a generator that was created on the {device} device." - ) - elif gen_device_type != device.type and gen_device_type == "cuda": - raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.") - - if isinstance(generator, list): - shape = (1,) + shape[1:] - latents = [ - torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) - for i in range(batch_size) - ] - latents = torch.cat(latents, dim=0).to(device) - else: - latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device) - - return latents - - -# Sample from the autoencoder latent space directly instead of sampling the autoencoder moment. -def prepare_latents( - config, - clip_text_model, - clip_img_model, - clip_img_model_preprocess, - autoencoder, - vae_scale_factor, - device, -): - resolution = config.z_shape[-1] * vae_scale_factor - # Fix device to CPU for reproducibility. 
- latent_device = "cpu" - # latent_device = device - latent_torch_device = torch.device(latent_device) - generator = torch.Generator(device=latent_torch_device).manual_seed(config.seed) - - contexts = randn_tensor((config.n_samples, 77, config.clip_text_dim), generator=generator, device=latent_torch_device) - img_contexts = randn_tensor((config.n_samples, config.z_shape[0], config.z_shape[1], config.z_shape[2]), generator=generator, device=latent_torch_device) - clip_imgs = randn_tensor((config.n_samples, 1, config.clip_img_dim), generator=generator, device=latent_torch_device) - - if config.mode in ['t2i', 't2i2t']: - prompts = [ config.prompt ] * config.n_samples - contexts = clip_text_model.encode(prompts) - elif config.mode in ['i2t', 'i2t2i']: - from PIL import Image - img_contexts = [] - clip_imgs = [] - - def get_img_feature(image): - image = np.array(image).astype(np.uint8) - image = utils.center_crop(resolution, resolution, image) - clip_img_feature = clip_img_model.encode_image(clip_img_model_preprocess(Image.fromarray(image)).unsqueeze(0).to(device)) - - image = (image / 127.5 - 1.0).astype(np.float32) - image = einops.rearrange(image, 'h w c -> 1 c h w') - image = torch.tensor(image, device=device) - logging.info(f'Preprocessed VAE image {image}') - logging.info(f"Preprocessed VAE image shape {image.shape}") - # Get moments then get the mode of the moment (diagonal Gaussian) distribution - moments = autoencoder.encode_moments(image) - moment_mean, moment_logvar = autoencoder.get_moment_params(moments) - print(f"Moment dist mean: {moment_mean}") - print(f"Moment dist logvar: {moment_logvar}") - moments = autoencoder.sample(moments, generator=generator, device=device) - - return clip_img_feature, moments - - image = Image.open(config.img).convert('RGB') - clip_img, img_context = get_img_feature(image) - - img_contexts.append(img_context) - clip_imgs.append(clip_img) - img_contexts = img_contexts * config.n_samples - clip_imgs = clip_imgs * config.n_samples - - img_contexts = torch.concat(img_contexts, dim=0) - clip_imgs = torch.stack(clip_imgs, dim=0) - - contexts = contexts.to(device) - img_contexts = img_contexts.to(device) - clip_imgs = clip_imgs.to(device) - return contexts, img_contexts, clip_imgs - - -# ----END---- - - -def stable_diffusion_beta_schedule(linear_start=0.00085, linear_end=0.0120, n_timestep=1000): - _betas = ( - torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 - ) - return _betas.numpy() - - -def prepare_contexts(config, clip_text_model, clip_img_model, clip_img_model_preprocess, autoencoder): - resolution = config.z_shape[-1] * 8 - device = 'cuda' if torch.cuda.is_available() else 'cpu' - - contexts = torch.randn(config.n_samples, 77, config.clip_text_dim).to(device) - img_contexts = torch.randn(config.n_samples, 2 * config.z_shape[0], config.z_shape[1], config.z_shape[2]) - clip_imgs = torch.randn(config.n_samples, 1, config.clip_img_dim) - - if config.mode in ['t2i', 't2i2t']: - prompts = [ config.prompt ] * config.n_samples - contexts = clip_text_model.encode(prompts) - - elif config.mode in ['i2t', 'i2t2i']: - from PIL import Image - img_contexts = [] - clip_imgs = [] - - def get_img_feature(image): - image = np.array(image).astype(np.uint8) - image = utils.center_crop(resolution, resolution, image) - clip_img_feature = clip_img_model.encode_image(clip_img_model_preprocess(Image.fromarray(image)).unsqueeze(0).to(device)) - - image = (image / 127.5 - 1.0).astype(np.float32) - image = einops.rearrange(image, 'h w 
c -> 1 c h w') - image = torch.tensor(image, device=device) - moments = autoencoder.encode_moments(image) - - return clip_img_feature, moments - - image = Image.open(config.img).convert('RGB') - clip_img, img_context = get_img_feature(image) - - img_contexts.append(img_context) - clip_imgs.append(clip_img) - img_contexts = img_contexts * config.n_samples - clip_imgs = clip_imgs * config.n_samples - - img_contexts = torch.concat(img_contexts, dim=0) - clip_imgs = torch.stack(clip_imgs, dim=0) - - return contexts, img_contexts, clip_imgs - - -def unpreprocess(v):  # to B C H W and [0, 1] - v = 0.5 * (v + 1.) - v.clamp_(0., 1.) - return v - - -def set_seed(seed: int): - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - - -def evaluate(config): - if config.get('benchmark', False): - torch.backends.cudnn.benchmark = True - torch.backends.cudnn.deterministic = False - - device = config.sample.device - torch_device = torch.device(device) - set_seed(config.seed) - - # Instantiate generator - # cpu_generator = torch.Generator(device="cpu").manual_seed(config.seed) - generator = torch.Generator(device=torch_device).manual_seed(config.seed) - - config = ml_collections.FrozenConfigDict(config) - if config.sample.log_dir is not None: - log_filename = config.sample.log_dir + "/" + config.mode + ".txt" - utils.set_logger(log_level=config.sample.log_level, fname=log_filename) - else: - utils.set_logger(log_level=config.sample.log_level) - - _betas = stable_diffusion_beta_schedule() - N = len(_betas) - - nnet = utils.get_nnet(**config.nnet) - logging.info(f'load nnet from {config.nnet_path}') - nnet.load_state_dict(torch.load(config.nnet_path, map_location='cpu')) - nnet.to(device) - nnet.eval() - - use_caption_decoder = config.text_dim < config.clip_text_dim or config.mode != 't2i' - if use_caption_decoder: - from libs.caption_decoder import CaptionDecoder - caption_decoder = CaptionDecoder(device=device, **config.caption_decoder) - else: - caption_decoder = None - - clip_text_model = libs.clip.FrozenCLIPEmbedder(device=device) - clip_text_model.eval() - clip_text_model.to(device) - - # autoencoder = libs.autoencoder.get_model(**config.autoencoder) - autoencoder, vae_scale_factor = get_test_autoencoder(**config.autoencoder) - autoencoder.to(device) - - clip_img_model, clip_img_model_preprocess = clip.load("ViT-B/32", device=device, jit=False) - - empty_context = clip_text_model.encode([''])[0] - - def split(x): - C, H, W = config.z_shape - z_dim = C * H * W - z, clip_img = x.split([z_dim, config.clip_img_dim], dim=1) - z = einops.rearrange(z, 'B (C H W) -> B C H W', C=C, H=H, W=W) - clip_img = einops.rearrange(clip_img, 'B (L D) -> B L D', L=1, D=config.clip_img_dim) - return z, clip_img - - - def combine(z, clip_img): - z = einops.rearrange(z, 'B C H W -> B (C H W)') - clip_img = einops.rearrange(clip_img, 'B L D -> B (L D)') - return torch.concat([z, clip_img], dim=-1) - - - def t2i_nnet(x, timesteps, text):  # text is the low-dimensional version of the text clip embedding - """ - 1. calculate the conditional model output - 2. calculate unconditional model output - config.sample.t2i_cfg_mode == 'empty_token': using the original cfg with the empty string - config.sample.t2i_cfg_mode == 'true_uncond': using the unconditional model learned by our method - 3. 
return linear combination of conditional output and unconditional output - """ - z, clip_img = split(x) - - t_text = torch.zeros(timesteps.size(0), dtype=torch.int, device=device) - - z_out, clip_img_out, text_out = nnet(z, clip_img, text=text, t_img=timesteps, t_text=t_text, - data_type=torch.zeros_like(t_text, device=device, dtype=torch.int) + config.data_type) - x_out = combine(z_out, clip_img_out) - - if config.sample.scale == 0.: - return x_out - - if config.sample.t2i_cfg_mode == 'empty_token': - _empty_context = einops.repeat(empty_context, 'L D -> B L D', B=x.size(0)) - if use_caption_decoder: - _empty_context = caption_decoder.encode_prefix(_empty_context) - z_out_uncond, clip_img_out_uncond, text_out_uncond = nnet(z, clip_img, text=_empty_context, t_img=timesteps, t_text=t_text, - data_type=torch.zeros_like(t_text, device=device, dtype=torch.int) + config.data_type) - x_out_uncond = combine(z_out_uncond, clip_img_out_uncond) - elif config.sample.t2i_cfg_mode == 'true_uncond': - # text_N = torch.randn_like(text) # 3 other possible choices - text_N = randn_tensor(text.shape, generator=generator, device=torch_device) - z_out_uncond, clip_img_out_uncond, text_out_uncond = nnet(z, clip_img, text=text_N, t_img=timesteps, t_text=torch.ones_like(timesteps) * N, - data_type=torch.zeros_like(t_text, device=device, dtype=torch.int) + config.data_type) - x_out_uncond = combine(z_out_uncond, clip_img_out_uncond) - else: - raise NotImplementedError - - return x_out + config.sample.scale * (x_out - x_out_uncond) - - - def i_nnet(x, timesteps): - z, clip_img = split(x) - text = torch.randn(x.size(0), 77, config.text_dim, device=device) - t_text = torch.ones_like(timesteps) * N - z_out, clip_img_out, text_out = nnet(z, clip_img, text=text, t_img=timesteps, t_text=t_text, - data_type=torch.zeros_like(t_text, device=device, dtype=torch.int) + config.data_type) - x_out = combine(z_out, clip_img_out) - return x_out - - def t_nnet(x, timesteps): - z = torch.randn(x.size(0), *config.z_shape, device=device) - clip_img = torch.randn(x.size(0), 1, config.clip_img_dim, device=device) - z_out, clip_img_out, text_out = nnet(z, clip_img, text=x, t_img=torch.ones_like(timesteps) * N, t_text=timesteps, - data_type=torch.zeros_like(timesteps, device=device, dtype=torch.int) + config.data_type) - return text_out - - def i2t_nnet(x, timesteps, z, clip_img): - """ - 1. calculate the conditional model output - 2. calculate unconditional model output - 3. 
return linear combination of conditional output and unconditional output - """ - t_img = torch.zeros(timesteps.size(0), dtype=torch.int, device=device) - - z_out, clip_img_out, text_out = nnet(z, clip_img, text=x, t_img=t_img, t_text=timesteps, - data_type=torch.zeros_like(t_img, device=device, dtype=torch.int) + config.data_type) - - if config.sample.scale == 0.: - return text_out - - # z_N = torch.randn_like(z) # 3 other possible choices - # clip_img_N = torch.randn_like(clip_img) - z_N = randn_tensor(z.shape, generator=generator, device=torch_device) - clip_img_N = randn_tensor(clip_img.shape, generator=generator, device=torch_device) - z_out_uncond, clip_img_out_uncond, text_out_uncond = nnet(z_N, clip_img_N, text=x, t_img=torch.ones_like(timesteps) * N, t_text=timesteps, - data_type=torch.zeros_like(timesteps, device=device, dtype=torch.int) + config.data_type) - - return text_out + config.sample.scale * (text_out - text_out_uncond) - - def split_joint(x): - C, H, W = config.z_shape - z_dim = C * H * W - z, clip_img, text = x.split([z_dim, config.clip_img_dim, 77 * config.text_dim], dim=1) - z = einops.rearrange(z, 'B (C H W) -> B C H W', C=C, H=H, W=W) - clip_img = einops.rearrange(clip_img, 'B (L D) -> B L D', L=1, D=config.clip_img_dim) - text = einops.rearrange(text, 'B (L D) -> B L D', L=77, D=config.text_dim) - return z, clip_img, text - - def combine_joint(z, clip_img, text): - z = einops.rearrange(z, 'B C H W -> B (C H W)') - clip_img = einops.rearrange(clip_img, 'B L D -> B (L D)') - text = einops.rearrange(text, 'B L D -> B (L D)') - return torch.concat([z, clip_img, text], dim=-1) - - def joint_nnet(x, timesteps): - z, clip_img, text = split_joint(x) - z_out, clip_img_out, text_out = nnet(z, clip_img, text=text, t_img=timesteps, t_text=timesteps, - data_type=torch.zeros_like(timesteps, device=device, dtype=torch.int) + config.data_type) - x_out = combine_joint(z_out, clip_img_out, text_out) - - if config.sample.scale == 0.: - return x_out - - # z_noise = torch.randn(x.size(0), *config.z_shape, device=device) - # clip_img_noise = torch.randn(x.size(0), 1, config.clip_img_dim, device=device) - # text_noise = torch.randn(x.size(0), 77, config.text_dim, device=device) - z_noise = randn_tensor((x.size(0), *config.z_shape), generator=generator, device=torch_device) - clip_img_noise = randn_tensor((x.size(0), 1, config.clip_img_dim), generator=generator, device=torch_device) - text_noise = randn_tensor((x.size(0), 77, config.text_dim), generator=generator, device=torch_device) - - _, _, text_out_uncond = nnet(z_noise, clip_img_noise, text=text, t_img=torch.ones_like(timesteps) * N, t_text=timesteps, - data_type=torch.zeros_like(timesteps, device=device, dtype=torch.int) + config.data_type) - z_out_uncond, clip_img_out_uncond, _ = nnet(z, clip_img, text=text_noise, t_img=timesteps, t_text=torch.ones_like(timesteps) * N, - data_type=torch.zeros_like(timesteps, device=device, dtype=torch.int) + config.data_type) - - x_out_uncond = combine_joint(z_out_uncond, clip_img_out_uncond, text_out_uncond) - - return x_out + config.sample.scale * (x_out - x_out_uncond) - - @torch.cuda.amp.autocast() - def encode(_batch): - return autoencoder.encode(_batch) - - @torch.cuda.amp.autocast() - def decode(_batch): - return autoencoder.decode(_batch) - - - logging.info(config.sample) - logging.info(f'N={N}') - - # contexts, img_contexts, clip_imgs = prepare_contexts(config, clip_text_model, clip_img_model, clip_img_model_preprocess, autoencoder) - contexts, img_contexts, clip_imgs = prepare_latents( - 
config, - clip_text_model, - clip_img_model, - clip_img_model_preprocess, - autoencoder, - vae_scale_factor, - device, - ) - - contexts = contexts # the clip embedding of conditioned texts - contexts_low_dim = contexts if not use_caption_decoder else caption_decoder.encode_prefix(contexts) # the low dimensional version of the contexts, which is the input to the nnet - - logging.debug(f"Text latents: {contexts}") - logging.debug(f"Text latents shape: {contexts.shape}") - logging.debug(f"Low dim text latents: {contexts_low_dim}") - logging.debug(f"Low dim text latents shape: {contexts_low_dim.shape}") - - img_contexts = img_contexts # img_contexts is the autoencoder moment - # z_img = autoencoder.sample(img_contexts, generator=cpu_generator, device=device) - z_img = img_contexts # sample autoencoder latents directly, no need to call sample() - clip_imgs = clip_imgs # the clip embedding of conditioned image - - logging.debug(f"Encoded image VAE latents: {z_img}") - logging.debug(f"Encoded image VAE latents shape: {z_img.shape}") - logging.debug(f"Encoded image CLIP latents: {clip_imgs}") - logging.debug(f"Encoded image CLIP latents shape: {clip_imgs.shape}") - - if config.mode in ['t2i', 't2i2t']: - _n_samples = contexts_low_dim.size(0) - elif config.mode in ['i2t', 'i2t2i']: - _n_samples = img_contexts.size(0) - else: - _n_samples = config.n_samples - - - def sample_fn(mode, **kwargs): - - # _z_init = torch.randn(_n_samples, *config.z_shape, device=device) - # _clip_img_init = torch.randn(_n_samples, 1, config.clip_img_dim, device=device) - # _text_init = torch.randn(_n_samples, 77, config.text_dim, device=device) - _z_init = randn_tensor((_n_samples, *config.z_shape), generator=generator, device=torch_device) - _clip_img_init = randn_tensor((_n_samples, 1, config.clip_img_dim), generator=generator, device=torch_device) - _text_init = randn_tensor((_n_samples, 77, config.text_dim), generator=generator, device=torch_device) - if mode == 'joint': - _x_init = combine_joint(_z_init, _clip_img_init, _text_init) - elif mode in ['t2i', 'i']: - _x_init = combine(_z_init, _clip_img_init) - elif mode in ['i2t', 't']: - _x_init = _text_init - noise_schedule = NoiseScheduleVP(schedule='discrete', betas=torch.tensor(_betas, device=device).float()) - - def model_fn(x, t_continuous): - t = t_continuous * N - if mode == 'joint': - return joint_nnet(x, t) - elif mode == 't2i': - return t2i_nnet(x, t, **kwargs) - elif mode == 'i2t': - return i2t_nnet(x, t, **kwargs) - elif mode == 'i': - return i_nnet(x, t) - elif mode == 't': - return t_nnet(x, t) - - dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True, thresholding=False) - with torch.no_grad(): - with torch.autocast(device_type=device): - start_time = time.time() - x = dpm_solver.sample(_x_init, steps=config.sample.sample_steps, eps=1. / N, T=1.) 
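- # dpm_solver.sample integrates the diffusion ODE from t = T = 1 down to t = eps = 1/N in - # config.sample.sample_steps solver steps; predict_x0=True runs DPM-Solver in data-prediction mode, - # while model_fn above maps continuous time in (0, 1] onto the discrete timestep grid via t * N.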
- end_time = time.time() - print(f'\ngenerate {_n_samples} samples with {config.sample.sample_steps} steps takes {end_time - start_time:.2f}s') - - # os.makedirs(config.output_path, exist_ok=True) - if mode == 'joint': - _z, _clip_img, _text = split_joint(x) - return _z, _clip_img, _text - elif mode in ['t2i', 'i']: - _z, _clip_img = split(x) - return _z, _clip_img - elif mode in ['i2t', 't']: - return x - - def test_sample_fn(mode, **kwargs): - if mode == 'joint': - _x_init = combine_joint(z_img, clip_imgs, contexts_low_dim) - elif mode in ['t2i', 'i']: - _x_init = combine(z_img, clip_imgs) - elif mode in ['i2t', 't']: - _x_init = contexts_low_dim - - logging.debug(f"Latents: {_x_init}") - logging.debug(f"Latents shape: {_x_init.shape}") - - noise_schedule = NoiseScheduleVP(schedule='discrete', betas=torch.tensor(_betas, device=device).float()) - - def model_fn(x, t_continuous): - t = t_continuous * N - if mode == 'joint': - noise_pred = joint_nnet(x, t) - logging.debug(f"Joint noise pred for time {t}: {noise_pred}") - logging.debug(f"Joint noise pred for time {t} shape: {noise_pred.shape}") - return noise_pred - # return joint_nnet(x, t) - elif mode == 't2i': - noise_pred = t2i_nnet(x, t, **kwargs) - logging.debug(f"t2i noise pred for time {t}: {noise_pred}") - logging.debug(f"t2i noise pred for time {t} shape: {noise_pred.shape}") - return noise_pred - # return t2i_nnet(x, t, **kwargs) - elif mode == 'i2t': - noise_pred = i2t_nnet(x, t, **kwargs) - logging.debug(f"i2t noise pred for time {t}: {noise_pred}") - logging.debug(f"i2t noise pred for time {t} shape: {noise_pred.shape}") - return noise_pred - # return i2t_nnet(x, t, **kwargs) - elif mode == 'i': - return i_nnet(x, t) - elif mode == 't': - return t_nnet(x, t) - - dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True, thresholding=False) - with torch.no_grad(): - with torch.autocast(device_type=device): - start_time = time.time() - x = dpm_solver.sample(_x_init, steps=config.sample.sample_steps, eps=1. / N, T=1.) 
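- # Note: unlike sample_fn above, this test path integrates from an _x_init assembled out of the - # prepared image/text latents (z_img, clip_imgs, contexts_low_dim) rather than fresh Gaussian noise.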
- end_time = time.time() - print(f'\ngenerate {_n_samples} samples with {config.sample.sample_steps} steps takes {end_time - start_time:.2f}s') - - logging.debug(f"Full UNet sample: {x}") - logging.debug(f"Full UNet sample shape: {x.shape}") - - # os.makedirs(config.output_path, exist_ok=True) - if mode == 'joint': - _z, _clip_img, _text = split_joint(x) - return _z, _clip_img, _text - elif mode in ['t2i', 'i']: - _z, _clip_img = split(x) - return _z, _clip_img - elif mode in ['i2t', 't']: - return x - - output_images = None - output_text = None - - if config.mode in ['joint']: - # _z, _clip_img, _text = sample_fn(config.mode) - _z, _clip_img, _text = test_sample_fn(config.mode) - - logging.debug(f"Text output: {_text}") - logging.debug(f"Text output shape: {_text.shape}") - logging.debug(f"VAE output: {_z}") - logging.debug(f"VAE output shape: {_z.shape}") - logging.debug(f"CLIP output: {_clip_img}") - logging.debug(f"CLIP output shape: {_clip_img.shape}") - - samples = unpreprocess(decode(_z)) - - logging.debug(f"VAE decoded sample: {samples}") - logging.debug(f"VAE decoded sample shape: {samples.shape}") - - prompts = caption_decoder.generate_captions(_text) - - logging.debug(f"Generated text: {prompts}") - - # numpy_sample = samples[0].clone().cpu().permute(1, 2, 0).float().numpy() - # numpy_sample_slice = numpy_sample[-3:, -3:, -1].flatten() - # print(f"Expected slice: {numpy_sample_slice}") - # print(f"Expected text: {prompts}") - - output_images = samples - output_text = prompts - - elif config.mode in ['t2i', 'i', 'i2t2i']: - if config.mode == 't2i': - # _z, _clip_img = sample_fn(config.mode, text=contexts_low_dim) # conditioned on the text embedding - _z, _clip_img = test_sample_fn(config.mode, text=contexts_low_dim) - elif config.mode == 'i': - # _z, _clip_img = sample_fn(config.mode) - _z, _clip_img = test_sample_fn(config.mode) - elif config.mode == 'i2t2i': - _text = sample_fn('i2t', z=z_img, clip_img=clip_imgs) # conditioned on the image embedding - _z, _clip_img = sample_fn('t2i', text=_text) - samples = unpreprocess(decode(_z)) - # numpy_sample = samples[0].clone().cpu().permute(1, 2, 0).float().numpy() - # numpy_sample_slice = numpy_sample[-3:, -3:, -1].flatten() - # print(f"Expected slice: {numpy_sample_slice}") - output_images = samples - - - elif config.mode in ['i2t', 't', 't2i2t']: - if config.mode == 'i2t': - # _text = sample_fn(config.mode, z=z_img, clip_img=clip_imgs) # conditioned on the image embedding - _text = test_sample_fn(config.mode, z=z_img, clip_img=clip_imgs) - elif config.mode == 't': - # _text = sample_fn(config.mode) - _text = test_sample_fn(config.mode) - elif config.mode == 't2i2t': - _z, _clip_img = sample_fn('t2i', text=contexts_low_dim) - _text = sample_fn('i2t', z=_z, clip_img=_clip_img) - samples = caption_decoder.generate_captions(_text) - logging.info(samples) - output_text = samples - - print(f'\nGPU memory usage: {torch.cuda.max_memory_reserved() / 1024 ** 3:.2f} GB') - # print(f'\nresults are saved in {os.path.join(config.output_path, config.mode)} :)') - - # Convert sample images to PIL - if output_images is not None: - for sample in output_images: - sample = standard_transforms.ToPILImage()(sample) - - return output_images, output_text - - -def d(**kwargs): - """Helper of creating a config dict.""" - return ml_collections.ConfigDict(initial_dictionary=kwargs) - - -def get_config(): - config = ml_collections.ConfigDict() - - config.seed = 0 - config.pred = 'noise_pred' - config.z_shape = (4, 64, 64) - config.clip_img_dim = 512 - 
config.clip_text_dim = 768 - config.text_dim = 64 # reduce dimension - config.data_type = 1 - - config.autoencoder = d( - pretrained_path='models/autoencoder_kl.pth', - ) - - config.caption_decoder = d( - pretrained_path="models/caption_decoder.pth", - hidden_dim=config.get_ref('text_dim') - ) - - config.nnet = d( - name='uvit_multi_post_ln_v1', - img_size=64, - in_chans=4, - patch_size=2, - embed_dim=1536, - depth=30, - num_heads=24, - mlp_ratio=4, - qkv_bias=False, - pos_drop_rate=0., - drop_rate=0., - attn_drop_rate=0., - mlp_time_embed=False, - text_dim=config.get_ref('text_dim'), - num_text_tokens=77, - clip_img_dim=config.get_ref('clip_img_dim'), - use_checkpoint=True - ) - - config.sample = d( - sample_steps=3, - scale=7., - t2i_cfg_mode='true_uncond', - device="cuda", - log_level="debug", - log_dir=None, - ) - - return config - - -def sample(mode, prompt, image, sample_steps=50, scale=7.0, seed=None): - config = get_config() - - config.nnet_path = "models/uvit_v1.pth" - config.n_samples = 1 - config.nrow = 1 - - config.mode = mode - config.prompt = prompt - config.img = image - - config.sample.sample_steps = sample_steps - config.sample.scale = scale - if seed is not None: - config.seed = seed - - sample_images, sample_text = evaluate(config) - return sample_images, sample_text \ No newline at end of file diff --git a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/__init__.py b/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/__init__.py deleted file mode 100644 index fe08549e9c92f0e8a7bc96cead3277dedd12583c..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -""" -from .save_images import save_samples, get_output_folder -from .depth import DepthModel -from .prompt import sanitize -from .animation import construct_RotationMatrixHomogenous, getRotationMatrixManual, getPoints_for_PerspectiveTranformEstimation, warpMatrix, anim_frame_warp_2d, anim_frame_warp_3d -from .generate import add_noise, load_img, load_mask_latent, prepare_mask -""" \ No newline at end of file diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/cfg/__init__.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/cfg/__init__.md deleted file mode 100644 index 26f4e54adde43223ae4aab2b96f42774e318b10c..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/cfg/__init__.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -description: Explore Ultralytics YOLO's configuration functions and tools. Handle settings, manage defaults, and deal with deprecations in your YOLO configuration. -keywords: Ultralytics, YOLO, configuration, cfg2dict, get_cfg, handle_deprecation, check_cfg_mismatch, merge_equals_args, handle_yolo_hub, handle_yolo_settings, entrypoint, copy_default_cfg ---- - -## cfg2dict ---- -### ::: ultralytics.yolo.cfg.cfg2dict -

<br><br> - -## get_cfg ---- -### ::: ultralytics.yolo.cfg.get_cfg -<br><br> - -## _handle_deprecation ---- -### ::: ultralytics.yolo.cfg._handle_deprecation -<br><br> - -## check_cfg_mismatch ---- -### ::: ultralytics.yolo.cfg.check_cfg_mismatch -<br><br> - -## merge_equals_args ---- -### ::: ultralytics.yolo.cfg.merge_equals_args -<br><br> - -## handle_yolo_hub ---- -### ::: ultralytics.yolo.cfg.handle_yolo_hub -<br><br> - -## handle_yolo_settings ---- -### ::: ultralytics.yolo.cfg.handle_yolo_settings -<br><br> - -## entrypoint ---- -### ::: ultralytics.yolo.cfg.entrypoint -<br><br> - -## copy_default_cfg ---- -### ::: ultralytics.yolo.cfg.copy_default_cfg -<br><br>

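As a quick orientation to the functions this page indexes, a minimal sketch of a typical `get_cfg` call (an illustrative example only, assuming the `ultralytics.yolo.cfg.get_cfg(cfg, overrides)` signature from the YOLOv8 8.0.x line these pages document, where `overrides` is merged on top of the packaged defaults):

```python
from ultralytics.yolo.cfg import get_cfg

# Build a config namespace from the packaged defaults, overriding two fields.
# Unknown keys in `overrides` are expected to be rejected by check_cfg_mismatch.
cfg = get_cfg(overrides={"imgsz": 640, "conf": 0.25})
print(cfg.imgsz, cfg.conf)  # 640 0.25
```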
        diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/downloads.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/downloads.md deleted file mode 100644 index dd07646c17f7124191e0040532d6d4b55be0b5e8..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/downloads.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -description: Download and unzip YOLO pretrained models. Ultralytics YOLO docs utils.downloads.unzip_file, checks disk space, downloads and attempts assets. -keywords: Ultralytics YOLO, downloads, trained models, datasets, weights, deep learning, computer vision ---- - -## is_url ---- -### ::: ultralytics.yolo.utils.downloads.is_url -

<br><br> - -## unzip_file ---- -### ::: ultralytics.yolo.utils.downloads.unzip_file -<br><br> - -## check_disk_space ---- -### ::: ultralytics.yolo.utils.downloads.check_disk_space -<br><br> - -## safe_download ---- -### ::: ultralytics.yolo.utils.downloads.safe_download -<br><br> - -## attempt_download_asset ---- -### ::: ultralytics.yolo.utils.downloads.attempt_download_asset -<br><br> - -## download ---- -### ::: ultralytics.yolo.utils.downloads.download -<br><br>

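A companion sketch for the download helpers indexed above (again illustrative, assuming the `ultralytics.yolo.utils.downloads` signatures: `is_url` validates, and by default also checks reachability of, a URL, while `attempt_download_asset` fetches a named weights file from the Ultralytics assets release if it is missing locally):

```python
from ultralytics.yolo.utils.downloads import attempt_download_asset, is_url

# True for a well-formed (and, by default, reachable) URL, False otherwise.
print(is_url("https://ultralytics.com/assets/Arial.ttf"))

# Resolve yolov8n.pt, downloading it from the GitHub assets release when absent.
weights_path = attempt_download_asset("yolov8n.pt")
print(weights_path)
```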
        diff --git a/spaces/vinthony/SadTalker/src/facerender/animate.py b/spaces/vinthony/SadTalker/src/facerender/animate.py deleted file mode 100644 index 781f5a3318a086049cc6b74393073ddda7001d5e..0000000000000000000000000000000000000000 --- a/spaces/vinthony/SadTalker/src/facerender/animate.py +++ /dev/null @@ -1,257 +0,0 @@ -import os -import cv2 -import yaml -import numpy as np -import warnings -from skimage import img_as_ubyte -import safetensors -import safetensors.torch -warnings.filterwarnings('ignore') - - -import imageio -import torch -import torchvision - - -from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector -from src.facerender.modules.mapping import MappingNet -from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator -from src.facerender.modules.make_animation import make_animation - -from pydub import AudioSegment -from src.utils.face_enhancer import enhancer_generator_with_len, enhancer_list -from src.utils.paste_pic import paste_pic -from src.utils.videoio import save_video_with_watermark - -try: - import webui # in webui - in_webui = True -except: - in_webui = False - -class AnimateFromCoeff(): - - def __init__(self, sadtalker_path, device): - - with open(sadtalker_path['facerender_yaml']) as f: - config = yaml.safe_load(f) - - generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'], - **config['model_params']['common_params']) - kp_extractor = KPDetector(**config['model_params']['kp_detector_params'], - **config['model_params']['common_params']) - he_estimator = HEEstimator(**config['model_params']['he_estimator_params'], - **config['model_params']['common_params']) - mapping = MappingNet(**config['model_params']['mapping_params']) - - generator.to(device) - kp_extractor.to(device) - he_estimator.to(device) - mapping.to(device) - for param in generator.parameters(): - param.requires_grad = False - for param in kp_extractor.parameters(): - param.requires_grad = False - for param in he_estimator.parameters(): - param.requires_grad = False - for param in mapping.parameters(): - param.requires_grad = False - - if sadtalker_path is not None: - if 'checkpoint' in sadtalker_path: # use safe tensor - self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None) - else: - self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator) - else: - raise AttributeError("Checkpoint should be specified for video head pose estimator.") - - if sadtalker_path['mappingnet_checkpoint'] is not None: - self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping) - else: - raise AttributeError("Checkpoint should be specified for video head pose estimator.") - - self.kp_extractor = kp_extractor - self.generator = generator - self.he_estimator = he_estimator - self.mapping = mapping - - self.kp_extractor.eval() - self.generator.eval() - self.he_estimator.eval() - self.mapping.eval() - - self.device = device - - def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None, - kp_detector=None, he_estimator=None, - device="cpu"): - - checkpoint = safetensors.torch.load_file(checkpoint_path) - - if generator is not None: - x_generator = {} - for k,v in checkpoint.items(): - if 'generator' in k: - x_generator[k.replace('generator.', '')] = v - generator.load_state_dict(x_generator) - if kp_detector is not None: - x_generator = 
{} - for k,v in checkpoint.items(): - if 'kp_extractor' in k: - x_generator[k.replace('kp_extractor.', '')] = v - kp_detector.load_state_dict(x_generator) - if he_estimator is not None: - x_generator = {} - for k,v in checkpoint.items(): - if 'he_estimator' in k: - x_generator[k.replace('he_estimator.', '')] = v - he_estimator.load_state_dict(x_generator) - - return None - - def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None, - kp_detector=None, he_estimator=None, optimizer_generator=None, - optimizer_discriminator=None, optimizer_kp_detector=None, - optimizer_he_estimator=None, device="cpu"): - checkpoint = torch.load(checkpoint_path, map_location=torch.device(device)) - if generator is not None: - generator.load_state_dict(checkpoint['generator']) - if kp_detector is not None: - kp_detector.load_state_dict(checkpoint['kp_detector']) - if he_estimator is not None: - he_estimator.load_state_dict(checkpoint['he_estimator']) - if discriminator is not None: - try: - discriminator.load_state_dict(checkpoint['discriminator']) - except: - print('No discriminator in the state-dict. Discriminator will be randomly initialized') - if optimizer_generator is not None: - optimizer_generator.load_state_dict(checkpoint['optimizer_generator']) - if optimizer_discriminator is not None: - try: - optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator']) - except RuntimeError as e: - print('No discriminator optimizer in the state-dict. Optimizer will not be initialized') - if optimizer_kp_detector is not None: - optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector']) - if optimizer_he_estimator is not None: - optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator']) - - return checkpoint['epoch'] - - def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None, - optimizer_mapping=None, optimizer_discriminator=None, device='cpu'): - checkpoint = torch.load(checkpoint_path, map_location=torch.device(device)) - if mapping is not None: - mapping.load_state_dict(checkpoint['mapping']) - if discriminator is not None: - discriminator.load_state_dict(checkpoint['discriminator']) - if optimizer_mapping is not None: - optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping']) - if optimizer_discriminator is not None: - optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator']) - - return checkpoint['epoch'] - - def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256): - - source_image=x['source_image'].type(torch.FloatTensor) - source_semantics=x['source_semantics'].type(torch.FloatTensor) - target_semantics=x['target_semantics_list'].type(torch.FloatTensor) - source_image=source_image.to(self.device) - source_semantics=source_semantics.to(self.device) - target_semantics=target_semantics.to(self.device) - if 'yaw_c_seq' in x: - yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor) - yaw_c_seq = yaw_c_seq.to(self.device) - else: - yaw_c_seq = None - if 'pitch_c_seq' in x: - pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor) - pitch_c_seq = pitch_c_seq.to(self.device) - else: - pitch_c_seq = None - if 'roll_c_seq' in x: - roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor) - roll_c_seq = roll_c_seq.to(self.device) - else: - roll_c_seq = None - - frame_num = x['frame_num'] - - predictions_video = make_animation(source_image, source_semantics, target_semantics, - self.generator, self.kp_extractor, 
self.he_estimator, self.mapping, - yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True) - - predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:]) - predictions_video = predictions_video[:frame_num] - - video = [] - for idx in range(predictions_video.shape[0]): - image = predictions_video[idx] - image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32) - video.append(image) - result = img_as_ubyte(video) - - ### the generated video is 256x256, so we keep the aspect ratio, - original_size = crop_info[0] - if original_size: - result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ] - - video_name = x['video_name'] + '.mp4' - path = os.path.join(video_save_dir, 'temp_'+video_name) - - imageio.mimsave(path, result, fps=float(25)) - - av_path = os.path.join(video_save_dir, video_name) - return_path = av_path - - audio_path = x['audio_path'] - audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0] - new_audio_path = os.path.join(video_save_dir, audio_name+'.wav') - start_time = 0 - # cog will not keep the .mp3 filename - sound = AudioSegment.from_file(audio_path) - frames = frame_num - end_time = start_time + frames*1/25*1000 - word1=sound.set_frame_rate(16000) - word = word1[start_time:end_time] - word.export(new_audio_path, format="wav") - - save_video_with_watermark(path, new_audio_path, av_path, watermark= False) - print(f'The generated video is named {video_save_dir}/{video_name}') - - if 'full' in preprocess.lower(): - # only add watermark to the full image. - video_name_full = x['video_name'] + '_full.mp4' - full_video_path = os.path.join(video_save_dir, video_name_full) - return_path = full_video_path - paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False) - print(f'The generated video is named {video_save_dir}/{video_name_full}') - else: - full_video_path = av_path - - #### paste back then enhancers - if enhancer: - video_name_enhancer = x['video_name'] + '_enhanced.mp4' - enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer) - av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer) - return_path = av_path_enhancer - - try: - enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer) - imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25)) - except: - enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer) - imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25)) - - save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False) - print(f'The generated video is named {video_save_dir}/{video_name_enhancer}') - os.remove(enhanced_path) - - os.remove(path) - os.remove(new_audio_path) - - return return_path - diff --git a/spaces/vivym/image-matting-app/ppmatting/utils/utils.py b/spaces/vivym/image-matting-app/ppmatting/utils/utils.py deleted file mode 100644 index 13513cb193757b63043f44a2c145b3e9b6fad82e..0000000000000000000000000000000000000000 --- a/spaces/vivym/image-matting-app/ppmatting/utils/utils.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - - -def get_files(root_path): - res = [] - for root, dirs, files in os.walk(root_path, followlinks=True): - for f in files: - if f.endswith(('.jpg', '.png', '.jpeg', 'JPG')): - res.append(os.path.join(root, f)) - return res - - -def get_image_list(image_path): - """Get image list""" - valid_suffix = [ - '.JPEG', '.jpeg', '.JPG', '.jpg', '.BMP', '.bmp', '.PNG', '.png' - ] - image_list = [] - image_dir = None - if os.path.isfile(image_path): - image_dir = None - if os.path.splitext(image_path)[-1] in valid_suffix: - image_list.append(image_path) - else: - image_dir = os.path.dirname(image_path) - with open(image_path, 'r') as f: - for line in f: - line = line.strip() - if len(line.split()) > 1: - raise RuntimeError( - 'There should be only one image path per line in `image_path` file. Wrong line: {}' - .format(line)) - image_list.append(os.path.join(image_dir, line)) - elif os.path.isdir(image_path): - image_dir = image_path - for root, dirs, files in os.walk(image_path): - for f in files: - if '.ipynb_checkpoints' in root: - continue - if os.path.splitext(f)[-1] in valid_suffix: - image_list.append(os.path.join(root, f)) - image_list.sort() - else: - raise FileNotFoundError( - '`image_path` is not found. it should be an image file or a directory including images' - ) - - if len(image_list) == 0: - raise RuntimeError('There are not image file in `image_path`') - - return image_list, image_dir - - -def mkdir(path): - sub_dir = os.path.dirname(path) - if not os.path.exists(sub_dir): - os.makedirs(sub_dir) diff --git a/spaces/warp-ai/Wuerstchen/previewer/modules.py b/spaces/warp-ai/Wuerstchen/previewer/modules.py deleted file mode 100644 index 3ded82f7628ccf0241bc6e3528cd8edba779caaa..0000000000000000000000000000000000000000 --- a/spaces/warp-ai/Wuerstchen/previewer/modules.py +++ /dev/null @@ -1,36 +0,0 @@ -from torch import nn - -# Effnet 16x16 to 64x64 previewer -class Previewer(nn.Module): - def __init__(self, c_in=16, c_hidden=512, c_out=3): - super().__init__() - self.blocks = nn.Sequential( - nn.Conv2d(c_in, c_hidden, kernel_size=1), # 36 channels to 512 channels - nn.GELU(), - nn.BatchNorm2d(c_hidden), - - nn.Conv2d(c_hidden, c_hidden, kernel_size=3, padding=1), - nn.GELU(), - nn.BatchNorm2d(c_hidden), - - nn.ConvTranspose2d(c_hidden, c_hidden//2, kernel_size=2, stride=2), # 16 -> 32 - nn.GELU(), - nn.BatchNorm2d(c_hidden//2), - - nn.Conv2d(c_hidden//2, c_hidden//2, kernel_size=3, padding=1), - nn.GELU(), - nn.BatchNorm2d(c_hidden//2), - - nn.ConvTranspose2d(c_hidden//2, c_hidden//4, kernel_size=2, stride=2), # 32 -> 64 - nn.GELU(), - nn.BatchNorm2d(c_hidden//4), - - nn.Conv2d(c_hidden//4, c_hidden//4, kernel_size=3, padding=1), - nn.GELU(), - nn.BatchNorm2d(c_hidden//4), - - nn.Conv2d(c_hidden//4, c_out, kernel_size=1), - ) - - def forward(self, x): - return self.blocks(x) \ No newline at end of file diff --git a/spaces/weishao2019/ChuanhuChatGPT/utils.py b/spaces/weishao2019/ChuanhuChatGPT/utils.py deleted file mode 100644 index f6e4fa4e8a9f908baa4509d7206ff3455ac57f39..0000000000000000000000000000000000000000 --- a/spaces/weishao2019/ChuanhuChatGPT/utils.py +++ 
/dev/null @@ -1,386 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type -import logging -import json -import os -import datetime -import hashlib -import csv -import requests -import re - -import gradio as gr -from pypinyin import lazy_pinyin -import tiktoken -import mdtex2html -from markdown import markdown -from pygments import highlight -from pygments.lexers import get_lexer_by_name -from pygments.formatters import HtmlFormatter - -from presets import * - -# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s") - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -def count_token(message): - encoding = tiktoken.get_encoding("cl100k_base") - input_str = f"role: {message['role']}, content: {message['content']}" - length = len(encoding.encode(input_str)) - return length - - -def markdown_to_html_with_syntax_highlight(md_str): - def replacer(match): - lang = match.group(1) or "text" - code = match.group(2) - - try: - lexer = get_lexer_by_name(lang, stripall=True) - except ValueError: - lexer = get_lexer_by_name("text", stripall=True) - - formatter = HtmlFormatter() - highlighted_code = highlight(code, lexer, formatter) - - return f'
<pre><code class="{lang}">{highlighted_code}</code></pre>' - - code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" - md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) - - html_str = markdown(md_str) - return html_str - - -def normalize_markdown(md_text: str) -> str: - lines = md_text.split("\n") - normalized_lines = [] - inside_list = False - - for i, line in enumerate(lines): - if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): - if not inside_list and i > 0 and lines[i - 1].strip() != "": - normalized_lines.append("") - inside_list = True - normalized_lines.append(line) - elif inside_list and line.strip() == "": - if i < len(lines) - 1 and not re.match( - r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() - ): - normalized_lines.append(line) - continue - else: - inside_list = False - normalized_lines.append(line) - - return "\n".join(normalized_lines) - - -def convert_mdtext(md_text): - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) - code_blocks = code_block_pattern.findall(md_text) - non_code_parts = code_block_pattern.split(md_text)[::2] - - result = [] - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - non_code = normalize_markdown(non_code) - if inline_code_pattern.search(non_code): - result.append(markdown(non_code, extensions=["tables"])) - else: - result.append(mdtex2html.convert(non_code, extensions=["tables"])) - if code.strip(): - # _, code = detect_language(code) # code highlighting disabled for now, since it breaks on large code blocks - # code = code.replace("\n\n", "\n") # blank-line stripping disabled for now, since it breaks on large code blocks - code = f"```{code}\n\n```" - code = markdown_to_html_with_syntax_highlight(code) - result.append(code) - result = "".join(result) - return result - - -def detect_language(code): - if code.startswith("\n"): - first_line = "" - else: - first_line = code.strip().split("\n", 1)[0] - language = first_line.lower() if first_line else "" - code_without_language = code[len(first_line) :].lstrip() if first_line else code - return language, code_without_language - - -def construct_text(role, text): - return {"role": role, "content": text} - - -def construct_user(text): - return construct_text("user", text) - - -def construct_system(text): - return construct_text("system", text) - - -def construct_assistant(text): - return construct_text("assistant", text) - - -def construct_token_message(token, stream=False): - return f"Token count: {token}" - - -def delete_last_conversation(chatbot, history, previous_token_count): - if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]: - logging.info("Error message detected, removing only the chatbot record") - chatbot.pop() - return chatbot, history - if len(history) > 0: - logging.info("Removed one round of conversation history") - history.pop() - history.pop() - if len(chatbot) > 0: - logging.info("Removed one round of chatbot conversation") - chatbot.pop() - if len(previous_token_count) > 0: - logging.info("Removed the token count record of one conversation round") - previous_token_count.pop() - return ( - chatbot, - history, - previous_token_count, - construct_token_message(sum(previous_token_count)), - ) - - -def save_file(filename, system, history, chatbot): - logging.info("Saving conversation history...") - os.makedirs(HISTORY_DIR, exist_ok=True) - if filename.endswith(".json"): - json_s = {"system": system, "history": history, "chatbot": chatbot} - print(json_s) - with open(os.path.join(HISTORY_DIR, filename), "w") as f: - json.dump(json_s, f) - elif filename.endswith(".md"): - md_s = f"system: \n- {system} \n" - for data in history: - md_s += f"\n{data['role']}: \n- {data['content']} \n" - with open(os.path.join(HISTORY_DIR, filename), "w", 
encoding="utf8") as f: - f.write(md_s) - logging.info("保存对话历史完毕") - return os.path.join(HISTORY_DIR, filename) - - -def save_chat_history(filename, system, history, chatbot): - if filename == "": - return - if not filename.endswith(".json"): - filename += ".json" - return save_file(filename, system, history, chatbot) - - -def export_markdown(filename, system, history, chatbot): - if filename == "": - return - if not filename.endswith(".md"): - filename += ".md" - return save_file(filename, system, history, chatbot) - - -def load_chat_history(filename, system, history, chatbot): - logging.info("加载对话历史中……") - if type(filename) != str: - filename = filename.name - try: - with open(os.path.join(HISTORY_DIR, filename), "r") as f: - json_s = json.load(f) - try: - if type(json_s["history"][0]) == str: - logging.info("历史记录格式为旧版,正在转换……") - new_history = [] - for index, item in enumerate(json_s["history"]): - if index % 2 == 0: - new_history.append(construct_user(item)) - else: - new_history.append(construct_assistant(item)) - json_s["history"] = new_history - logging.info(new_history) - except: - # 没有对话历史 - pass - logging.info("加载对话历史完毕") - return filename, json_s["system"], json_s["history"], json_s["chatbot"] - except FileNotFoundError: - logging.info("没有找到对话历史文件,不执行任何操作") - return filename, system, history, chatbot - - -def sorted_by_pinyin(list): - return sorted(list, key=lambda char: lazy_pinyin(char)[0][0]) - - -def get_file_names(dir, plain=False, filetypes=[".json"]): - logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}") - files = [] - try: - for type in filetypes: - files += [f for f in os.listdir(dir) if f.endswith(type)] - except FileNotFoundError: - files = [] - files = sorted_by_pinyin(files) - if files == []: - files = [""] - if plain: - return files - else: - return gr.Dropdown.update(choices=files) - - -def get_history_names(plain=False): - logging.info("获取历史记录文件名列表") - return get_file_names(HISTORY_DIR, plain) - - -def load_template(filename, mode=0): - logging.info(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)") - lines = [] - logging.info("Loading template...") - if filename.endswith(".json"): - with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f: - lines = json.load(f) - lines = [[i["act"], i["prompt"]] for i in lines] - else: - with open( - os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8" - ) as csvfile: - reader = csv.reader(csvfile) - lines = list(reader) - lines = lines[1:] - if mode == 1: - return sorted_by_pinyin([row[0] for row in lines]) - elif mode == 2: - return {row[0]: row[1] for row in lines} - else: - choices = sorted_by_pinyin([row[0] for row in lines]) - return {row[0]: row[1] for row in lines}, gr.Dropdown.update( - choices=choices, value=choices[0] - ) - - -def get_template_names(plain=False): - logging.info("获取模板文件名列表") - return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"]) - - -def get_template_content(templates, selection, original_system_prompt): - logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}") - try: - return templates[selection] - except: - return original_system_prompt - - -def reset_state(): - logging.info("重置状态") - return [], [], [], construct_token_message(0) - - -def reset_textbox(): - return gr.update(value="") - - -def reset_default(): - global API_URL - API_URL = "https://api.openai.com/v1/chat/completions" - os.environ.pop("HTTPS_PROXY", None) - os.environ.pop("https_proxy", None) - return gr.update(value=API_URL), gr.update(value=""), "API URL 
和代理已重置" - - -def change_api_url(url): - global API_URL - API_URL = url - msg = f"API地址更改为了{url}" - logging.info(msg) - return msg - - -def change_proxy(proxy): - os.environ["HTTPS_PROXY"] = proxy - msg = f"代理更改为了{proxy}" - logging.info(msg) - return msg - - -def hide_middle_chars(s): - if len(s) <= 8: - return s - else: - head = s[:4] - tail = s[-4:] - hidden = "*" * (len(s) - 8) - return head + hidden + tail - - -def submit_key(key): - key = key.strip() - msg = f"API密钥更改为了{hide_middle_chars(key)}" - logging.info(msg) - return key, msg - - -def sha1sum(filename): - sha1 = hashlib.sha1() - sha1.update(filename.encode("utf-8")) - return sha1.hexdigest() - - -def replace_today(prompt): - today = datetime.datetime.today().strftime("%Y-%m-%d") - return prompt.replace("{current_date}", today) - - -def get_geoip(): - response = requests.get("https://ipapi.co/json/", timeout=5) - try: - data = response.json() - except: - data = {"error": True, "reason": "连接ipapi失败"} - if "error" in data.keys(): - logging.warning(f"无法获取IP地址信息。\n{data}") - if data["reason"] == "RateLimited": - return ( - f"获取IP地理位置失败,因为达到了检测IP的速率限制。聊天功能可能仍然可用,但请注意,如果您的IP地址在不受支持的地区,您可能会遇到问题。" - ) - else: - return f"获取IP地理位置失败。原因:{data['reason']}。你仍然可以使用聊天功能。" - else: - country = data["country_name"] - if country == "China": - text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**" - else: - text = f"您的IP区域:{country}。" - logging.info(text) - return text - - -def find_n(lst, max_num): - n = len(lst) - total = sum(lst) - - if total < max_num: - return n - - for i in range(len(lst)): - if total - lst[i] < max_num: - return n - i -1 - total = total - lst[i] - return 1 diff --git a/spaces/wong26/faster-whisper-webui/README.md b/spaces/wong26/faster-whisper-webui/README.md deleted file mode 100644 index b530ec893be48ff9471257f74d7b03c524c8bfe4..0000000000000000000000000000000000000000 --- a/spaces/wong26/faster-whisper-webui/README.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Faster Whisper Webui -emoji: 🚀 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: aadnk/faster-whisper-webui ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -# Running Locally - -To run this program locally, first install Python 3.9+ and Git. Then install Pytorch 10.1+ and all the other dependencies: -``` -pip install -r requirements.txt -``` - -You can find detailed instructions for how to install this on Windows 10/11 [here (PDF)](docs/windows/install_win10_win11.pdf). - -Finally, run the full version (no audio length restrictions) of the app with parallel CPU/GPU enabled: -``` -python app.py --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True -``` - -You can also run the CLI interface, which is similar to Whisper's own CLI but also supports the following additional arguments: -``` -python cli.py \ -[--vad {none,silero-vad,silero-vad-skip-gaps,silero-vad-expand-into-gaps,periodic-vad}] \ -[--vad_merge_window VAD_MERGE_WINDOW] \ -[--vad_max_merge_size VAD_MAX_MERGE_SIZE] \ -[--vad_padding VAD_PADDING] \ -[--vad_prompt_window VAD_PROMPT_WINDOW] -[--vad_cpu_cores NUMBER_OF_CORES] -[--vad_parallel_devices COMMA_DELIMITED_DEVICES] -[--auto_parallel BOOLEAN] -``` -In addition, you may also use URL's in addition to file paths as input. 
-``` -python cli.py --model large --vad silero-vad --language Japanese "https://www.youtube.com/watch?v=4cICErqqRSM" -``` - -Rather than supplying arguments to `app.py` or `cli.py`, you can also use the configuration file [config.json5](config.json5). See that file for more information. -If you want to use a different configuration file, you can use the `WHISPER_WEBUI_CONFIG` environment variable to specify the path to another file. - -### Multiple Files - -You can upload multiple files either through the "Upload files" option, or as a playlist on YouTube. -Each audio file will then be processed in turn, and the resulting SRT/VTT/Transcript will be made available in the "Download" section. -When more than one file is processed, the UI will also generate an "All_Output" zip file containing all the text output files. - -## Diarization - -To detect different speakers in the audio, you can use the [whisper-diarization](https://gitlab.com/aadnk/whisper-diarization) application. - -Download the JSON file after running Whisper on an audio file, and then run app.py in the -whisper-diarization repository with the audio file and the JSON file as arguments. - -## Whisper Implementation - -You can choose between using `whisper` or `faster-whisper`. [Faster Whisper](https://github.com/guillaumekln/faster-whisper) is a drop-in replacement for the -default Whisper that achieves up to a 4x speedup and a 2x reduction in memory usage. - -You can install the requirements for a specific Whisper implementation in `requirements-fasterWhisper.txt` -or `requirements-whisper.txt`: -``` -pip install -r requirements-fasterWhisper.txt -``` -And then run the App or the CLI with the `--whisper_implementation faster-whisper` flag: -``` -python app.py --whisper_implementation faster-whisper --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True -``` -You can also select the whisper implementation in `config.json5`: -```json5 -{ - "whisper_implementation": "faster-whisper" -} -``` -### GPU Acceleration - -In order to use GPU acceleration with Faster Whisper, both CUDA 11.2 and cuDNN 8 must be installed. You may want to install these in a virtual environment like Anaconda. - -## Google Colab - -You can also run this Web UI directly on [Google Colab](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing), if you haven't got a GPU powerful enough to run the larger models. - -See the [colab documentation](docs/colab.md) for more information. - -## Parallel Execution - -You can run both the Web UI and the CLI on multiple GPUs in parallel, using the `vad_parallel_devices` option. This takes a comma-delimited list of -device IDs (0, 1, etc.) that Whisper should be distributed to and run on concurrently: -``` -python cli.py --model large --vad silero-vad --language Japanese \ ---vad_parallel_devices 0,1 "https://www.youtube.com/watch?v=4cICErqqRSM" -``` - -Note that this requires a VAD to function properly, otherwise only the first GPU will be used. Though you could use `periodic-vad` to avoid taking the hit -of running Silero-Vad, at a slight cost to accuracy. - -This is achieved by creating N child processes (where N is the number of selected devices), where Whisper is run concurrently. In `app.py`, you can also -set the `vad_process_timeout` option. This configures the number of seconds until a process is killed due to inactivity, freeing RAM and video memory. -The default value is 30 minutes. 
-``` -python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 -``` - -To execute the Silero VAD itself in parallel, use the `vad_cpu_cores` option: -``` -python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 --vad_cpu_cores 4 -``` - -You may also use `vad_process_timeout` with a single device (`--vad_parallel_devices 0`), if you prefer to always free video memory after a period of time. - -### Auto Parallel - -You can also set `auto_parallel` to `True`. This will set `vad_parallel_devices` to use all the GPU devices on the system, and `vad_cpu_cores` to be equal to the number of -cores (up to 8): -``` -python app.py --input_audio_max_duration -1 --auto_parallel True -``` - -# Docker - -To run it in Docker, first install Docker and optionally the NVIDIA Container Toolkit in order to use the GPU. -Then either use the GitLab-hosted container below, or check out this repository and build an image: -``` -sudo docker build -t whisper-webui:1 . -``` - -You can then start the WebUI with GPU support like so: -``` -sudo docker run -d --gpus=all -p 7860:7860 whisper-webui:1 -``` - -Leave out "--gpus=all" if you don't have access to a GPU with enough memory, and are fine with running it on the CPU only: -``` -sudo docker run -d -p 7860:7860 whisper-webui:1 -``` - -# GitLab Docker Registry - -This Docker container is also hosted on GitLab: - -``` -sudo docker run -d --gpus=all -p 7860:7860 registry.gitlab.com/aadnk/whisper-webui:latest -``` - -## Custom Arguments - -You can also pass custom arguments to `app.py` in the Docker container, for instance to be able to use all the GPUs in parallel (replace administrator with your user): -``` -sudo docker run -d --gpus all -p 7860:7860 \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ ---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \ ---restart=on-failure:15 registry.gitlab.com/aadnk/whisper-webui:latest \ -app.py --input_audio_max_duration -1 --server_name 0.0.0.0 --auto_parallel True \ ---default_vad silero-vad --default_model_name large -``` - -You can also call `cli.py` the same way: -``` -sudo docker run --gpus all \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ ---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \ ---mount type=bind,source=${PWD},target=/app/data \ -registry.gitlab.com/aadnk/whisper-webui:latest \ -cli.py --model large --auto_parallel True --vad silero-vad \ ---output_dir /app/data /app/data/YOUR-FILE-HERE.mp4 -``` - -## Caching - -Note that the models themselves are currently not included in the Docker images, and will be downloaded on demand. -To avoid this, bind the directory /root/.cache/whisper to some directory on the host (for instance /home/administrator/.cache/whisper), where you can (optionally) -prepopulate the directory with the different Whisper models. 
-``` -sudo docker run -d --gpus=all -p 7860:7860 \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ -registry.gitlab.com/aadnk/whisper-webui:latest -``` \ No newline at end of file diff --git a/spaces/wuhuik/bingo/cloudflare/worker.js b/spaces/wuhuik/bingo/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/wuhuik/bingo/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/xswu/HPSv2/app.py b/spaces/xswu/HPSv2/app.py deleted file mode 100644 index 94940c938855cc6065b73587a360d31587440cbd..0000000000000000000000000000000000000000 --- a/spaces/xswu/HPSv2/app.py +++ /dev/null @@ -1,83 +0,0 @@ -import gradio as gr -import torch -from PIL import Image -from src.open_clip import create_model_and_transforms, get_tokenizer -import warnings -import argparse - -warnings.filterwarnings("ignore", category=UserWarning) - -# Create an argument parser -parser = argparse.ArgumentParser() -parser.add_argument('--checkpoint', type=str, default='HPS_v2.pt', help='Path to the model checkpoint') - -args = parser.parse_args() - -device = 'cuda' if torch.cuda.is_available() else 'cpu' -model, preprocess_train, preprocess_val = create_model_and_transforms( - 'ViT-H-14', - 'laion2B-s32B-b79K', - precision='amp', - device=device, - jit=False, - force_quick_gelu=False, - force_custom_text=False, - force_patch_dropout=False, - force_image_size=None, - pretrained_image=False, - image_mean=None, - image_std=None, - light_augmentation=True, - aug_cfg={}, - output_dict=True, - with_score_predictor=False, - with_region_predictor=False -) - -checkpoint = torch.load(args.checkpoint) -model.load_state_dict(checkpoint['state_dict']) -tokenizer = get_tokenizer('ViT-H-14') -model.eval() - -intro = """ -

-HPS v2 -evaluating human preference for generated images -GitHub | ArXiv

        """ - -def inference(image, prompt): - # Load your image and prompt - with torch.no_grad(): - - # Process the image - image = preprocess_val(image).unsqueeze(0).to(device=device, non_blocking=True) - # Process the prompt - text = tokenizer([prompt]).to(device=device, non_blocking=True) - # Calculate the HPS - with torch.cuda.amp.autocast(): - outputs = model(image, text) - image_features, text_features = outputs["image_features"], outputs["text_features"] - logits_per_image = image_features @ text_features.T - - hps_score = torch.diagonal(logits_per_image).cpu().numpy() - output = 'HPSv2 score: ' + str(hps_score[0]) - return output - -with gr.Blocks(css="style.css") as demo: - gr.HTML(intro) - with gr.Column(): - image = gr.Image(label="Image", type="pil") - prompt = gr.Textbox(lines=1, label="Prompt") - button = gr.Button("Compute HPS v2") - score = gr.Textbox(label="output", lines=1, interactive=False, elem_id="output") - button.click(inference, inputs=[image, prompt], outputs=score) - -demo.queue(concurrency_count=1) -demo.launch() \ No newline at end of file diff --git a/spaces/yangheng/Super-Resolution-Anime-Diffusion/api.py b/spaces/yangheng/Super-Resolution-Anime-Diffusion/api.py deleted file mode 100644 index 08317b4eba5c62ae17646f121c0f0758b2592917..0000000000000000000000000000000000000000 --- a/spaces/yangheng/Super-Resolution-Anime-Diffusion/api.py +++ /dev/null @@ -1,35 +0,0 @@ -# -*- coding: utf-8 -*- -# file: api.py.py -# time: 20:37 2022/12/6 -# author: yangheng -# github: https://github.com/yangheng95 -# huggingface: https://huggingface.co/yangheng -# google scholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en -# Copyright (C) 2021. All Rights Reserved. -import requests -from PIL import Image -from io import BytesIO - -response = requests.post( - "https://yangheng-super-resolution-anime-diffusion.hf.space/run/generate", - json={ - "data": [ - "anything v3", - "girl,lovely,cute,beautiful eyes,cumulonimbus clouds,sky,detailed fingers,pants,red hair,blue eyes,flower meadow,Elif", - 7.5, - 15, - 512, - 512, - 0, - "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACklEQVR4nGMAAQAABQABDQottAAAAABJRU5ErkJggg==", - 0.5, - "", - 2, - ] - }, - timeout=3000, -) - -img = Image.open(BytesIO(response.content)) -img.show() -img.save("test_api.png") diff --git a/spaces/yaoshining/text-generation-webui/convert-to-safetensors.py b/spaces/yaoshining/text-generation-webui/convert-to-safetensors.py deleted file mode 100644 index 3b721e7cd4d15cf7e5e03caaee57ef83a41553bc..0000000000000000000000000000000000000000 --- a/spaces/yaoshining/text-generation-webui/convert-to-safetensors.py +++ /dev/null @@ -1,38 +0,0 @@ -''' - -Converts a transformers model to safetensors format and shards it. - -This makes it faster to load (because of safetensors) and lowers its RAM usage -while loading (because of sharding). 
- -Based on the original script by 81300: - -https://gist.github.com/81300/fe5b08bff1cba45296a829b9d6b0f303 - -''' - -import argparse -from pathlib import Path - -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer - -parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=54)) -parser.add_argument('MODEL', type=str, default=None, nargs='?', help="Path to the input model.") -parser.add_argument('--output', type=str, default=None, help='Path to the output folder (default: models/{model_name}_safetensors).') -parser.add_argument("--max-shard-size", type=str, default="2GB", help="Maximum size of a shard in GB or MB (default: %(default)s).") -parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.') -args = parser.parse_args() - -if __name__ == '__main__': - path = Path(args.MODEL) - model_name = path.name - - print(f"Loading {model_name}...") - model = AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if args.bf16 else torch.float16) - tokenizer = AutoTokenizer.from_pretrained(path) - - out_folder = args.output or Path(f"models/{model_name}_safetensors") - print(f"Saving the converted model to {out_folder} with a maximum shard size of {args.max_shard_size}...") - model.save_pretrained(out_folder, max_shard_size=args.max_shard_size, safe_serialization=True) - tokenizer.save_pretrained(out_folder) diff --git a/spaces/yderre-aubay/midi-player-demo/src/common/transform/TempoCoordTransform.ts b/spaces/yderre-aubay/midi-player-demo/src/common/transform/TempoCoordTransform.ts deleted file mode 100644 index d13cb98e4227f56a76c51f920d93dcf50c4fb217..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/common/transform/TempoCoordTransform.ts +++ /dev/null @@ -1,54 +0,0 @@ -import { IPoint } from "../geometry" - -export default class TempoCoordTransform { - readonly pixelsPerTick: number - // Height of the graph drawing area - readonly height: number - readonly maxBPM: number - - constructor(pixelsPerTick: number, height: number, maxBPM = 320) { - this.pixelsPerTick = pixelsPerTick - this.height = height - this.maxBPM = maxBPM - } - - getX(tick: number) { - return tick * this.pixelsPerTick - } - - getY(bpm: number) { - return (1 - bpm / this.maxBPM) * this.height // inverted vertically - } - - getMaxY() { - return this.height - } - - getTicks(pixels: number) { - return pixels / this.pixelsPerTick - } - - getBPM(pixels: number) { - return (1 - pixels / this.height) * this.maxBPM - } - - getDeltaBPM(pixels: number) { - return (-pixels / this.height) * this.maxBPM - } - - equals(t: TempoCoordTransform) { - return ( - this.pixelsPerTick === t.pixelsPerTick && - this.height === t.height && - this.maxBPM === t.maxBPM - ) - } - - fromPosition(position: IPoint) { - return { - tick: this.getTicks(position.x), - bpm: this.getBPM(position.y), - } - } -} diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/actions/history.ts b/spaces/yderre-aubay/midi-player-demo/src/main/actions/history.ts deleted file mode 100644 index 445d4c10b935c43160353192f329f5f40ed90791..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/actions/history.ts +++ /dev/null @@ -1,68 +0,0 @@ -import cloneDeep from "lodash/cloneDeep" -import { deserialize, serialize } from "serializr" -import Song from "../../common/song/Song" -import ArrangeViewStore from "../stores/ArrangeViewStore" 
-import { ControlStore } from "../stores/ControlStore" -import PianoRollStore from "../stores/PianoRollStore" -import RootStore from "../stores/RootStore" - -// we use any for now. related: https://github.com/Microsoft/TypeScript/issues/1897 -type Json = any - -export interface SerializedState { - song: Json - selection: PianoRollStore["selection"] - selectedNoteIds: PianoRollStore["selectedNoteIds"] - selectedControllerEventIds: ControlStore["selectedEventIds"] - arrangeSelection: ArrangeViewStore["selection"] - arrangeSelectedEventIds: ArrangeViewStore["selectedEventIds"] -} - -const serializeUndoableState = (rootStore: RootStore): SerializedState => { - return { - song: serialize(rootStore.song), - selection: cloneDeep(rootStore.pianoRollStore.selection), - selectedNoteIds: cloneDeep(rootStore.pianoRollStore.selectedNoteIds), - selectedControllerEventIds: cloneDeep( - rootStore.controlStore.selectedEventIds, - ), - arrangeSelection: cloneDeep(rootStore.arrangeViewStore.selection), - arrangeSelectedEventIds: cloneDeep( - rootStore.arrangeViewStore.selectedEventIds, - ), - } -} - -const restoreState = - (rootStore: RootStore) => (serializedState: SerializedState) => { - const song = deserialize(Song, serializedState.song) - rootStore.song = song - rootStore.pianoRollStore.selection = serializedState.selection - rootStore.pianoRollStore.selectedNoteIds = serializedState.selectedNoteIds - rootStore.controlStore.selectedEventIds = - serializedState.selectedControllerEventIds - rootStore.arrangeViewStore.selection = serializedState.arrangeSelection - rootStore.arrangeViewStore.selectedEventIds = - serializedState.arrangeSelectedEventIds - } - -export const pushHistory = (rootStore: RootStore) => () => { - const state = serializeUndoableState(rootStore) - rootStore.historyStore.push(state) -} - -export const undo = (rootStore: RootStore) => () => { - const currentState = serializeUndoableState(rootStore) - const nextState = rootStore.historyStore.undo(currentState) - if (nextState !== undefined) { - restoreState(rootStore)(nextState) - } -} - -export const redo = (rootStore: RootStore) => () => { - const currentState = serializeUndoableState(rootStore) - const nextState = rootStore.historyStore.redo(currentState) - if (nextState !== undefined) { - restoreState(rootStore)(nextState) - } -} diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/ExportDialog/ExportDialog.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/ExportDialog/ExportDialog.tsx deleted file mode 100644 index 5de1b825feef34d34491522c65018e8baf794233..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/ExportDialog/ExportDialog.tsx +++ /dev/null @@ -1,65 +0,0 @@ -import { observer } from "mobx-react-lite" -import { FC, useCallback, useEffect, useState } from "react" -import { Alert } from "../../../components/Alert" -import { Button, PrimaryButton } from "../../../components/Button" -import { - Dialog, - DialogActions, - DialogContent, - DialogTitle, -} from "../../../components/Dialog" -import { Localized } from "../../../components/Localized" -import { canExport, exportSongAsWav } from "../../actions" -import { useStores } from "../../hooks/useStores" - -export const ExportDialog: FC = observer(() => { - const rootStore = useStores() - const { exportStore, song } = rootStore - const { openExportDialog: open } = exportStore - const onClose = useCallback( - () => (exportStore.openExportDialog = false), - [exportStore], - ) - - const onClickExport = 
useCallback(() => { - exportStore.openExportDialog = false - exportSongAsWav(rootStore)() - }, [rootStore, exportStore]) - - const [exportEnabled, setExportEnabled] = useState(false) - useEffect(() => { - if (open) { - setExportEnabled(canExport(song)) - } - }, [open]) - - return ( -
        - - export-audio - - -
        - file-type: WAV -
        - {!exportEnabled && ( - - - export-error-too-short - - - )} -
        - - - {exportEnabled && ( - - export - - )} - - 
        - ) -}) diff --git a/spaces/ysharma/Low-rank-Adaptation/train_lora_dreambooth1.py b/spaces/ysharma/Low-rank-Adaptation/train_lora_dreambooth1.py deleted file mode 100644 index f32f81a92b59a7dae9b860ce0ad1bb51de19360f..0000000000000000000000000000000000000000 --- a/spaces/ysharma/Low-rank-Adaptation/train_lora_dreambooth1.py +++ /dev/null @@ -1,964 +0,0 @@ -# Bootstrapped from: -# https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py - -import argparse -import hashlib -import itertools -import math -import os -from pathlib import Path -from typing import Optional - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint - - -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import set_seed -from diffusers import ( - AutoencoderKL, - DDPMScheduler, - StableDiffusionPipeline, - UNet2DConditionModel, -) -from diffusers.optimization import get_scheduler -from huggingface_hub import HfFolder, Repository, whoami - -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer - -from lora_diffusion import ( - inject_trainable_lora, - save_lora_weight, - extract_lora_ups_down, -) - -from torch.utils.data import Dataset -from PIL import Image -from torchvision import transforms - -from pathlib import Path - -import random -import re - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and tokenizes the prompts. - """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - class_data_root=None, - class_prompt=None, - size=512, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exist.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize( - size, interpolation=transforms.InterpolationMode.BILINEAR - ), - transforms.CenterCrop(size) - if center_crop - else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - instance_image = Image.open( - self.instance_images_path[index % self.num_instance_images] - ) - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - example["instance_images"] = self.image_transforms(instance_image) - example["instance_prompt_ids"] = self.tokenizer( - self.instance_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - if self.class_data_root: - class_image = Image.open( - self.class_images_path[index % self.num_class_images] - ) - if not 
class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - example["class_prompt_ids"] = self.tokenizer( - self.class_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - return example - - -class DreamBoothLabeled(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and tokenizes the prompts. - """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - class_data_root=None, - class_prompt=None, - size=512, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exist.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize( - size, interpolation=transforms.InterpolationMode.BILINEAR - ), - transforms.CenterCrop(size) - if center_crop - else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - instance_image = Image.open( - self.instance_images_path[index % self.num_instance_images] - ) - - instance_prompt = ( - str(self.instance_images_path[index % self.num_instance_images]) - .split("/")[-1] - .split(".")[0] - .replace("-", " ") - ) - # remove numbers in prompt - instance_prompt = re.sub(r"\d+", "", instance_prompt) - # print(instance_prompt) - - _svg = random.choice(["svg", "flat color", "vector illustration", "sks"]) - instance_prompt = f"{instance_prompt}, style of {_svg}" - - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - example["instance_images"] = self.image_transforms(instance_image) - example["instance_prompt_ids"] = self.tokenizer( - instance_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - if self.class_data_root: - class_image = Image.open( - self.class_images_path[index % self.num_class_images] - ) - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - example["class_prompt_ids"] = self.tokenizer( - self.class_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - return example - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
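    # Each item is just {"prompt": ..., "index": ...}; a DataLoader batches these below so class-image generation can be split across processes.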
- - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - - -logger = get_logger(__name__) - - -def parse_args(input_args=None): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--revision", - type=str, - default=None, - required=False, - help="Revision of pretrained model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - required=True, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default=None, - help="The prompt to specify images in the same class as provided instance images.", - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument( - "--prior_loss_weight", - type=float, - default=1.0, - help="The weight of prior preservation loss.", - ) - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior preservation loss. If there are not enough images, additional images will be" - " sampled with class_prompt." - ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="text-inversion-model", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument( - "--seed", type=int, default=None, help="A seed for reproducible training." - ) - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", - action="store_true", - help="Whether to center crop images before resizing to resolution", - ) - parser.add_argument( - "--train_text_encoder", - action="store_true", - help="Whether to train the text encoder", - ) - parser.add_argument( - "--train_batch_size", - type=int, - default=4, - help="Batch size (per device) for the training dataloader.", - ) - parser.add_argument( - "--sample_batch_size", - type=int, - default=4, - help="Batch size (per device) for sampling images.", - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--save_steps", - type=int, - default=500, - help="Save checkpoint every X update steps.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of update steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-6, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", - type=int, - default=500, - help="Number of steps for the warmup in the lr scheduler.", - ) - parser.add_argument( - "--use_8bit_adam", - action="store_true", - help="Whether or not to use 8-bit Adam from bitsandbytes.", - ) - parser.add_argument( - "--adam_beta1", - type=float, - default=0.9, - help="The beta1 parameter for the Adam optimizer.", - ) - parser.add_argument( - "--adam_beta2", - type=float, - default=0.999, - help="The beta2 parameter for the Adam optimizer.", - ) - parser.add_argument( - "--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use." - ) - parser.add_argument( - "--adam_epsilon", - type=float, - default=1e-08, - help="Epsilon value for the Adam optimizer", - ) - parser.add_argument( - "--max_grad_norm", default=1.0, type=float, help="Max gradient norm." - ) - parser.add_argument( - "--push_to_hub", - action="store_true", - help="Whether or not to push the model to the Hub.", - ) - parser.add_argument( - "--hub_token", - type=str, - default=None, - help="The token to use to push to the Model Hub.", - ) - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default=None, - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" - " 1.10 and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" - " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." 
- ), - ) - parser.add_argument( - "--local_rank", - type=int, - default=-1, - help="For distributed training: local_rank", - ) - - if input_args is not None: - args = parser.parse_args(input_args) - else: - args = parser.parse_args() - - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - if args.with_prior_preservation: - if args.class_data_dir is None: - raise ValueError("You must specify a data directory for class images.") - if args.class_prompt is None: - raise ValueError("You must specify prompt for class images.") - else: - if args.class_data_dir is not None: - logger.warning( - "You need not use --class_data_dir without --with_prior_preservation." - ) - if args.class_prompt is not None: - logger.warning( - "You need not use --class_prompt without --with_prior_preservation." - ) - - return args - - -def get_full_repo_name( - model_id: str, organization: Optional[str] = None, token: Optional[str] = None -): - if token is None: - token = HfFolder.get_token() - if organization is None: - username = whoami(token)["name"] - return f"{username}/{model_id}" - else: - return f"{organization}/{model_id}" - - -def main(args): - logging_dir = Path(args.output_dir, args.logging_dir) - - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with="tensorboard", - logging_dir=logging_dir, - ) - - # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate - # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. - # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. - if ( - args.train_text_encoder - and args.gradient_accumulation_steps > 1 - and accelerator.num_processes > 1 - ): - raise ValueError( - "Gradient accumulation is not supported when training the text encoder in distributed training. " - "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." 
- ) - - if args.seed is not None: - set_seed(args.seed) - - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = ( - torch.float16 if accelerator.device.type == "cuda" else torch.float32 - ) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - torch_dtype=torch_dtype, - safety_checker=None, - revision=args.revision, - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader( - sample_dataset, batch_size=args.sample_batch_size - ) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - - for example in tqdm( - sample_dataloader, - desc="Generating class images", - disable=not accelerator.is_local_main_process, - ): - images = pipeline(example["prompt"]).images - - for i, image in enumerate(images): - hash_image = hashlib.sha1(image.tobytes()).hexdigest() - image_filename = ( - class_images_dir - / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" - ) - image.save(image_filename) - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name( - Path(args.output_dir).name, token=args.hub_token - ) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - - with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: - if "step_*" not in gitignore: - gitignore.write("step_*\n") - if "epoch_*" not in gitignore: - gitignore.write("epoch_*\n") - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained( - args.tokenizer_name, - revision=args.revision, - ) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained( - args.pretrained_model_name_or_path, - subfolder="tokenizer", - revision=args.revision, - ) - - # Load models and create wrapper for stable diffusion - text_encoder = CLIPTextModel.from_pretrained( - args.pretrained_model_name_or_path, - subfolder="text_encoder", - revision=args.revision, - ) - vae = AutoencoderKL.from_pretrained( - args.pretrained_model_name_or_path, - subfolder="vae", - revision=args.revision, - ) - unet = UNet2DConditionModel.from_pretrained( - args.pretrained_model_name_or_path, - subfolder="unet", - revision=args.revision, - ) - unet.requires_grad_(False) - unet_lora_params, train_names = inject_trainable_lora(unet) - - for _up, _down in extract_lora_ups_down(unet): - print(_up.weight) - print(_down.weight) - break - - vae.requires_grad_(False) - if not args.train_text_encoder: - text_encoder.requires_grad_(False) - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - if args.train_text_encoder: - text_encoder.gradient_checkpointing_enable() - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate - * args.gradient_accumulation_steps - * args.train_batch_size - * accelerator.num_processes - ) - - # Use 8-bit 
Adam for lower memory usage or to fine-tune the model on 16GB GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - params_to_optimize = ( - itertools.chain(*unet_lora_params, text_encoder.parameters()) - if args.train_text_encoder - else itertools.chain(*unet_lora_params) - ) - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - noise_scheduler = DDPMScheduler.from_config( - args.pretrained_model_name_or_path, subfolder="scheduler" - ) - - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - instance_prompt=args.instance_prompt, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_prompt=args.class_prompt, - tokenizer=tokenizer, - size=args.resolution, - center_crop=args.center_crop, - ) - - def collate_fn(examples): - input_ids = [example["instance_prompt_ids"] for example in examples] - pixel_values = [example["instance_images"] for example in examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. - if args.with_prior_preservation: - input_ids += [example["class_prompt_ids"] for example in examples] - pixel_values += [example["class_images"] for example in examples] - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = tokenizer.pad( - {"input_ids": input_ids}, - padding="max_length", - max_length=tokenizer.model_max_length, - return_tensors="pt", - ).input_ids - - batch = { - "input_ids": input_ids, - "pixel_values": pixel_values, - } - return batch - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, - batch_size=args.train_batch_size, - shuffle=True, - collate_fn=collate_fn, - num_workers=1, - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil( - len(train_dataloader) / args.gradient_accumulation_steps - ) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - ) - - if args.train_text_encoder: - ( - unet, - text_encoder, - optimizer, - train_dataloader, - lr_scheduler, - ) = accelerator.prepare( - unet, text_encoder, optimizer, train_dataloader, lr_scheduler - ) - else: - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - weight_dtype = torch.float32 - if accelerator.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif accelerator.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move text_encoder and vae to gpu. - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. 
- vae.to(accelerator.device, dtype=weight_dtype) - if not args.train_text_encoder: - text_encoder.to(accelerator.device, dtype=weight_dtype) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil( - len(train_dataloader) / args.gradient_accumulation_steps - ) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initialize automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("dreambooth", config=vars(args)) - - # Train! - total_batch_size = ( - args.train_batch_size - * accelerator.num_processes - * args.gradient_accumulation_steps - ) - - print("***** Running training *****") - print(f" Num examples = {len(train_dataset)}") - print(f" Num batches each epoch = {len(train_dataloader)}") - print(f" Num Epochs = {args.num_train_epochs}") - print(f" Instantaneous batch size per device = {args.train_batch_size}") - print( - f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}" - ) - print(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - print(f" Total optimization steps = {args.max_train_steps}") - # Only show the progress bar once on each machine. - progress_bar = tqdm( - range(args.max_train_steps), disable=not accelerator.is_local_main_process - ) - progress_bar.set_description("Steps") - global_step = 0 - - for epoch in range(args.num_train_epochs): - unet.train() - if args.train_text_encoder: - text_encoder.train() - for step, batch in enumerate(train_dataloader): - - # Convert images to latent space - latents = vae.encode( - batch["pixel_values"].to(dtype=weight_dtype) - ).latent_dist.sample() - latents = latents * 0.18215 - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint( - 0, - noise_scheduler.config.num_train_timesteps, - (bsz,), - device=latents.device, - ) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - # Predict the noise residual - model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError( - f"Unknown prediction type {noise_scheduler.config.prediction_type}" - ) - - if args.with_prior_preservation: - # Chunk the noise and model_pred into two parts and compute the loss on each part separately. 
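                # (collate_fn stacked instance examples first and class examples second, so chunking along dim 0 yields the instance prediction in the first half and the prior prediction in the second.)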
- model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) - target, target_prior = torch.chunk(target, 2, dim=0) - - # Compute instance loss - loss = ( - F.mse_loss(model_pred.float(), target.float(), reduction="none") - .mean([1, 2, 3]) - .mean() - ) - - # Compute prior loss - prior_loss = F.mse_loss( - model_pred_prior.float(), target_prior.float(), reduction="mean" - ) - - # Add the prior loss to the instance loss. - loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) - if args.train_text_encoder - else unet.parameters() - ) - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - progress_bar.update(1) - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - - global_step += 1 - - if global_step % args.save_steps == 0: - if accelerator.is_main_process: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - revision=args.revision, - ) - - save_lora_weight(pipeline.unet, args.output_dir + "/lora_weight.pt") - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - accelerator.wait_for_everyone() - - # Create the pipeline using the trained modules and save it. - if accelerator.is_main_process: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - revision=args.revision, - ) - - print("\n\nLora TRAINING DONE!\n\n") - - save_lora_weight(pipeline.unet, args.output_dir + "/lora_weight.pt") - - for _up, _down in extract_lora_ups_down(pipeline.unet): - print("First Layer's Up Weight is now : ", _up.weight) - print("First Layer's Down Weight is now : ", _down.weight) - break - - if args.push_to_hub: - repo.push_to_hub( - commit_message="End of training", blocking=False, auto_lfs_prune=True - ) - - accelerator.end_training() - - -if __name__ == "__main__": - args = parse_args() - main(args) diff --git a/spaces/zomehwh/rvc-models/infer_pack/attentions.py b/spaces/zomehwh/rvc-models/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/rvc-models/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - 
self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = 
block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." 
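            # Band mask via triu(-block_length).tril(block_length): ones within block_length of the diagonal; positions outside the band are filled with -1e4 so the softmax ignores them.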
- block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so as to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # pad along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/zomehwh/vits-models-pcr/text/symbols.py b/spaces/zomehwh/vits-models-pcr/text/symbols.py deleted file mode 100644 index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/vits-models-pcr/text/symbols.py +++ /dev/null @@ -1,39 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. 
-''' - -'''# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' -''' - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' - - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") \ No newline at end of file diff --git a/spaces/zxy666/bingo-chatai666/src/components/tone-selector.tsx b/spaces/zxy666/bingo-chatai666/src/components/tone-selector.tsx deleted file mode 100644 index 5c6e464c91f564b895acd121f0a4a79ed9c5c356..0000000000000000000000000000000000000000 --- a/spaces/zxy666/bingo-chatai666/src/components/tone-selector.tsx +++ /dev/null @@ -1,43 +0,0 @@ -import React from 'react' -import { BingConversationStyle } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' - -type ToneItem = { - type: BingConversationStyle, - name: string -} - -const ToneList: ToneItem[] = [ - { name: '有创造力', type: BingConversationStyle.Creative }, - { name: '更平衡', type: BingConversationStyle.Balanced }, - { name: '更精确', type: BingConversationStyle.Precise } -] - -interface ToneSelectorProps { - type: BingConversationStyle | '' - onChange?: (type: BingConversationStyle) => void -} - -export function ToneSelector({ type, onChange }: ToneSelectorProps) { - return ( -
        -
        - 选择对话样式 -
        -
        -
          - { - ToneList.map(tone => ( -
        - onChange?.(tone.type)}> - -
        - - )) - } -
        -
        -
        - ) -}
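For reference, a minimal usage sketch of the deleted ToneSelector (a hypothetical parent component; the relative import path and the parent's name are assumptions, while `BingConversationStyle` comes from the imports in the file above):

```tsx
import React, { useState } from 'react'
import { BingConversationStyle } from '@/lib/bots/bing/types'
import { ToneSelector } from './tone-selector'

// Hypothetical parent: keeps the chosen conversation style in state and
// passes it back down so the selector can highlight the active tone.
export function ChatSettings() {
  const [tone, setTone] = useState<BingConversationStyle | ''>('')
  return <ToneSelector type={tone} onChange={setTone} />
}
```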