- Lincon Legal Text Summarizer - Made Using Legal-Bert
- Input Legal Text
- Upload Legal Document
- 5 Pages Per Minute
- - - diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests/test_vits_train.py b/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests/test_vits_train.py deleted file mode 100644 index ea5dc02405ab1450d905f95a39ebea65dd72c4a4..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests/test_vits_train.py +++ /dev/null @@ -1,72 +0,0 @@ -import glob -import json -import os -import shutil - -from trainer import get_last_checkpoint - -from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs.vits_config import VitsConfig - -config_path = os.path.join(get_tests_output_path(), "test_model_config.json") -output_path = os.path.join(get_tests_output_path(), "train_outputs") - - -config = VitsConfig( - batch_size=2, - eval_batch_size=2, - num_loader_workers=0, - num_eval_loader_workers=0, - text_cleaner="english_cleaners", - use_phonemes=True, - phoneme_language="en-us", - phoneme_cache_path="tests/data/ljspeech/phoneme_cache/", - run_eval=True, - test_delay_epochs=-1, - epochs=1, - print_step=1, - print_eval=True, - test_sentences=[ - ["Be a voice, not an echo."], - ], -) -config.audio.do_trim_silence = True -config.audio.trim_db = 60 -config.save_json(config_path) - -# train the model for one epoch -command_train = ( - f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --config_path {config_path} " - f"--coqpit.output_path {output_path} " - "--coqpit.datasets.0.formatter ljspeech " - "--coqpit.datasets.0.meta_file_train metadata.csv " - "--coqpit.datasets.0.meta_file_val metadata.csv " - "--coqpit.datasets.0.path tests/data/ljspeech " - "--coqpit.datasets.0.meta_file_attn_mask tests/data/ljspeech/metadata_attn_mask.txt " - "--coqpit.test_delay_epochs 0" -) -run_cli(command_train) - -# Find latest folder -continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime) - -# Inference using TTS API -continue_config_path = os.path.join(continue_path, "config.json") -continue_restore_path, _ = get_last_checkpoint(continue_path) -out_wav_path = os.path.join(get_tests_output_path(), "output.wav") - -# Check integrity of the config -with open(continue_config_path, "r", encoding="utf-8") as f: - config_loaded = json.load(f) -assert config_loaded["characters"] is not None -assert config_loaded["output_path"] in continue_path -assert config_loaded["test_delay_epochs"] == 0 - -# Load the model and run inference -inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' 
--config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}" -run_cli(inference_command) - -# restore the model and continue training for one more epoch -command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path {continue_path} " -run_cli(command_train) -shutil.rmtree(continue_path) diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_vocoder_parallel_wavegan_discriminator.py b/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_vocoder_parallel_wavegan_discriminator.py deleted file mode 100644 index d4eca0d1374fb5cabf111cb52cf249969392bad4..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_vocoder_parallel_wavegan_discriminator.py +++ /dev/null @@ -1,46 +0,0 @@ -import numpy as np -import torch - -from TTS.vocoder.models.parallel_wavegan_discriminator import ( - ParallelWaveganDiscriminator, - ResidualParallelWaveganDiscriminator, -) - - -def test_pwgan_disciminator(): - model = ParallelWaveganDiscriminator( - in_channels=1, - out_channels=1, - kernel_size=3, - num_layers=10, - conv_channels=64, - dilation_factor=1, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - bias=True, - ) - dummy_x = torch.rand((4, 1, 64 * 256)) - output = model(dummy_x) - assert np.all(output.shape == (4, 1, 64 * 256)) - model.remove_weight_norm() - - -def test_redisual_pwgan_disciminator(): - model = ResidualParallelWaveganDiscriminator( - in_channels=1, - out_channels=1, - kernel_size=3, - num_layers=30, - stacks=3, - res_channels=64, - gate_channels=128, - skip_channels=64, - dropout=0.0, - bias=True, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - ) - dummy_x = torch.rand((4, 1, 64 * 256)) - output = model(dummy_x) - assert np.all(output.shape == (4, 1, 64 * 256)) - model.remove_weight_norm() diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Hash/KMAC128.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Hash/KMAC128.py deleted file mode 100644 index 05061fc2ea402ae614fb235f9fde0e91b9e83286..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Hash/KMAC128.py +++ /dev/null @@ -1,179 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2021, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -from binascii import unhexlify - -from Crypto.Util.py3compat import bord, tobytes, is_bytes -from Crypto.Random import get_random_bytes - -from . import cSHAKE128, SHA3_256 -from .cSHAKE128 import _bytepad, _encode_str, _right_encode - - -class KMAC_Hash(object): - """A KMAC hash object. - Do not instantiate directly. - Use the :func:`new` function. - """ - - def __init__(self, data, key, mac_len, custom, - oid_variant, cshake, rate): - - # See https://tools.ietf.org/html/rfc8702 - self.oid = "2.16.840.1.101.3.4.2." + oid_variant - self.digest_size = mac_len - - self._mac = None - - partial_newX = _bytepad(_encode_str(tobytes(key)), rate) - self._cshake = cshake._new(partial_newX, custom, b"KMAC") - - if data: - self._cshake.update(data) - - def update(self, data): - """Authenticate the next chunk of message. - - Args: - data (bytes/bytearray/memoryview): The next chunk of the message to - authenticate. - """ - - if self._mac: - raise TypeError("You can only call 'digest' or 'hexdigest' on this object") - - self._cshake.update(data) - return self - - def digest(self): - """Return the **binary** (non-printable) MAC tag of the message. - - :return: The MAC tag. Binary form. - :rtype: byte string - """ - - if not self._mac: - self._cshake.update(_right_encode(self.digest_size * 8)) - self._mac = self._cshake.read(self.digest_size) - - return self._mac - - def hexdigest(self): - """Return the **printable** MAC tag of the message. - - :return: The MAC tag. Hexadecimal encoded. - :rtype: string - """ - - return "".join(["%02x" % bord(x) for x in tuple(self.digest())]) - - def verify(self, mac_tag): - """Verify that a given **binary** MAC (computed by another party) - is valid. - - Args: - mac_tag (bytes/bytearray/memoryview): the expected MAC of the message. - - Raises: - ValueError: if the MAC does not match. It means that the message - has been tampered with or that the MAC key is incorrect. - """ - - secret = get_random_bytes(16) - - mac1 = SHA3_256.new(secret + mac_tag) - mac2 = SHA3_256.new(secret + self.digest()) - - if mac1.digest() != mac2.digest(): - raise ValueError("MAC check failed") - - def hexverify(self, hex_mac_tag): - """Verify that a given **printable** MAC (computed by another party) - is valid. - - Args: - hex_mac_tag (string): the expected MAC of the message, as a hexadecimal string. - - Raises: - ValueError: if the MAC does not match. It means that the message - has been tampered with or that the MAC key is incorrect. - """ - - self.verify(unhexlify(tobytes(hex_mac_tag))) - - def new(self, **kwargs): - """Return a new instance of a KMAC hash object. - See :func:`new`. - """ - - if "mac_len" not in kwargs: - kwargs["mac_len"] = self.digest_size - - return new(**kwargs) - - -def new(**kwargs): - """Create a new KMAC128 object. - - Args: - key (bytes/bytearray/memoryview): - The key to use to compute the MAC. - It must be at least 128 bits long (16 bytes). 
- data (bytes/bytearray/memoryview): - Optional. The very first chunk of the message to authenticate. - It is equivalent to an early call to :meth:`KMAC_Hash.update`. - mac_len (integer): - Optional. The size of the authentication tag, in bytes. - Default is 64. Minimum is 8. - custom (bytes/bytearray/memoryview): - Optional. A customization byte string (``S`` in SP 800-185). - - Returns: - A :class:`KMAC_Hash` hash object - """ - - key = kwargs.pop("key", None) - if not is_bytes(key): - raise TypeError("You must pass a key to KMAC128") - if len(key) < 16: - raise ValueError("The key must be at least 128 bits long (16 bytes)") - - data = kwargs.pop("data", None) - - mac_len = kwargs.pop("mac_len", 64) - if mac_len < 8: - raise ValueError("'mac_len' must be 8 bytes or more") - - custom = kwargs.pop("custom", b"") - - if kwargs: - raise TypeError("Unknown parameters: " + str(kwargs)) - - return KMAC_Hash(data, key, mac_len, custom, "19", cSHAKE128, 168) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/CurImagePlugin.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/CurImagePlugin.py deleted file mode 100644 index 42af5cafcefc407821bac92b5b3ed88b73de71e1..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/CurImagePlugin.py +++ /dev/null @@ -1,75 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# Windows Cursor support for PIL -# -# notes: -# uses BmpImagePlugin.py to read the bitmap data. -# -# history: -# 96-05-27 fl Created -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1996. -# -# See the README file for information on usage and redistribution. -# -from . import BmpImagePlugin, Image -from ._binary import i16le as i16 -from ._binary import i32le as i32 - -# -# -------------------------------------------------------------------- - - -def _accept(prefix): - return prefix[:4] == b"\0\0\2\0" - - -## -# Image plugin for Windows Cursor files. - - -class CurImageFile(BmpImagePlugin.BmpImageFile): - - format = "CUR" - format_description = "Windows Cursor" - - def _open(self): - - offset = self.fp.tell() - - # check magic - s = self.fp.read(6) - if not _accept(s): - raise SyntaxError("not a CUR file") - - # pick the largest cursor in the file - m = b"" - for i in range(i16(s, 4)): - s = self.fp.read(16) - if not m: - m = s - elif s[0] > m[0] and s[1] > m[1]: - m = s - if not m: - raise TypeError("No cursors were found") - - # load as bitmap - self._bitmap(i32(m, 12) + offset) - - # patch up the bitmap height - self._size = self.size[0], self.size[1] // 2 - d, e, o, a = self.tile[0] - self.tile[0] = d, (0, 0) + self.size, o, a - - return - - -# -# -------------------------------------------------------------------- - -Image.register_open(CurImageFile.format, CurImageFile, _accept) - -Image.register_extension(CurImageFile.format, ".cur") diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/benchmark/dummy_mt.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/benchmark/dummy_mt.py deleted file mode 100644 index 28d78cffdbf8c2bcee69b454a79891cb34def200..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/benchmark/dummy_mt.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging - -import numpy as np -import torch - -from fairseq.data import Dictionary, FairseqDataset -from fairseq.tasks import LegacyFairseqTask, register_task - -logger = logging.getLogger(__name__) - - -@register_task("dummy_mt") -class DummyMTTask(LegacyFairseqTask): - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - parser.add_argument("--dict-size", default=49996, type=int) - parser.add_argument("--dataset-size", default=100000, type=int) - parser.add_argument("--src-len", default=30, type=int) - parser.add_argument("--tgt-len", default=30, type=int) - - def __init__(self, args, dictionary): - super().__init__(args) - self.dictionary = dictionary - self.seed = args.seed - - dictionary.pad_to_multiple_(8) # often faster if divisible by 8 - - self.dummy_src = torch.arange(args.src_len + 1) + dictionary.pad() + 1 - self.dummy_tgt = torch.arange(args.tgt_len + 1) + dictionary.pad() + 1 - - @classmethod - def setup_task(cls, args, **kwargs): - """Setup the task.""" - dictionary = Dictionary() - for i in range(args.dict_size): - dictionary.add_symbol("word{}".format(i)) - logger.info("dictionary: {} types".format(len(dictionary))) - - args.max_source_positions = args.src_len + dictionary.pad() + 2 - args.max_target_positions = args.tgt_len + dictionary.pad() + 2 - - return cls(args, dictionary) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. - Args: - split (str): name of the split (e.g., train, valid, test) - """ - item_size = max(self.args.src_len, self.args.tgt_len) - if self.args.batch_size is not None: - bsz = self.args.batch_size - else: - bsz = max(1, self.args.max_tokens // item_size) - tgt = torch.stack([self.dummy_tgt for _ in range(bsz)]) - self.datasets[split] = DummyDataset( - { - "id": 1, - "net_input": { - "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), - "src_lengths": torch.full( - (bsz,), self.args.src_len, dtype=torch.long - ), - "prev_output_tokens": tgt.clone(), - }, - "target": tgt, - "nsentences": bsz, - "ntokens": bsz * self.args.tgt_len, - }, - num_items=self.args.dataset_size, - item_size=item_size, - ) - - @property - def source_dictionary(self): - return self.dictionary - - @property - def target_dictionary(self): - return self.dictionary - - -class DummyDataset(FairseqDataset): - def __init__(self, batch, num_items, item_size): - super().__init__() - self.batch = batch - self.num_items = num_items - self.item_size = item_size - - def __getitem__(self, index): - return index - - def __len__(self): - return self.num_items - - def collater(self, samples): - return self.batch - - @property - def sizes(self): - return np.array([self.item_size] * self.num_items) - - def num_tokens(self, index): - return self.item_size - - def size(self, index): - return self.item_size - - def ordered_indices(self): - return np.arange(self.num_items) - - @property - def supports_prefetch(self): - return False diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/distributed/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/distributed/__init__.py deleted file mode 100644 index 9130db8f5d039519d663ee16c7ff2c102f5481f5..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/distributed/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .distributed_timeout_wrapper import DistributedTimeoutWrapper -from .fully_sharded_data_parallel import ( - fsdp_enable_wrap, - fsdp_wrap, - FullyShardedDataParallel, -) -from .legacy_distributed_data_parallel import LegacyDistributedDataParallel -from .module_proxy_wrapper import ModuleProxyWrapper -from .tpu_distributed_data_parallel import TPUDistributedDataParallel - - -__all__ = [ - "DistributedTimeoutWrapper", - "fsdp_enable_wrap", - "fsdp_wrap", - "FullyShardedDataParallel", - "LegacyDistributedDataParallel", - "ModuleProxyWrapper", - "TPUDistributedDataParallel", -] diff --git a/spaces/asciicorp/hotel-chat/config.py b/spaces/asciicorp/hotel-chat/config.py deleted file mode 100644 index fab50a17b5c08b92e364e5a73c54014c4f3b6284..0000000000000000000000000000000000000000 --- a/spaces/asciicorp/hotel-chat/config.py +++ /dev/null @@ -1,21 +0,0 @@ -from datetime import datetime - -def get_date(): - now = datetime.now() - today = now.strftime("%A, %Y/%m/%d") - return today - -DEFAULT_PREFIX = f"""You are Neura. you are a professional customer support agent of Obsidian Heritage Hotel Colombo having a conversation with a human. -introduce your self and offer help. -today's date is {get_date()}. -You have access to the following tools:""" - -DEFAULT_TEMPERATURE = 0.6 - -GREET_TEMPLATE = """You are an customer service agent for Obsidian Heritage Hotel Colombo. -Offer polite, professional and respectful small talk. -If the question is not related to hotel, politely guide the user to ask questions related to the hotel. -your conversation history with same person: -{chat_history} -QUESTION: {question} -FINAL ANSWER:""" \ No newline at end of file diff --git a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/hed_grounding_downsampler.py b/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/hed_grounding_downsampler.py deleted file mode 100644 index 99d1e7def372b74db331b6f50d3dc4574ace47a2..0000000000000000000000000000000000000000 --- a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/hed_grounding_downsampler.py +++ /dev/null @@ -1,23 +0,0 @@ -import torch -import torch.nn as nn -from ldm.modules.attention import BasicTransformerBlock -from ldm.modules.diffusionmodules.util import checkpoint, FourierEmbedder -import torch.nn.functional as F - - - -class GroundingDownsampler(nn.Module): - def __init__(self, out_dim=1): - super().__init__() - self.out_dim = out_dim - # No learnable params for hed edge map, just downsample it with bicubic - - def forward(self, grounding_extra_input): - # this is actually gary scale, but converted to rgb in dataset, information redudant - grounding_extra_input = grounding_extra_input[:,0].unsqueeze(1) - - out = torch.nn.functional.interpolate(grounding_extra_input, (64,64), mode='bicubic') - assert out.shape[1] == self.out_dim - return out - - diff --git a/spaces/avid-ml/bias-detection/avidtools/datamodels/components.py b/spaces/avid-ml/bias-detection/avidtools/datamodels/components.py deleted file mode 100644 index f4229941ac7a1c63b58147c8777ae76f3299debe..0000000000000000000000000000000000000000 --- a/spaces/avid-ml/bias-detection/avidtools/datamodels/components.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import Dict, List, Optional -from typing_extensions import TypedDict -from pydantic import BaseModel - -from .enums import 
* - -class LangValue(BaseModel): - lang: str - value: str - -class Artifact(BaseModel): - type: ArtifactTypeEnum - name: str - -class Detection(BaseModel): - type: MethodEnum - name: str - -class Affects(BaseModel): - developer: List[str] - deployer: List[str] - artifacts: List[Artifact] - -class Problemtype(BaseModel): - classof: ClassEnum - type: Optional[TypeEnum] - description: LangValue - -class Metric(BaseModel): - name: str - detection_method: Detection - results: Dict - -class Reference(BaseModel): - type: Optional[str] - label: str - url: str # AnyUrl is a better fit, but keeping this because submissions are not standard yet - - class Config: # type is excluded if None - fields = {'type': {'exclude': True}} - -class AvidTaxonomy(BaseModel): - vuln_id: Optional[str] - risk_domain: List[str] - sep_view: List[SepEnum] - lifecycle_view: List[LifecycleEnum] - taxonomy_version: str - - class Config: # vuln_id is excluded if None - fields = {'vuln_id': {'exclude': True}} - -class Impact(BaseModel): - avid: AvidTaxonomy \ No newline at end of file diff --git a/spaces/awacke1/ChatGPTStreamlit8/README.md b/spaces/awacke1/ChatGPTStreamlit8/README.md deleted file mode 100644 index fa3ce7e68f234c93ed5187fff1be7c68c27187e2..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ChatGPTStreamlit8/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ChatGPTStreamlit8 -emoji: 🏃 -colorFrom: yellow -colorTo: blue -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/QuickLearner/README.md b/spaces/awacke1/QuickLearner/README.md deleted file mode 100644 index d6dc770a7aa5c72e2aca627558905517333d3bc6..0000000000000000000000000000000000000000 --- a/spaces/awacke1/QuickLearner/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: QuickLearner -emoji: 😻 -colorFrom: yellow -colorTo: green -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/VideoPlayer/app.py b/spaces/awacke1/VideoPlayer/app.py deleted file mode 100644 index c32075396f63fe6b66fc8bb8b915e172e52fb7bf..0000000000000000000000000000000000000000 --- a/spaces/awacke1/VideoPlayer/app.py +++ /dev/null @@ -1,82 +0,0 @@ - -import os -import streamlit as st - -# These are the formats supported in Streamlit right now. -VIDEO_EXTENSIONS = ["mp4", "ogv", "m4v", "webm"] - -# For sample video files, try the Internet Archive, or download a few samples here: -# http://techslides.com/sample-webm-ogg-and-mp4-video-files-for-html5 - - -st.title("Video Player") - -st.header("Local video files") -st.write( - "You can use st.video to play a locally-stored video by supplying it with a valid filesystem path." -) - - -def get_video_files_in_dir(directory): - out = [] - for item in os.listdir(directory): - try: - name, ext = item.split(".") - except: - continue - if name and ext: - if ext in VIDEO_EXTENSIONS: - out.append(item) - return out - - -avdir = os.path.expanduser("~") -files = get_video_files_in_dir(avdir) - -if len(files) == 0: - st.write( - "Put some video files in your home directory (%s) to activate this player." 
- % avdir - ) - -else: - filename = st.selectbox( - "Select a video file from your home directory (%s) to play" % avdir, - files, - 0, - ) - - st.video(os.path.join(avdir, filename)) -st.header("Remote video playback") -st.write("st.video allows a variety of HTML5 supported video links, including YouTube.") - - -def shorten_vid_option(opt): - return opt.split("/")[-1] - - -# A random sampling of videos found around the web. We should replace -# these with those sourced from the streamlit community if possible! -vidurl = st.selectbox( - "Pick a video to play", - ( - "https://youtu.be/IoQsyHVFflU", - "https://youtu.be/ZT3_2X7Txu0?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/2N83yzUcomc?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/0i3U47OQs0E?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/L8ObyHp9PY0?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/x9vRg5KmLIo?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/ZEchaKXaaas?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/5yToL7ymfNo?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/N2hM3RYdyrE?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/Kd8OAVRyDc4?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/OsRBhgTxYPQ?list=PLHgX2IExbFosM9aYef9-1ymyx3QNUw3w1", - "https://youtu.be/V7rJ3eS1dHQ", - "https://youtu.be/w7EPhC1WEvE" - - ), - 0, - shorten_vid_option, -) - -st.video(vidurl) \ No newline at end of file diff --git a/spaces/balaramas/s2t_translator/s2t_en2hi.py b/spaces/balaramas/s2t_translator/s2t_en2hi.py deleted file mode 100644 index 51972671cdbec29758fe0814289fe600f5d3fd42..0000000000000000000000000000000000000000 --- a/spaces/balaramas/s2t_translator/s2t_en2hi.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Script to translate given single english audio file to corresponding hindi text - -Usage : python s2t_en2hi.py -""" - -import sys -import os -import subprocess - -# TODO better argument handling -hi_wav = sys.argv[1] -en2hi_model_checkpoint = sys.argv[2] - -os.system(f"cp {hi_wav} ./MUSTC_ROOT/en-hi/data/tst-COMMON/wav/test.wav") - -print("------Starting data prepration...") -subprocess.run(["python", "prep_mustc_data_hindi_single.py", "--data-root", "MUSTC_ROOT/", "--task", "st", "--vocab-type", "unigram", "--vocab-size", "8000"], stdout=subprocess.DEVNULL) - -print("------Performing translation...") -translation_result = subprocess.run(["fairseq-generate", "./MUSTC_ROOT/en-hi/", "--config-yaml", "config_st.yaml", "--gen-subset", "tst-COMMON_st", "--task", "speech_to_text", "--path", sys.argv[2], "--max-tokens", "50000", "--beam", "5", "--scoring", "sacrebleu"], capture_output=True, text=True) -translation_result_text = translation_result.stdout -print(translation_result.std) -lines = translation_result_text.split("\n") - -print("\n\n------Translation results are:") -for i in lines: - if (i.startswith("D-0")): - print(i) - break - -os.system("rm ./MUSTC_ROOT/en-hi/data/tst-COMMON/wav/test.wav") \ No newline at end of file diff --git a/spaces/balgot/text-to-stylegan3/app.py b/spaces/balgot/text-to-stylegan3/app.py deleted file mode 100644 index 54f2696acf37ade4062436d8a393badfb50bb591..0000000000000000000000000000000000000000 --- a/spaces/balgot/text-to-stylegan3/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import pickle -import subprocess -import sys -import warnings - -import gradio as gr -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision.transforms as T -import transformers -from 
sentence_transformers import SentenceTransformer - -from utils import DownloadModelCtx - - -class LaTran(nn.Module): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.pipe = nn.Sequential( - nn.Linear(384, 512), - nn.ReLU(), - nn.Linear(512, 512) - ) - - def forward(self, v): - return self.pipe(v.unsqueeze(1)) - - -dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") -print("Running on", dev, "\n\n") - - -################################################################################ -## M O D E L S -################################################################################ - -with DownloadModelCtx("Loading language model"): - LANGUAGE_MODEL = "sentence-transformers/all-MiniLM-L6-v2" - lang_model = SentenceTransformer(LANGUAGE_MODEL).to(dev) - -with DownloadModelCtx("Loading translation model") as f: - TRANSLATION_MODEL = "model.pt" - p = subprocess.run(["bash", "download_model.sh"], capture_output=True, text=True) - f.add_process(p) - tran_model = LaTran().to(dev) - tran_model.load_state_dict(torch.load(TRANSLATION_MODEL, map_location=dev)) - -with DownloadModelCtx("Loading StyleGAN3") as f: - p = subprocess.run(["bash", "download_stylegan.sh"], capture_output=True, text=True) - f.add_process(p) - sys.path.insert(0, "./stylegan3") - with open('stylegan3/stylegan3-t-ffhq-1024x1024.pkl', 'rb') as f: - stylegan = pickle.load(f)['G_ema'].to(dev) - -with DownloadModelCtx("Loading captioning model"): - CAPTION_MODEL = "Salesforce/blip-image-captioning-base" - caption_model = transformers.BlipForConditionalGeneration.from_pretrained(CAPTION_MODEL).to(dev) - caption_processor = transformers.AutoProcessor.from_pretrained(CAPTION_MODEL) - - -################################################################################ -## H E L P E R S -################################################################################ - - -def caption_image(image: torch.Tensor) -> str: - """Return the suitable caption for the input image.""" - assert len(image.shape) == 3, f"expected (dim, dim, channel) but got {image.shape}" - QUERIES = ["picture of", "the hair is", "hair color is", "eyes are", "face looks"] - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - - result = [] - for q in QUERIES: - _inputs = caption_processor(images=image.unsqueeze(0), text=q, return_tensors="pt").to(dev) - _tokens = caption_model.generate(**_inputs) - _description = caption_processor.decode(_tokens.squeeze(), skip_special_tokens=True) - _description = _description.lower().strip() - result.append(_description.capitalize()) - return ". 
".join(result) - - - -def generate(text: str): - with torch.no_grad(): - print(f"[input] {text}") - encoded_np = lang_model.encode(text) - encoded = torch.tensor(encoded_np).unsqueeze(0).to(dev) - print(f"[encoded] {encoded.shape}") - gan_intro = tran_model(encoded).squeeze(1) - noise = (0.15 * torch.randn(*gan_intro.shape)).to(dev) - print(f"[gan-in] {(gan_intro.shape)}") - # normalize the input vector - gan_intro += noise - gan_intro = F.normalize(gan_intro) - image = stylegan(gan_intro, None) - print(f"[gan-out] {image.shape}") - normalized_image = (image.cpu() * 127.5 + 128).clamp(0, 255).to(torch.uint8) - pil_image = T.functional.to_pil_image(normalized_image[0]).resize((256, 256)) - caption = caption_image(normalized_image[0]) - print(f"[caption] {caption}\n\n") - return pil_image, caption - - -################################################################################ -## M A I N -################################################################################ - -if __name__ == "__main__": - print("serving...") - input_text = gr.components.Textbox(lines=5, label="Input Text") - output_image = gr.components.Image(label="Generated Image", type="pil") - output_caption = gr.components.Textbox(label="Generated Image Caption") - - with open("README.md", "r", encoding="utf-8") as f: - gr.Interface( - fn=generate, - inputs=input_text, - outputs=[output_image, output_caption], - title="Text to StyleGAN3 Image", - description=f.read().split("---")[-1].strip() - ).launch() \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/controls/FirstPersonControls.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/controls/FirstPersonControls.js deleted file mode 100644 index 34c255109891346efd4dfbfbacd5c25074e54a1b..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/controls/FirstPersonControls.js +++ /dev/null @@ -1,342 +0,0 @@ -/** - * @author mrdoob / http://mrdoob.com/ - * @author alteredq / http://alteredqualia.com/ - * @author paulirish / http://paulirish.com/ - */ - -THREE.FirstPersonControls = function ( object, domElement ) { - - this.object = object; - - this.domElement = ( domElement !== undefined ) ? 
domElement : document; - - this.enabled = true; - - this.movementSpeed = 1.0; - this.lookSpeed = 0.005; - - this.lookVertical = true; - this.autoForward = false; - - this.activeLook = true; - - this.heightSpeed = false; - this.heightCoef = 1.0; - this.heightMin = 0.0; - this.heightMax = 1.0; - - this.constrainVertical = false; - this.verticalMin = 0; - this.verticalMax = Math.PI; - - this.autoSpeedFactor = 0.0; - - this.mouseX = 0; - this.mouseY = 0; - - this.moveForward = false; - this.moveBackward = false; - this.moveLeft = false; - this.moveRight = false; - - this.mouseDragOn = false; - - this.viewHalfX = 0; - this.viewHalfY = 0; - - // private variables - - var lat = 0; - var lon = 0; - - var lookDirection = new THREE.Vector3(); - var spherical = new THREE.Spherical(); - var target = new THREE.Vector3(); - - // - - if ( this.domElement !== document ) { - - this.domElement.setAttribute( 'tabindex', - 1 ); - - } - - // - - this.handleResize = function () { - - if ( this.domElement === document ) { - - this.viewHalfX = window.innerWidth / 2; - this.viewHalfY = window.innerHeight / 2; - - } else { - - this.viewHalfX = this.domElement.offsetWidth / 2; - this.viewHalfY = this.domElement.offsetHeight / 2; - - } - - }; - - this.onMouseDown = function ( event ) { - - if ( this.domElement !== document ) { - - this.domElement.focus(); - - } - - event.preventDefault(); - event.stopPropagation(); - - if ( this.activeLook ) { - - switch ( event.button ) { - - case 0: this.moveForward = true; break; - case 2: this.moveBackward = true; break; - - } - - } - - this.mouseDragOn = true; - - }; - - this.onMouseUp = function ( event ) { - - event.preventDefault(); - event.stopPropagation(); - - if ( this.activeLook ) { - - switch ( event.button ) { - - case 0: this.moveForward = false; break; - case 2: this.moveBackward = false; break; - - } - - } - - this.mouseDragOn = false; - - }; - - this.onMouseMove = function ( event ) { - - if ( this.domElement === document ) { - - this.mouseX = event.pageX - this.viewHalfX; - this.mouseY = event.pageY - this.viewHalfY; - - } else { - - this.mouseX = event.pageX - this.domElement.offsetLeft - this.viewHalfX; - this.mouseY = event.pageY - this.domElement.offsetTop - this.viewHalfY; - - } - - }; - - this.onKeyDown = function ( event ) { - - //event.preventDefault(); - - switch ( event.keyCode ) { - - case 38: /*up*/ - case 87: /*W*/ this.moveForward = true; break; - - case 37: /*left*/ - case 65: /*A*/ this.moveLeft = true; break; - - case 40: /*down*/ - case 83: /*S*/ this.moveBackward = true; break; - - case 39: /*right*/ - case 68: /*D*/ this.moveRight = true; break; - - case 82: /*R*/ this.moveUp = true; break; - case 70: /*F*/ this.moveDown = true; break; - - } - - }; - - this.onKeyUp = function ( event ) { - - switch ( event.keyCode ) { - - case 38: /*up*/ - case 87: /*W*/ this.moveForward = false; break; - - case 37: /*left*/ - case 65: /*A*/ this.moveLeft = false; break; - - case 40: /*down*/ - case 83: /*S*/ this.moveBackward = false; break; - - case 39: /*right*/ - case 68: /*D*/ this.moveRight = false; break; - - case 82: /*R*/ this.moveUp = false; break; - case 70: /*F*/ this.moveDown = false; break; - - } - - }; - - this.lookAt = function ( x, y, z ) { - - if ( x.isVector3 ) { - - target.copy( x ); - - } else { - - target.set( x, y, z ); - - } - - this.object.lookAt( target ); - - setOrientation( this ); - - return this; - - }; - - this.update = function () { - - var targetPosition = new THREE.Vector3(); - - return function update( delta ) { - - if ( 
this.enabled === false ) return; - - if ( this.heightSpeed ) { - - var y = THREE.Math.clamp( this.object.position.y, this.heightMin, this.heightMax ); - var heightDelta = y - this.heightMin; - - this.autoSpeedFactor = delta * ( heightDelta * this.heightCoef ); - - } else { - - this.autoSpeedFactor = 0.0; - - } - - var actualMoveSpeed = delta * this.movementSpeed; - - if ( this.moveForward || ( this.autoForward && ! this.moveBackward ) ) this.object.translateZ( - ( actualMoveSpeed + this.autoSpeedFactor ) ); - if ( this.moveBackward ) this.object.translateZ( actualMoveSpeed ); - - if ( this.moveLeft ) this.object.translateX( - actualMoveSpeed ); - if ( this.moveRight ) this.object.translateX( actualMoveSpeed ); - - if ( this.moveUp ) this.object.translateY( actualMoveSpeed ); - if ( this.moveDown ) this.object.translateY( - actualMoveSpeed ); - - var actualLookSpeed = delta * this.lookSpeed; - - if ( ! this.activeLook ) { - - actualLookSpeed = 0; - - } - - var verticalLookRatio = 1; - - if ( this.constrainVertical ) { - - verticalLookRatio = Math.PI / ( this.verticalMax - this.verticalMin ); - - } - - lon -= this.mouseX * actualLookSpeed; - if ( this.lookVertical ) lat -= this.mouseY * actualLookSpeed * verticalLookRatio; - - lat = Math.max( - 85, Math.min( 85, lat ) ); - - var phi = THREE.Math.degToRad( 90 - lat ); - var theta = THREE.Math.degToRad( lon ); - - if ( this.constrainVertical ) { - - phi = THREE.Math.mapLinear( phi, 0, Math.PI, this.verticalMin, this.verticalMax ); - - } - - var position = this.object.position; - - targetPosition.setFromSphericalCoords( 1, phi, theta ).add( position ); - - this.object.lookAt( targetPosition ); - - }; - - }(); - - function contextmenu( event ) { - - event.preventDefault(); - - } - - this.dispose = function () { - - this.domElement.removeEventListener( 'contextmenu', contextmenu, false ); - this.domElement.removeEventListener( 'mousedown', _onMouseDown, false ); - this.domElement.removeEventListener( 'mousemove', _onMouseMove, false ); - this.domElement.removeEventListener( 'mouseup', _onMouseUp, false ); - - window.removeEventListener( 'keydown', _onKeyDown, false ); - window.removeEventListener( 'keyup', _onKeyUp, false ); - - }; - - var _onMouseMove = bind( this, this.onMouseMove ); - var _onMouseDown = bind( this, this.onMouseDown ); - var _onMouseUp = bind( this, this.onMouseUp ); - var _onKeyDown = bind( this, this.onKeyDown ); - var _onKeyUp = bind( this, this.onKeyUp ); - - this.domElement.addEventListener( 'contextmenu', contextmenu, false ); - this.domElement.addEventListener( 'mousemove', _onMouseMove, false ); - this.domElement.addEventListener( 'mousedown', _onMouseDown, false ); - this.domElement.addEventListener( 'mouseup', _onMouseUp, false ); - - window.addEventListener( 'keydown', _onKeyDown, false ); - window.addEventListener( 'keyup', _onKeyUp, false ); - - function bind( scope, fn ) { - - return function () { - - fn.apply( scope, arguments ); - - }; - - } - - function setOrientation( controls ) { - - var quaternion = controls.object.quaternion; - - lookDirection.set( 0, 0, - 1 ).applyQuaternion( quaternion ); - spherical.setFromVector3( lookDirection ); - - lat = 90 - THREE.Math.radToDeg( spherical.phi ); - lon = THREE.Math.radToDeg( spherical.theta ); - - } - - this.handleResize(); - - setOrientation( this ); - -}; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/alphamap_fragment.glsl.js 
b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/alphamap_fragment.glsl.js deleted file mode 100644 index 81c5796588a2e2fc837d2110f75bb2de407dc628..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/alphamap_fragment.glsl.js +++ /dev/null @@ -1,7 +0,0 @@ -export default /* glsl */` -#ifdef USE_ALPHAMAP - - diffuseColor.a *= texture2D( alphaMap, vUv ).g; - -#endif -`; diff --git a/spaces/baulab/Erasing-Concepts-In-Diffusion/app.py b/spaces/baulab/Erasing-Concepts-In-Diffusion/app.py deleted file mode 100644 index 933d667f81793c7bd24a8b0d1a0463d56524e1e4..0000000000000000000000000000000000000000 --- a/spaces/baulab/Erasing-Concepts-In-Diffusion/app.py +++ /dev/null @@ -1,258 +0,0 @@ -import gradio as gr -import torch -from finetuning import FineTunedModel -from StableDiffuser import StableDiffuser -from train import train - -import os -model_map = {'Van Gogh' : 'models/vangogh.pt', - 'Pablo Picasso': 'models/pablopicasso.pt', - 'Car' : 'models/car.pt', - 'Garbage Truck': 'models/garbagetruck.pt', - 'French Horn': 'models/frenchhorn.pt', - 'Kilian Eng' : 'models/kilianeng.pt', - 'Thomas Kinkade' : 'models/thomaskinkade.pt', - 'Tyler Edlin' : 'models/tyleredlin.pt', - 'Kelly McKernan': 'models/kellymckernan.pt', - 'Rembrandt': 'models/rembrandt.pt' } - -ORIGINAL_SPACE_ID = 'baulab/Erasing-Concepts-In-Diffusion' -SPACE_ID = os.getenv('SPACE_ID') - -SHARED_UI_WARNING = f'''## Attention - Training using the ESD-u method does not work in this shared UI. You can either duplicate and use it with a gpu with at least 40GB, or clone this repository to run on your own machine. -
Duplicate Space
-''' - - -class Demo: - - def __init__(self) -> None: - - self.training = False - self.generating = False - - self.diffuser = StableDiffuser(scheduler='DDIM').to('cuda').eval().half() - - with gr.Blocks() as demo: - self.layout() - demo.queue(concurrency_count=5).launch() - - - def layout(self): - - with gr.Row(): - - if SPACE_ID == ORIGINAL_SPACE_ID: - - self.warning = gr.Markdown(SHARED_UI_WARNING) - - with gr.Row(): - - with gr.Tab("Test") as inference_column: - - with gr.Row(): - - self.explain_infr = gr.Markdown(interactive=False, - value='This is a demo of [Erasing Concepts from Stable Diffusion](https://erasing.baulab.info/). To try out a model where a concept has been erased, select a model and enter any prompt. For example, if you select the model "Van Gogh" you can generate images for the prompt "A portrait in the style of Van Gogh" and compare the erased and unerased models. We have also provided several other pre-fine-tuned models with artistic styles and objects erased (Check out the "ESD Model" drop-down). You can also train and run your own custom models. Check out the "train" section for custom erasure of concepts.') - - with gr.Row(): - - with gr.Column(scale=1): - - self.prompt_input_infr = gr.Text( - placeholder="Enter prompt...", - label="Prompt", - info="Prompt to generate" - ) - - with gr.Row(): - - self.model_dropdown = gr.Dropdown( - label="ESD Model", - choices= list(model_map.keys()), - value='Van Gogh', - interactive=True - ) - - self.seed_infr = gr.Number( - label="Seed", - value=42 - ) - - with gr.Column(scale=2): - - self.infr_button = gr.Button( - value="Generate", - interactive=True - ) - - with gr.Row(): - - self.image_new = gr.Image( - label="ESD", - interactive=False - ) - self.image_orig = gr.Image( - label="SD", - interactive=False - ) - - with gr.Tab("Train") as training_column: - - with gr.Row(): - - self.explain_train= gr.Markdown(interactive=False, - value='In this part you can erase any concept from Stable Diffusion. Enter a prompt for the concept or style you want to erase, and select ESD-x if you want to focus erasure on prompts that mention the concept explicitly. [NOTE: ESD-u is currently unavailable in this space. But you can duplicate the space and run it on GPU with VRAM >40GB for enabling ESD-u]. With default settings, it takes about 15 minutes to fine-tune the model; then you can try inference above or download the weights. The training code used here is slightly different than the code tested in the original paper. 
Code and details are at [github link](https://github.com/rohitgandikota/erasing).') - - with gr.Row(): - - with gr.Column(scale=3): - - self.prompt_input = gr.Text( - placeholder="Enter prompt...", - label="Prompt to Erase", - info="Prompt corresponding to concept to erase" - ) - - choices = ['ESD-x'] - if torch.cuda.get_device_properties(0).total_memory * 1e-9 >= 40: - choices.append('ESD-u') - - self.train_method_input = gr.Dropdown( - choices=choices, - value='ESD-x', - label='Train Method', - info='Method of training' - ) - - self.neg_guidance_input = gr.Number( - value=1, - label="Negative Guidance", - info='Guidance of negative training used to train' - ) - - self.iterations_input = gr.Number( - value=150, - precision=0, - label="Iterations", - info='iterations used to train' - ) - - self.lr_input = gr.Number( - value=1e-5, - label="Learning Rate", - info='Learning rate used to train' - ) - - with gr.Column(scale=1): - - self.train_status = gr.Button(value='', variant='primary', label='Status', interactive=False) - - self.train_button = gr.Button( - value="Train", - ) - - self.download = gr.Files() - - self.infr_button.click(self.inference, inputs = [ - self.prompt_input_infr, - self.seed_infr, - self.model_dropdown - ], - outputs=[ - self.image_new, - self.image_orig - ] - ) - self.train_button.click(self.train, inputs = [ - self.prompt_input, - self.train_method_input, - self.neg_guidance_input, - self.iterations_input, - self.lr_input - ], - outputs=[self.train_button, self.train_status, self.download, self.model_dropdown] - ) - - def train(self, prompt, train_method, neg_guidance, iterations, lr, pbar = gr.Progress(track_tqdm=True)): - - if self.training: - return [gr.update(interactive=True, value='Train'), gr.update(value='Someone else is training... Try again soon'), None, gr.update()] - - if train_method == 'ESD-x': - - modules = ".*attn2$" - frozen = [] - - elif train_method == 'ESD-u': - - modules = "unet$" - frozen = [".*attn2$", "unet.time_embedding$", "unet.conv_out$"] - - elif train_method == 'ESD-self': - - modules = ".*attn1$" - frozen = [] - - randn = torch.randint(1, 10000000, (1,)).item() - - save_path = f"models/{randn}_{prompt.lower().replace(' ', '')}.pt" - - self.training = True - - train(prompt, modules, frozen, iterations, neg_guidance, lr, save_path) - - self.training = False - - torch.cuda.empty_cache() - - model_map['Custom'] = save_path - - return [gr.update(interactive=True, value='Train'), gr.update(value='Done Training! 
\n Try your custom model in the "Test" tab'), save_path, gr.Dropdown.update(choices=list(model_map.keys()), value='Custom')] - - - def inference(self, prompt, seed, model_name, pbar = gr.Progress(track_tqdm=True)): - - seed = seed or 42 - - generator = torch.manual_seed(seed) - - model_path = model_map[model_name] - - checkpoint = torch.load(model_path) - - finetuner = FineTunedModel.from_checkpoint(self.diffuser, checkpoint).eval().half() - - torch.cuda.empty_cache() - - images = self.diffuser( - prompt, - n_steps=50, - generator=generator - ) - - - orig_image = images[0][0] - - torch.cuda.empty_cache() - - generator = torch.manual_seed(seed) - - with finetuner: - - images = self.diffuser( - prompt, - n_steps=50, - generator=generator - ) - - edited_image = images[0][0] - - del finetuner - torch.cuda.empty_cache() - - return edited_image, orig_image - - -demo = Demo() - diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327012413.py b/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327012413.py deleted file mode 100644 index f5b1195110153fc1057059ac04749712a932992f..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327012413.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -#os.system("pip install gfpgan") - -#os.system("pip freeze") -#os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth -P .") -import random -import gradio as gr -from PIL import Image -import torch -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Thomas_Edison2.jpg/1024px-Thomas_Edison2.jpg', 'edison.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a9/Henry_Ford_1888.jpg/1024px-Henry_Ford_1888.jpg', 'Henry.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg/800px-Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg', 'Frida.jpg') - - -import cv2 -import glob -import numpy as np -from basicsr.utils import imwrite -from gfpgan import GFPGANer - -import warnings -warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. ' - 'If you really want to use it, please modify the corresponding codes.') -bg_upsampler = None - - - -# set up GFPGAN restorer -restorer = GFPGANer( - model_path='experiments/pretrained_models/GFPGANv1.3.pth', - upscale=2, - arch='clean', - channel_multiplier=2, - bg_upsampler=bg_upsampler) - - -def inference(img): - input_img = cv2.imread(img, cv2.IMREAD_COLOR) - cropped_faces, restored_faces, restored_img = restorer.enhance( - input_img, has_aligned=False, only_center_face=False, paste_back=True) - - #return Image.fromarray(restored_faces[0][:,:,::-1]) - return Image.fromarray(restored_img) - -title = "GFP-GAN" -description = "Gradio demo for GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once" -article = "

Towards Real-World Blind Face Restoration with Generative Facial Prior | Github Repo

visitor badge
" -gr.Interface( - inference, - [gr.inputs.Image(type="filepath", label="Input")], - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['lincoln.jpg'], - ['einstein.png'], - ['edison.jpg'], - ['Henry.jpg'], - ['Frida.jpg'] - ] - ).launch(enable_queue=True,cache_examples=True) - - diff --git a/spaces/bigcode/bigcode-editor/static/frame.html b/spaces/bigcode/bigcode-editor/static/frame.html deleted file mode 100644 index d67837cdb8a27c29d4c5ec7969efb628a3cc5842..0000000000000000000000000000000000000000 --- a/spaces/bigcode/bigcode-editor/static/frame.html +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/spaces/bigcode/santacoder-search/README.md b/spaces/bigcode/santacoder-search/README.md deleted file mode 100644 index 8304a13718152ccc43aaf6c59d70ef73dba669b8..0000000000000000000000000000000000000000 --- a/spaces/bigcode/santacoder-search/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: SantaCoder Search -emoji: 🔎 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.7 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: bigscience-data/roots-search ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/ui_extensions.py b/spaces/bigjoker/stable-diffusion-webui/modules/ui_extensions.py deleted file mode 100644 index 12f395cef3a6e1e0ad28d1577c0208794b897335..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/ui_extensions.py +++ /dev/null @@ -1,354 +0,0 @@ -import json -import os.path -import shutil -import sys -import time -import traceback - -import git - -import gradio as gr -import html -import shutil -import errno - -from modules import extensions, shared, paths -from modules.call_queue import wrap_gradio_gpu_call - -available_extensions = {"extensions": []} - - -def check_access(): - assert not shared.cmd_opts.disable_extension_access, "extension access disabled because of command line flags" - - -def apply_and_restart(disable_list, update_list): - check_access() - - disabled = json.loads(disable_list) - assert type(disabled) == list, f"wrong disable_list data for apply_and_restart: {disable_list}" - - update = json.loads(update_list) - assert type(update) == list, f"wrong update_list data for apply_and_restart: {update_list}" - - update = set(update) - - for ext in extensions.extensions: - if ext.name not in update: - continue - - try: - ext.fetch_and_reset_hard() - except Exception: - print(f"Error getting updates for {ext.name}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - shared.opts.disabled_extensions = disabled - shared.opts.save(shared.config_filename) - - shared.state.interrupt() - shared.state.need_restart = True - - -def check_updates(id_task, disable_list): - check_access() - - disabled = json.loads(disable_list) - assert type(disabled) == list, f"wrong disable_list data for apply_and_restart: {disable_list}" - - exts = [ext for ext in extensions.extensions if ext.remote is not None and ext.name not in disabled] - shared.state.job_count = len(exts) - - for ext in exts: - shared.state.textinfo = ext.name - - try: - ext.check_updates() - except Exception: - print(f"Error checking updates for {ext.name}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - shared.state.nextjob() - - return extension_table(), "" - - -def extension_table(): - code = f""" - - 
- - - - - - - - - """ - - for ext in extensions.extensions: - remote = f"""{html.escape("built-in" if ext.is_builtin else ext.remote or '')}""" - - if ext.can_update: - ext_status = f"""""" - else: - ext_status = ext.status - - code += f""" - - - - - {ext_status} - - """ - - code += """ - -
ExtensionURLVersionUpdate
{remote}{ext.version}
- """ - - return code - - -def normalize_git_url(url): - if url is None: - return "" - - url = url.replace(".git", "") - return url - - -def install_extension_from_url(dirname, url): - check_access() - - assert url, 'No URL specified' - - if dirname is None or dirname == "": - *parts, last_part = url.split('/') - last_part = normalize_git_url(last_part) - - dirname = last_part - - target_dir = os.path.join(extensions.extensions_dir, dirname) - assert not os.path.exists(target_dir), f'Extension directory already exists: {target_dir}' - - normalized_url = normalize_git_url(url) - assert len([x for x in extensions.extensions if normalize_git_url(x.remote) == normalized_url]) == 0, 'Extension with this URL is already installed' - - tmpdir = os.path.join(paths.data_path, "tmp", dirname) - - try: - shutil.rmtree(tmpdir, True) - - repo = git.Repo.clone_from(url, tmpdir) - repo.remote().fetch() - - try: - os.rename(tmpdir, target_dir) - except OSError as err: - # TODO what does this do on windows? I think it'll be a different error code but I don't have a system to check it - # Shouldn't cause any new issues at least but we probably want to handle it there too. - if err.errno == errno.EXDEV: - # Cross device link, typical in docker or when tmp/ and extensions/ are on different file systems - # Since we can't use a rename, do the slower but more versitile shutil.move() - shutil.move(tmpdir, target_dir) - else: - # Something else, not enough free space, permissions, etc. rethrow it so that it gets handled. - raise(err) - - import launch - launch.run_extension_installer(target_dir) - - extensions.list_extensions() - return [extension_table(), html.escape(f"Installed into {target_dir}. Use Installed tab to restart.")] - finally: - shutil.rmtree(tmpdir, True) - - -def install_extension_from_index(url, hide_tags, sort_column): - ext_table, message = install_extension_from_url(None, url) - - code, _ = refresh_available_extensions_from_data(hide_tags, sort_column) - - return code, ext_table, message - - -def refresh_available_extensions(url, hide_tags, sort_column): - global available_extensions - - import urllib.request - with urllib.request.urlopen(url) as response: - text = response.read() - - available_extensions = json.loads(text) - - code, tags = refresh_available_extensions_from_data(hide_tags, sort_column) - - return url, code, gr.CheckboxGroup.update(choices=tags), '' - - -def refresh_available_extensions_for_tags(hide_tags, sort_column): - code, _ = refresh_available_extensions_from_data(hide_tags, sort_column) - - return code, '' - - -sort_ordering = [ - # (reverse, order_by_function) - (True, lambda x: x.get('added', 'z')), - (False, lambda x: x.get('added', 'z')), - (False, lambda x: x.get('name', 'z')), - (True, lambda x: x.get('name', 'z')), - (False, lambda x: 'z'), -] - - -def refresh_available_extensions_from_data(hide_tags, sort_column): - extlist = available_extensions["extensions"] - installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions} - - tags = available_extensions.get("tags", {}) - tags_to_hide = set(hide_tags) - hidden = 0 - - code = f""" - - - - - - - - - - """ - - sort_reverse, sort_function = sort_ordering[sort_column if 0 <= sort_column < len(sort_ordering) else 0] - - for ext in sorted(extlist, key=sort_function, reverse=sort_reverse): - name = ext.get("name", "noname") - added = ext.get('added', 'unknown') - url = ext.get("url", None) - description = ext.get("description", "") - extension_tags = 
ext.get("tags", []) - - if url is None: - continue - - existing = installed_extension_urls.get(normalize_git_url(url), None) - extension_tags = extension_tags + ["installed"] if existing else extension_tags - - if len([x for x in extension_tags if x in tags_to_hide]) > 0: - hidden += 1 - continue - - install_code = f"""""" - - tags_text = ", ".join([f"{x}" for x in extension_tags]) - - code += f""" - - - - - - - """ - - for tag in [x for x in extension_tags if x not in tags]: - tags[tag] = tag - - code += """ - -
ExtensionDescriptionAction
{html.escape(name)}
{tags_text}
{html.escape(description)}

Added: {html.escape(added)}

{install_code}
- """ - - if hidden > 0: - code += f"

Extension hidden: {hidden}

" - - return code, list(tags) - - -def create_ui(): - import modules.ui - - with gr.Blocks(analytics_enabled=False) as ui: - with gr.Tabs(elem_id="tabs_extensions") as tabs: - with gr.TabItem("Installed"): - - with gr.Row(elem_id="extensions_installed_top"): - apply = gr.Button(value="Apply and restart UI", variant="primary") - check = gr.Button(value="Check for updates") - extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False) - extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False) - - info = gr.HTML() - extensions_table = gr.HTML(lambda: extension_table()) - - apply.click( - fn=apply_and_restart, - _js="extensions_apply", - inputs=[extensions_disabled_list, extensions_update_list], - outputs=[], - ) - - check.click( - fn=wrap_gradio_gpu_call(check_updates, extra_outputs=[gr.update()]), - _js="extensions_check", - inputs=[info, extensions_disabled_list], - outputs=[extensions_table, info], - ) - - with gr.TabItem("Available"): - with gr.Row(): - refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary") - available_extensions_index = gr.Text(value="https://raw.githubusercontent.com/wiki/AUTOMATIC1111/stable-diffusion-webui/Extensions-index.md", label="Extension index URL").style(container=False) - extension_to_install = gr.Text(elem_id="extension_to_install", visible=False) - install_extension_button = gr.Button(elem_id="install_extension_button", visible=False) - - with gr.Row(): - hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"]) - sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order", ], type="index") - - install_result = gr.HTML() - available_extensions_table = gr.HTML() - - refresh_available_extensions_button.click( - fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update(), gr.update()]), - inputs=[available_extensions_index, hide_tags, sort_column], - outputs=[available_extensions_index, available_extensions_table, hide_tags, install_result], - ) - - install_extension_button.click( - fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]), - inputs=[extension_to_install, hide_tags, sort_column], - outputs=[available_extensions_table, extensions_table, install_result], - ) - - hide_tags.change( - fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]), - inputs=[hide_tags, sort_column], - outputs=[available_extensions_table, install_result] - ) - - sort_column.change( - fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]), - inputs=[hide_tags, sort_column], - outputs=[available_extensions_table, install_result] - ) - - with gr.TabItem("Install from URL"): - install_url = gr.Text(label="URL for extension's git repository") - install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") - install_button = gr.Button(value="Install", variant="primary") - install_result = gr.HTML(elem_id="extension_install_result") - - install_button.click( - fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]), - inputs=[install_dirname, install_url], - outputs=[extensions_table, install_result], - ) - - return ui diff --git 
a/spaces/billsar1912/stock-prediction/app.py b/spaces/billsar1912/stock-prediction/app.py deleted file mode 100644 index a10e8199141187a53a1c581544acf7df4a37a0fc..0000000000000000000000000000000000000000 --- a/spaces/billsar1912/stock-prediction/app.py +++ /dev/null @@ -1,286 +0,0 @@ -import time -import warnings -# import SessionState -import numpy as np -import pandas as pd -import datetime as dt -import streamlit as st -import tensorflow as tf -import plotly.express as px -import pandas_datareader as pdr -import matplotlib.pyplot as plt -from PIL import Image -from st_aggrid.shared import GridUpdateMode -from sklearn.preprocessing import MinMaxScaler -from tensorflow.keras.models import load_model -from st_aggrid import AgGrid, GridOptionsBuilder - -icon = Image.open('bitcoin.png') - -st.set_page_config( - page_title="FANG Stock Prediction", - page_icon=icon, - layout="wide" - ) - -st.write("# Welcome to Stock Prediction Dashboard :man: :coffee:") - -company_ticker = ['FB', 'AAPL', 'TSLA', 'GOOG', 'NVDA'] - -start_date = dt.datetime(2010, 10, 29) -end_date = dt.datetime(2021, 12, 31) -data_FB = pdr.DataReader(company_ticker[0], 'yahoo', start_date, end_date) -data_AAPL = pdr.DataReader(company_ticker[1], 'yahoo', start_date, end_date) -data_TSLA = pdr.DataReader(company_ticker[2], 'yahoo', start_date, end_date) -data_GOOG = pdr.DataReader(company_ticker[3], 'yahoo', start_date, end_date) -data_NVDA = pdr.DataReader(company_ticker[4], 'yahoo', start_date, end_date) - -scaler_tesla = MinMaxScaler() -scaler_google = MinMaxScaler() -scaler_nvidia = MinMaxScaler() - -prediction_days = 89 - -test_start = dt.datetime(2021, 12, 31) -test_end = dt.datetime.now() -test_data_FB = pdr.DataReader(company_ticker[0], 'yahoo', test_start, test_end) -test_data_AAPL = pdr.DataReader(company_ticker[1], 'yahoo', test_start, test_end) -test_data_TSLA = pdr.DataReader(company_ticker[2], 'yahoo', test_start, test_end) -test_data_GOOG = pdr.DataReader(company_ticker[3], 'yahoo', test_start, test_end) -test_data_NVDA = pdr.DataReader(company_ticker[4], 'yahoo', test_start, test_end) - -total_dataset_tesla = pd.concat((data_TSLA['Adj Close'], test_data_TSLA['Adj Close']), axis=0) -total_dataset_google = pd.concat((data_GOOG['Adj Close'], test_data_GOOG['Adj Close']), axis=0) -total_dataset_nvidia = pd.concat((data_NVDA['Adj Close'], test_data_NVDA['Adj Close']), axis=0) - -model_inputs_tesla = total_dataset_tesla[len(total_dataset_tesla) - len(test_data_TSLA) - prediction_days:].values -model_inputs_tesla = model_inputs_tesla.reshape(-1, 1) -model_inputs_tesla = scaler_tesla.fit_transform(model_inputs_tesla) - -model_inputs_google = total_dataset_google[len(total_dataset_google) - len(test_data_GOOG) - prediction_days:].values -model_inputs_google = model_inputs_google.reshape(-1, 1) -model_inputs_google = scaler_google.fit_transform(model_inputs_google) - -model_inputs_nvidia = total_dataset_nvidia[len(total_dataset_nvidia) - len(test_data_NVDA) - prediction_days:].values -model_inputs_nvidia = model_inputs_nvidia.reshape(-1, 1) -model_inputs_nvidia = scaler_nvidia.fit_transform(model_inputs_nvidia) - -def test_data_predict(lstm_model, model_inputs, scaler): - X_test = [] - - for x in range(prediction_days, len(model_inputs)): - X_test.append(model_inputs[x-prediction_days:x, 0]) - - X_test = np.array(X_test) - X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1)) - - prediction_prices = lstm_model.predict(X_test) - prediction_prices = scaler.inverse_transform(prediction_prices) - - return 
prediction_prices - -lstm_tsla = load_model("tsla_stock_prediction.h5") -lstm_goog = load_model("fang_stock_prediction.h5") -lstm_nvda = load_model("nvda_stock_prediction.h5") - -tsla_predict_prices = test_data_predict(lstm_model=lstm_tsla, model_inputs=model_inputs_tesla, scaler=scaler_tesla) -goog_predict_prices = test_data_predict(lstm_model=lstm_goog, model_inputs=model_inputs_google, scaler=scaler_google) -nvda_predict_prices = test_data_predict(lstm_model=lstm_nvda, model_inputs=model_inputs_nvidia, scaler=scaler_nvidia) - -# actual_aapl = pd.DataFrame(test_data_AAPL) -actual_tsla = pd.DataFrame(test_data_TSLA) -actual_goog = pd.DataFrame(test_data_GOOG) -actual_nvda = pd.DataFrame(test_data_NVDA) - -valid_tesla = pd.DataFrame() -valid_tesla['Actual'] = test_data_TSLA.filter(['Adj Close']) -valid_tesla['Predictions'] = tsla_predict_prices - -valid_google = pd.DataFrame() -valid_google['Actual'] = test_data_GOOG.filter(['Adj Close']) -valid_google['Predictions'] = goog_predict_prices - -valid_nvidia = pd.DataFrame() -valid_nvidia['Actual'] = test_data_NVDA.filter(['Adj Close']) -valid_nvidia['Predictions'] = nvda_predict_prices - -st.write("#### Actual Stock Value of Tesla 2021 - Now (in Billion USD)") -tsla_col1, tsla_col2, tsla_col3 = st.columns(3) -tsla_col1.metric("Open Stock Value", f"{np.round(actual_tsla['Open'][len(actual_tsla)-1], 3)}", - "{}".format(np.round(actual_tsla['Open'][len(test_data_TSLA)-1] - actual_tsla['Open'][len(test_data_TSLA)-2]), 3)) - -tsla_col2.metric("Open Stock Value", f"{np.round(actual_tsla['Close'][len(actual_tsla)-1], 3)}", - "{}".format(np.round(actual_tsla['Close'][len(test_data_TSLA)-1] - actual_tsla['Close'][len(test_data_TSLA)-2]), 3)) - -tsla_col3.metric("Open Stock Value", f"{np.round(actual_tsla['Adj Close'][len(actual_tsla)-1], 3)}", - "{}".format(np.round(actual_tsla['Adj Close'][len(test_data_TSLA)-1] - actual_tsla['Adj Close'][len(test_data_TSLA)-2]), 3)) - -st.write("#### Actual Stock Value of Google 2021 - Now (in Billion USD)") -goog_col1, goog_col2, goog_col3 = st.columns(3) -goog_col1.metric("Open Stock Value", f"{np.round(actual_goog['Open'][len(actual_goog)-1], 3)}", - "{}".format(np.round(actual_goog['Open'][len(test_data_GOOG)-1] - actual_goog['Open'][len(test_data_GOOG)-2]), 3)) - -goog_col2.metric("Close Stock Value", f"{np.round(actual_goog['Close'][len(actual_goog)-1], 3)}", - "{}".format(np.round(actual_goog['Close'][len(test_data_GOOG)-1] - actual_goog['Close'][len(test_data_GOOG)-2]), 3)) - -goog_col3.metric("Adjusted Close Value", f"{np.round(actual_goog['Adj Close'][len(actual_goog)-1], 3)}", - "{}".format(np.round(actual_goog['Adj Close'][len(test_data_GOOG)-1] - actual_goog['Adj Close'][len(test_data_GOOG)-2]), 3)) - -st.write("#### Actual Stock Value of NVIDIA 2021 - Now (in Billion USD)") -nvda_col1, nvda_col2, nvda_col3 = st.columns(3) -nvda_col1.metric("Open Stock Value", f"{np.round(actual_nvda['Open'][len(actual_nvda)-1], 3)}", - "{}".format(np.round(actual_nvda['Open'][len(test_data_NVDA)-1] - actual_nvda['Open'][len(test_data_NVDA)-2]), 3)) - -nvda_col2.metric("Close Stock Value", f"{np.round(actual_nvda['Close'][len(actual_nvda)-1], 3)}", - "{}".format(np.round(actual_nvda['Close'][len(test_data_NVDA)-1] - actual_nvda['Close'][len(test_data_NVDA)-2]), 3)) - -nvda_col3.metric("Adjusted Close Value", f"{np.round(actual_nvda['Adj Close'][len(actual_nvda)-1], 3)}", - "{}".format(np.round(actual_nvda['Adj Close'][len(test_data_NVDA)-1] - actual_nvda['Adj Close'][len(test_data_NVDA)-2]), 3)) - -def 
aggrid_interactive_table(df: pd.DataFrame): - """Creates an st-aggrid interactive table based on a dataframe. - - Args: - df (pd.DataFrame]): Source dataframe - - Returns: - dict: The selected row - """ - options = GridOptionsBuilder.from_dataframe( - df, enableRowGroup=True, enableValue=True, enablePivot=True - ) - - options.configure_side_bar() - - options.configure_selection("single") - selection = AgGrid( - df, - enable_enterprise_modules=True, - gridOptions=options.build(), - theme="dark", - update_mode=GridUpdateMode.MODEL_CHANGED, - allow_unsafe_jscode=True, - fit_columns_on_grid_load=True - ) - - return selection - -google_stock = pd.DataFrame(data_GOOG.drop('Volume', axis=1)) - -comp_name = ['Tesla', 'Google', 'NVIDIA'] -st.write("#### Actual and Prediction of Tesla, Google, and NVIDIA Stock Price (Scaled Data Using Min-Max Scaler) 2007 - Now") -stock_df = pd.DataFrame() - -st.text('Start Time') -from_col1, from_col2, from_col3 = st.columns(3) -from_date = from_col1.slider('Date', 1, 31, 1) -from_month = from_col2.slider('Month', 1, 12, 1) -from_year = from_col3.slider('Year', 2010, 2021, 2011) - -st.text('End Time') -to_col1, to_col2, to_col3 = st.columns(3) -to_date = to_col1.slider('Date', 1, 31, 31) -to_month = to_col2.slider('Month', 1, 12, 12) -to_year = to_col3.slider('Year', 2010, 2021, 2021) - -stock_df['Tesla'] = pdr.DataReader(company_ticker[2], 'yahoo', - dt.datetime(from_year, from_month, from_date), - dt.datetime(to_year, to_month, to_date)).filter(['Adj Close']) - -stock_df['Google'] = pdr.DataReader(company_ticker[3], 'yahoo', - dt.datetime(from_year, from_month, from_date), - dt.datetime(to_year, to_month, to_date)).filter(['Adj Close']) - -stock_df['NVIDIA'] = pdr.DataReader(company_ticker[4], 'yahoo', - dt.datetime(from_year, from_month, from_date), - dt.datetime(to_year, to_month, to_date)).filter(['Adj Close']) - -actual_df = pd.DataFrame() -actual_df['Tesla'] = test_data_TSLA.filter(['Adj Close']) -actual_df['Google'] = test_data_GOOG.filter(['Adj Close']) -actual_df['NVIDIA'] = test_data_NVDA.filter(['Adj Close']) - -st.line_chart(stock_df) -comp_df = st.selectbox("Select Company Data", comp_name) - -if comp_df == 'Tesla': - aggrid_tesla = aggrid_interactive_table(df=valid_tesla) - stock_col1, stock_col2 = st.columns(2) - - fig = px.scatter( - x=valid_tesla["Actual"], - y=valid_tesla["Predictions"] - ) - fig.update_layout( - xaxis_title="Actual Stock Price of Tesla Company", - yaxis_title="Predicted Value", - modebar_remove=["pan", "zoom", "lasso", "toimage", "zoomin", "zoomout", "autoscale", "resetviews", "select", "resetscale"] - ) - - stock_col1.write(f"##### Linearity of Actual and Predicted Stock Price of {comp_df} (in Billion) 2021 - Now") - stock_col1.write(fig) - stock_col2.write(f"##### Comparison Between Actual and Predicted Stock Price of {comp_df} (in Billion) 2021 - Now") - stock_col2.line_chart(valid_tesla) - -if comp_df == 'Google': - aggrid_google = aggrid_interactive_table(df=valid_google) - stock_col1, stock_col2 = st.columns(2) - - fig = px.scatter( - x=valid_google["Actual"], - y=valid_google["Predictions"] - ) - fig.update_layout( - xaxis_title="Actual Stock Price of Tesla Company", - yaxis_title="Predicted Value", - modebar_remove=["pan", "zoom", "lasso", "toimage", "zoomin", "zoomout", "autoscale", "resetviews", "select", "resetscale"] - ) - - stock_col1.write(f"##### Linearity of Actual and Predicted Stock Price of {comp_df} (in Billion) 2021 - Now") - stock_col1.write(fig) - stock_col2.write(f"##### Comparison Between Actual and 
Predicted Stock Price of {comp_df} (in Billion) 2021 - Now") - stock_col2.line_chart(valid_google) - -if comp_df == 'NVIDIA': - aggrid_nvidia = aggrid_interactive_table(df=valid_nvidia) - stock_col1, stock_col2 = st.columns(2) - - fig = px.scatter( - x=valid_nvidia["Actual"], - y=valid_nvidia["Predictions"] - ) - fig.update_layout( - xaxis_title="Actual Stock Price of Tesla Company", - yaxis_title="Predicted Value", - modebar_remove=["pan", "zoom", "lasso", "toimage", "zoomin", "zoomout", "autoscale", "resetviews", "select", "resetscale"] - ) - - stock_col1.write(f"##### Linearity of Actual and Predicted Stock Price of {comp_df} (in Billion) 2021 - Now") - stock_col1.write(fig) - stock_col2.write(f"##### Comparison Between Actual and Predicted Stock Price of {comp_df} (in Billion) 2021 - Now") - stock_col2.line_chart(valid_nvidia) - -def real_data_prediction(lstm_model, model_inputs, scaler): - real_data = [model_inputs[len(model_inputs) - prediction_days:len(model_inputs) + 2, 0]] - real_data = np.array(real_data) - real_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1], 1)) - - real_prediction = lstm_model.predict(real_data) - real_prediction = scaler.inverse_transform(real_prediction) - - return np.round(real_prediction, 3) - -real_predict_tesla = real_data_prediction(lstm_tsla, model_inputs_tesla, scaler_tesla)[0][0] -real_predict_google = real_data_prediction(lstm_goog, model_inputs_google, scaler_google)[0][0] -real_predict_nvidia = real_data_prediction(lstm_nvda, model_inputs_nvidia, scaler_nvidia)[0][0] - -st.write("#### Company's Tomorrow Stock Price Prediction") -real_col1, real_col2, real_col3 = st.columns(3) -real_col1.metric("Tesla's Tomorrow Stock Price Predction", f"{real_predict_tesla}", - f"{np.round(real_predict_tesla - actual_tsla['Adj Close'][len(test_data_TSLA)-1], 3)}") - -real_col2.metric("Google's Tomorrow Stock Price Predction", f"{real_predict_google}", - f"{np.round(real_predict_google - actual_goog['Adj Close'][len(test_data_GOOG)-1], 3)}") - -real_col3.metric("NVIDIA's Tomorrow Stock Price Predction", f"{real_predict_nvidia}", - f"{np.round(real_predict_nvidia - actual_nvda['Adj Close'][len(test_data_NVDA)-1], 3)}") \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/BMWCodingToolPackutorrent.md b/spaces/bioriAsaeru/text-to-voice/BMWCodingToolPackutorrent.md deleted file mode 100644 index b844819d0c5949c5ad153ab6305aac0334fc997d..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/BMWCodingToolPackutorrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

BMWCodingToolPackutorrent


Download Zip 🌟 https://urloso.com/2uyQho



-
-Coding of the ECU ... OBD Tool here offers BMW INPA EDIABAS (full package including: E-sys, ... INPA V5.06 torrent free download (2.59 GB). 4d29de3e1b
-
-
-

diff --git a/spaces/bioriAsaeru/text-to-voice/Bobby Flay Chicken Roulade Recipe.md b/spaces/bioriAsaeru/text-to-voice/Bobby Flay Chicken Roulade Recipe.md deleted file mode 100644 index af45731b197b9e23e7df37638a2bea2fb1cc9206..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Bobby Flay Chicken Roulade Recipe.md +++ /dev/null @@ -1,102 +0,0 @@ - -

Bobby Flay Chicken Roulade Recipe - A Delicious and Elegant Dish

-

If you are looking for a way to impress your guests with a mouthwatering and sophisticated dish, you should try this bobby flay chicken roulade recipe. Chicken roulade is a dish where chicken breasts are pounded thin, stuffed with cheese, ham and arugula, rolled up and roasted until golden and juicy. It is served with a tangy mustard sauce that complements the rich flavors of the filling.

-

This bobby flay chicken roulade recipe is inspired by the famous chef and TV personality Bobby Flay, who is known for his bold and creative dishes. He uses American triple-cream cheese, such as Red Hawk, and Georgia ham, which add a creamy and smoky touch to the chicken. He also adds some red chile flakes and horseradish to the mustard sauce for some extra kick.

-

bobby flay chicken roulade recipe


Download Ziphttps://urloso.com/2uyPO8



-

How to Make Bobby Flay Chicken Roulade Recipe

-

Making this bobby flay chicken roulade recipe is not difficult, but it does require some preparation and attention. Here are the main steps you need to follow:

-
    -
  1. Make the mustard sauce by whisking together honey, Dijon mustard, whole-grain mustard and horseradish in a small bowl. Season with salt and pepper and set aside.
  2. -
  3. Preheat the oven to 400 degrees F and heat some canola oil in a large skillet over low heat. Add garlic and red chile flakes and cook for 30 seconds. Then add baby arugula, season with salt and pepper and cook until wilted, about 2 minutes. Set aside.
  4. -
  5. Pound the chicken breasts between two sheets of parchment paper or plastic wrap until thin. Season with salt and pepper on both sides. Place two slices of ham on each breast, spread some cheese over the ham and top with some arugula in the center. Roll up the breasts tightly and tie with butcher's twine.
  6. -
  7. Heat some more oil in a large ovenproof skillet over high heat. Add the chicken rolls seam-side up and some butter. Cook until browned on all sides, about 3 minutes. Transfer the skillet to the oven and bake until the chicken is cooked through and the cheese is melted, about 4 to 5 minutes.
  8. -
  9. Let the chicken rest for 5 minutes before slicing. Serve with the mustard sauce and some micro arugula on top.
  10. -
-

Tips and Variations for Bobby Flay Chicken Roulade Recipe

-

Here are some tips and variations you can try to make this bobby flay chicken roulade recipe even better:

-
    -
  • If you can't find American triple-cream cheese or Georgia ham, you can use any other soft cheese or cured ham that you like.
  • -
  • You can also use spinach or kale instead of arugula for the filling.
  • -
  • You can add some chopped nuts, dried fruits or fresh herbs to the cheese mixture for some extra texture and flavor.
  • -
  • You can make the mustard sauce ahead of time and refrigerate it until ready to use.
  • -
  • You can also make the chicken rolls ahead of time and refrigerate them until ready to cook. Just bring them to room temperature before browning them in the skillet.
  • -
-

Nutrition Facts for Bobby Flay Chicken Roulade Recipe

-

This bobby flay chicken roulade recipe makes 4 servings. Each serving has:

-
    -
  • Calories: 660
  • -
  • Fat: 42 g
  • -
  • Carbohydrates: 23 g
  • -
  • Protein: 48 g
  • -
  • Cholesterol: 180 mg
  • -
  • Sodium: 1200 mg
  • -
-

This bobby flay chicken roulade recipe is a great way to enjoy chicken in a new and exciting way. It is perfect for a special occasion or a weekend dinner. Try it today and let us know what you think!

-

Benefits of Bobby Flay Chicken Roulade Recipe

-

This bobby flay chicken roulade recipe is not only delicious and elegant, but also has some health benefits. Here are some of them:

-
    -
  • Chicken is a lean protein that helps build and maintain muscle mass, supports immune system function and keeps you feeling full longer.
  • -
  • Cheese is a good source of calcium, which is essential for bone health and teeth strength. It also contains protein, vitamin A, vitamin B12 and zinc.
  • -
  • Ham is rich in iron, which helps transport oxygen throughout the body and prevents anemia. It also provides protein, vitamin B6, niacin and selenium.
  • -
  • Arugula is a leafy green vegetable that is low in calories and high in antioxidants, vitamin C, vitamin K and folate. It also has a peppery flavor that adds some spice to the dish.
  • -
  • Honey is a natural sweetener that has antibacterial, anti-inflammatory and antioxidant properties. It also helps soothe sore throats and coughs.
  • -
  • Mustard is a condiment that adds flavor and heat to the dish. It also contains phytochemicals that may have anti-cancer effects.
  • -
-

What to Serve with Bobby Flay Chicken Roulade Recipe

-

This bobby flay chicken roulade recipe is a complete meal by itself, but you can also serve it with some side dishes to make it more satisfying. Here are some suggestions:

-
    -
  • Mashed potatoes: A classic and comforting side dish that goes well with any meat dish. You can make them creamy with butter and milk, or add some cheese, garlic or herbs for extra flavor.
  • -
  • Roasted vegetables: A simple and healthy way to add some color and nutrients to your plate. You can roast any vegetables you like, such as carrots, parsnips, Brussels sprouts, broccoli or cauliflower. Drizzle them with some olive oil, salt, pepper and your favorite herbs before baking them in the oven.
  • -
  • Rice pilaf: A rice dish that is cooked with broth, butter and spices. You can add some nuts, dried fruits or fresh herbs to make it more interesting.
  • -
  • Salad: A light and refreshing side dish that can balance out the richness of the chicken roulade. You can toss some greens with your favorite dressing, or make a more elaborate salad with cheese, nuts, fruits or vegetables.
  • -
  • Bread: A simple and satisfying way to soak up the delicious mustard sauce. You can use any bread you like, such as French baguette, sourdough or ciabatta.
  • -
-

Conclusion

-

This bobby flay chicken roulade recipe is a great way to enjoy chicken in a new and exciting way. It is perfect for a special occasion or a weekend dinner. Try it today and let us know what you think!

-

-

FAQs about Bobby Flay Chicken Roulade Recipe

-

You may have some questions about this bobby flay chicken roulade recipe. Here are some of the most common ones and their answers:

-
-
What is a roulade?
-
A roulade is a dish where a thin slice of meat or fish is rolled around a filling and cooked. The word comes from the French verb "rouler", which means to roll.
-
What is triple-cream cheese?
-
Triple-cream cheese is a type of soft cheese that has at least 75% butterfat in its dry matter. It is very rich and creamy, and often has a mild or slightly tangy flavor. Some examples of triple-cream cheese are Brie, Camembert and Red Hawk.
-
What is Georgia ham?
-
Georgia ham is a type of country ham that is cured and smoked in Georgia. It has a salty and smoky flavor, and can be sliced thin or thick. Some brands of Georgia ham are Smithfield, Clifty Farm and Benton's.
-
How do you tie a chicken roulade?
-
You can tie a chicken roulade with butcher's twine, which is a type of cotton string that is used for cooking. You can find it in most grocery stores or online. To tie a chicken roulade, you need to cut four pieces of twine, each about 12 inches long. Then, place one piece under the middle of the chicken roll, cross the ends over the top and pull them tight. Repeat with the other pieces at equal intervals along the roll. Tie the ends in a knot and trim any excess twine.
-
How do you slice a chicken roulade?
-
You can slice a chicken roulade with a sharp knife or an electric knife. You should let the chicken rest for 5 minutes before slicing, to allow the juices to redistribute and the cheese to set. Then, remove the twine and cut the roll into 1/2-inch thick slices. You can use a serrated knife to cut through the ham more easily.
-
-

Conclusion

-

This bobby flay chicken roulade recipe is a great way to enjoy chicken in a new and exciting way. It is perfect for a special occasion or a weekend dinner. Try it today and let us know what you think!

-

Reviews of Bobby Flay Chicken Roulade Recipe

-

This bobby flay chicken roulade recipe has received many positive reviews from people who have tried it. Here are some of them:

-
-

"This was amazing! The chicken was moist and tender, the cheese was gooey and delicious, and the mustard sauce was the perfect balance of sweet and spicy. I served it with mashed potatoes and roasted asparagus. My husband loved it and said it was restaurant quality. I will definitely make this again!" - Lisa

-
-
-

"I made this for a dinner party and it was a hit! Everyone raved about how flavorful and elegant it was. I followed the recipe exactly and it was easy to make. The only thing I changed was that I used spinach instead of arugula because that's what I had on hand. It worked well too. I highly recommend this recipe!" - Amy

-
-
-

"Wow, this was so good! I love Bobby Flay's recipes and this one did not disappoint. The chicken roulade was juicy and cheesy, and the mustard sauce was tangy and creamy. I served it with rice pilaf and salad. It was a perfect meal for a cold night. I will make this again and again!" - Kevin

-
-

Conclusion

-

This bobby flay chicken roulade recipe is a great way to enjoy chicken in a new and exciting way. It is perfect for a special occasion or a weekend dinner. Try it today and let us know what you think!

-

How to Store and Reheat Bobby Flay Chicken Roulade Recipe

-

If you have any leftovers of this bobby flay chicken roulade recipe, you can store them in an airtight container in the refrigerator for up to 3 days. To reheat them, you can either microwave them for a few minutes, or bake them in a 350 degrees F oven for 10 to 15 minutes, until heated through.

-

You can also freeze the cooked chicken roulade for up to 3 months. To freeze them, you need to wrap them individually in plastic wrap and then place them in a freezer bag. To thaw them, you can either leave them in the refrigerator overnight, or microwave them on low power for a few minutes. Then, you can reheat them as described above.

-

Other Recipes by Bobby Flay

-

If you like this bobby flay chicken roulade recipe, you may also enjoy some of his other recipes. Here are some of them:

-
    -
  • Bobby Flay's Barbecue Chicken with Alabama White Barbecue Sauce: A grilled chicken dish with a tangy and creamy white sauce made with mayonnaise, vinegar, mustard and horseradish.
  • -
  • Bobby Flay's Macaroni and Cheese Carbonara: A decadent pasta dish with a cheesy sauce made with Parmesan, fontina and mascarpone cheeses, bacon, eggs and peas.
  • -
  • Bobby Flay's Mesa Grill's Southwestern Potato Salad: A spicy and colorful potato salad with roasted potatoes, corn, black beans, red onion, cilantro and a chipotle dressing.
  • -
  • Bobby Flay's Throwdown Pumpkin Pie: A classic pumpkin pie with a twist of orange zest, ginger and bourbon whipped cream.
  • -
  • Bobby Flay's Salmon with Brown Sugar and Mustard Glaze: A simple and delicious baked salmon dish with a sweet and savory glaze made with brown sugar, Dijon mustard and cider vinegar.
  • -
-

Conclusion

-

This bobby flay chicken roulade recipe is a great way to enjoy chicken in a new and exciting way. It is perfect for a special occasion or a weekend dinner. Try it today and let us know what you think!

-

This bobby flay chicken roulade recipe is a great way to enjoy chicken in a new and exciting way. It is perfect for a special occasion or a weekend dinner. It is easy to make and has a delicious combination of flavors and textures. The chicken is stuffed with cheese, ham and arugula, and served with a tangy mustard sauce. It is also healthy and nutritious, as it provides protein, calcium, iron and antioxidants. You can serve it with some side dishes, such as mashed potatoes, roasted vegetables, rice pilaf, salad or bread. You can also store and reheat the leftovers, or freeze them for later use. If you like this recipe, you may also like some of Bobby Flay's other recipes, such as his barbecue chicken, macaroni and cheese, potato salad, pumpkin pie or salmon. Try this bobby flay chicken roulade recipe today and let us know what you think!

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/FULL TI-SmartView (Trial) With CRACK.md b/spaces/bioriAsaeru/text-to-voice/FULL TI-SmartView (Trial) With CRACK.md deleted file mode 100644 index c7ea0271360fa3a9af03c24846a3b86b967e4057..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/FULL TI-SmartView (Trial) With CRACK.md +++ /dev/null @@ -1,6 +0,0 @@ -

FULL TI-SmartView (Trial) with CRACK


Download Filehttps://urloso.com/2uyRI1



-
-ti-smartview (trial) with the "Instant View" function that automatically activates this function with every video stream, in real-time. The "Instant View" can be accessed by entering the code "InstantView" from the "Introduction" menu and pressing the "Button" "C" at the bottom of the screen when the display is 8a78ff9644
-
-
-

diff --git a/spaces/bioriAsaeru/text-to-voice/Italian Movie Download Transformers REPACK.md b/spaces/bioriAsaeru/text-to-voice/Italian Movie Download Transformers REPACK.md deleted file mode 100644 index 62665ec4b141ed753351cac532fcb50f1eb06dc2..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Italian Movie Download Transformers REPACK.md +++ /dev/null @@ -1,5 +0,0 @@ -
-

Jazz appears as downloadable content for the video game based on the second movie, Revenge of the Fallen for both multiplayer and campaign modes. He is voiced by Nolan North, who also voices Sideswipe.

-

italian movie download Transformers


Download ✶✶✶ https://urloso.com/2uyOgs



aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/datasets/builtin.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/datasets/builtin.py deleted file mode 100644 index 7572cd6abc550fdce9d1fd079a7af4870de303bb..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/datasets/builtin.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .chimpnsee import register_dataset as register_chimpnsee_dataset -from .coco import BASE_DATASETS as BASE_COCO_DATASETS -from .coco import DATASETS as COCO_DATASETS -from .coco import register_datasets as register_coco_datasets -from .lvis import DATASETS as LVIS_DATASETS -from .lvis import register_datasets as register_lvis_datasets - -DEFAULT_DATASETS_ROOT = "datasets" - - -register_coco_datasets(COCO_DATASETS, DEFAULT_DATASETS_ROOT) -register_coco_datasets(BASE_COCO_DATASETS, DEFAULT_DATASETS_ROOT) -register_lvis_datasets(LVIS_DATASETS, DEFAULT_DATASETS_ROOT) - -register_chimpnsee_dataset(DEFAULT_DATASETS_ROOT) # pyre-ignore[19] diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/evaluation/mesh_alignment_evaluator.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/evaluation/mesh_alignment_evaluator.py deleted file mode 100644 index 9d67c1a88a56332fb708c4618a34e96900926083..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/evaluation/mesh_alignment_evaluator.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -import json -import logging -from typing import List, Optional -import torch -from torch import nn - -from detectron2.utils.file_io import PathManager - -from densepose.structures.mesh import create_mesh - - -class MeshAlignmentEvaluator: - """ - Class for evaluation of 3D mesh alignment based on the learned vertex embeddings - """ - - def __init__(self, embedder: nn.Module, mesh_names: Optional[List[str]]): - self.embedder = embedder - # use the provided mesh names if not None and not an empty list - self.mesh_names = mesh_names if mesh_names else embedder.mesh_names - self.logger = logging.getLogger(__name__) - with PathManager.open( - "https://dl.fbaipublicfiles.com/densepose/data/cse/mesh_keyvertices_v0.json", "r" - ) as f: - self.mesh_keyvertices = json.load(f) - - def evaluate(self): - ge_per_mesh = {} - gps_per_mesh = {} - for mesh_name_1 in self.mesh_names: - avg_errors = [] - avg_gps = [] - embeddings_1 = self.embedder(mesh_name_1) - keyvertices_1 = self.mesh_keyvertices[mesh_name_1] - keyvertex_names_1 = list(keyvertices_1.keys()) - keyvertex_indices_1 = [keyvertices_1[name] for name in keyvertex_names_1] - for mesh_name_2 in self.mesh_names: - if mesh_name_1 == mesh_name_2: - continue - embeddings_2 = self.embedder(mesh_name_2) - keyvertices_2 = self.mesh_keyvertices[mesh_name_2] - sim_matrix_12 = embeddings_1[keyvertex_indices_1].mm(embeddings_2.T) - vertices_2_matching_keyvertices_1 = sim_matrix_12.argmax(axis=1) - mesh_2 = create_mesh(mesh_name_2, embeddings_2.device) - geodists = mesh_2.geodists[ - vertices_2_matching_keyvertices_1, - [keyvertices_2[name] for name in keyvertex_names_1], - ] - Current_Mean_Distances = 0.255 - gps = (-(geodists**2) / (2 * (Current_Mean_Distances**2))).exp() - avg_errors.append(geodists.mean().item()) - avg_gps.append(gps.mean().item()) - 
- ge_mean = torch.as_tensor(avg_errors).mean().item() - gps_mean = torch.as_tensor(avg_gps).mean().item() - ge_per_mesh[mesh_name_1] = ge_mean - gps_per_mesh[mesh_name_1] = gps_mean - ge_mean_global = torch.as_tensor(list(ge_per_mesh.values())).mean().item() - gps_mean_global = torch.as_tensor(list(gps_per_mesh.values())).mean().item() - per_mesh_metrics = { - "GE": ge_per_mesh, - "GPS": gps_per_mesh, - } - return ge_mean_global, gps_mean_global, per_mesh_metrics diff --git a/spaces/buzzChukomi/sd_grad/app.py b/spaces/buzzChukomi/sd_grad/app.py deleted file mode 100644 index c877947be2035a8d389ed494ad2d125de5ed2edb..0000000000000000000000000000000000000000 --- a/spaces/buzzChukomi/sd_grad/app.py +++ /dev/null @@ -1,139 +0,0 @@ -import replicate -import gradio as gr -from os import getenv - -model = replicate.models.get("stability-ai/stable-diffusion") -uScaler = replicate.models.get("nightmareai/real-esrgan") - -def txt2img(_prompt, - _seed=None, - _width=512, - _height=512, - _num_inference_steps=50, - _guidance_scale=7.5, - _imgLink=None, - _prompt_strength=0.5, - ): - - if _seed==0: - _seed=None - - if _imgLink !="" : - output_img = model.predict(prompt = _prompt, - seed = _seed, - width =_width, - height =_height, - num_inference_steps = _num_inference_steps, - guidance_scale=_guidance_scale, - init_image=_imgLink, - prompt_strength = _prompt_strength, - )[0] - - else: - output_img = model.predict(prompt = _prompt, - seed = _seed, - width =_width, - height =_height, - num_inference_steps = _num_inference_steps, - guidance_scale=_guidance_scale - )[0] - - - return output_img - -def pass2up(imgInput): - scaledImg = imgInput - return scaledImg - -def upscale(imgInput, scaleInput, faceInput): - new_data = gr.Image.postprocess(self=imgInput,y=imgInput) - scaledImg = uScaler.predict(image=new_data,scale=scaleInput,face_enhance=faceInput) - return scaledImg - -def cancelPending(): - pr = replicate.predictions.list() - print("STARTING") - for p in pr: - if p.status == 'starting': - print(p.id) - p.cancel() - p.reload() - return None - -def same_auth(username, password): - if username is None: - return False - elif username=="": - return False - elif username!=getenv("SPACE_PASS"): - return False - else: - return username == password - -app1 = gr.Blocks(title="AI Workshop", css=""" - body {background-color: #0b0f19;} - .gradio-container{background-color: #0b0f19;} - .bg-gray-50{background-color: #111827;} - .bg-gray-200{background-color: #111827;} - .gr-box{ - background-color: #1f2937; - color: #f3ffff; - border-color: #374151;} - .text-gray-500{color:#f0ffff;} - .bg-white{background-color:#f0f8ff00; color:#f0ffff;} - .border-gray-200{border-color:#374151;} - .border-b-2{border-color:#374151;} - #thetitle {color: #ee7430; text-align: center;} - .h-60 {height: 35rem;} - """) - -with app1: - gr.Markdown("Stable Diffusion and Upscaler",elem_id="thetitle") - with gr.Tab("Stable Diffusion"): - with gr.Row(): - with gr.Column(): - promptIN = gr.Textbox(label="text prompt", placeholder="write something like bird in a boat") - seedIN = gr.Number(label="seed - leave it blank for random", value=None) - with gr.Row(): - widthIN = gr.Dropdown(label='width', choices=[128,256,512],value=512) - heightIN = gr.Dropdown(label='height', choices=[128,256,512],value=512) - stepsIN = gr.Slider( 5, 100, label='steps', step=1, value=50) - scaleIN = gr.Slider( 1, 20, label='prompt_guidance_scale', value=7.5) - imgLinkIN = gr.Textbox(label="link to image", placeholder="use URL link to image to guide the AI") - imgStrIN 
= gr.Slider( 0, 1, label='image_strength', value=0.5) - imgDisplay = gr.Image(visible=False) - with gr.Column(): - imgOUT = gr.Image(shape=[512,512],interactive=False) - submitBTN = gr.Button("Submit", variant='primary') - with gr.Tab("Upscaler"): - with gr.Row(): - with gr.Column(): - upImgIN = gr.Image() - scaleValIn = gr.Slider( 1, 4, label='scale', value=2) - faceIn = gr.Checkbox(label="Face Enhance", value=False) - upBtn = gr.Button("Upscale", variant='primary') - with gr.Column(): - upImgOUT = gr.Image(shape=[512,512]) - with gr.Tab("Service"): - gr.Markdown("This is a service tab - prssing the bellow button will try to unstuck the server by canceling all started prompts") - cancelBTN = gr.Button("Cancell Pending", variant='secondary') - - def url2img(_url): - if _url is not None: - if _url != "" and _url != '': - return {imgDisplay: gr.update(value=_url, visible=True)} - else: - return {imgDisplay: gr.update(value=None, visible=False)} - else: - return {imgDisplay: gr.update(value=None, visible=False)} - - submitBTN.click(fn=txt2img,inputs=[promptIN, seedIN, widthIN, heightIN, stepsIN, scaleIN, imgLinkIN, imgStrIN], outputs=imgOUT) - imgOUT.change(fn=pass2up,inputs=imgOUT,outputs=upImgIN) - imgLinkIN.change(fn=url2img, inputs=imgLinkIN, outputs=imgDisplay) - - cancelBTN.click(fn=cancelPending, inputs=None, outputs=None) - - upBtn.click(fn=upscale, inputs=[upImgIN, scaleValIn, faceIn], outputs=upImgOUT) - -#app1.launch(debug=True, show_error=True, auth=same_auth, enable_queue=False) -app1.launch(debug=False, show_error=True) \ No newline at end of file diff --git a/spaces/caffeinum/VToonify/vtoonify/model/bisenet/README.md b/spaces/caffeinum/VToonify/vtoonify/model/bisenet/README.md deleted file mode 100644 index 849d55e2789c8852e01707d1ff755dc74e63a7f5..0000000000000000000000000000000000000000 --- a/spaces/caffeinum/VToonify/vtoonify/model/bisenet/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# face-parsing.PyTorch - -

- - - -

- -### Contents -- [Training](#training) -- [Demo](#Demo) -- [References](#references) - -## Training - -1. Prepare training data: - -- download [CelebAMask-HQ dataset](https://github.com/switchablenorms/CelebAMask-HQ) - - -- change file path in the `prepropess_data.py` and run -```Shell -python prepropess_data.py -``` - -2. Train the model using CelebAMask-HQ dataset: -Just run the train script: -``` - $ CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py -``` - -If you do not wish to train the model, you can download [our pre-trained model](https://drive.google.com/open?id=154JgKpzCPW82qINcVieuPH3fZ2e0P812) and save it in `res/cp`. - - -## Demo -1. Evaluate the trained model using: -```Shell -# evaluate using GPU -python test.py -``` - -## Face makeup using parsing maps -[**face-makeup.PyTorch**](https://github.com/zllrunning/face-makeup.PyTorch) - - - - - - - - - - - - - - - - - - - - - - -
 HairLip
Original InputOriginal InputOriginal Input
ColorColorColor
- - -## References -- [BiSeNet](https://github.com/CoinCheung/BiSeNet) \ No newline at end of file diff --git a/spaces/cahya/indonesian-whisperer/app/whisper.py b/spaces/cahya/indonesian-whisperer/app/whisper.py deleted file mode 100644 index c4e84c89c4bcb1e788c15fa5f6113a17919d15a6..0000000000000000000000000000000000000000 --- a/spaces/cahya/indonesian-whisperer/app/whisper.py +++ /dev/null @@ -1,153 +0,0 @@ -import torch -import gradio as gr -from transformers import pipeline -import tempfile -from neon_tts_plugin_coqui import CoquiTTS -from datetime import datetime -import time -import psutil -from mtranslate import translate -from gpuinfo import GPUInfo - - -MODEL_NAME = "cahya/whisper-medium-id" # this always needs to stay in line 8 :D sorry for the hackiness -whisper_models = { - "Indonesian Whisper Medium": { - "name": "cahya/whisper-medium-id", - "pipe": None, - } -} -lang = "id" -title = "Indonesian Whisperer" -description = "Cross Language Speech to Speech (Indonesian/English to 25 other languages) using OpenAI Whisper and Coqui TTS" -info = "This application uses [Indonesian Whisperer Medium](https://huggingface.co/cahya/whisper-medium-id) model" -badge = "https://img.shields.io/badge/Powered%20by-Indonesian%20Whisperer-red" -visitors = "https://visitor-badge.glitch.me/badge?page_id=cahya-hf-indonesian-whisperer" - -languages = { - 'English': 'en', - 'German': 'de', - 'Spanish': 'es', - 'French': 'fr', - 'Portuguese': 'pt', - 'Polish': 'pl', - 'Dutch': 'nl', - 'Swedish': 'sv', - 'Italian': 'it', - 'Finnish': 'fi', - 'Ukrainian': 'uk', - 'Greek': 'el', - 'Czech': 'cs', - 'Romanian': 'ro', - 'Danish': 'da', - 'Hungarian': 'hu', - 'Croatian': 'hr', - 'Bulgarian': 'bg', - 'Lithuanian': 'lt', - 'Slovak': 'sk', - 'Latvian': 'lv', - 'Slovenian': 'sl', - 'Estonian': 'et', - 'Maltese': 'mt' -} - -device = 0 if torch.cuda.is_available() else "cpu" - -for model in whisper_models: - whisper_models[model]["pipe"] = pipeline( - task="automatic-speech-recognition", - model=whisper_models[model]["name"], - chunk_length_s=30, - device=device, - ) - whisper_models[model]["pipe"].model.config.forced_decoder_ids = \ - whisper_models[model]["pipe"].tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe") - - -def transcribe(pipe, microphone, file_upload): - warn_output = "" - if (microphone is not None) and (file_upload is not None): - warn_output = ( - "WARNING: You've uploaded an audio file and used the microphone. 
" - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - ) - - elif (microphone is None) and (file_upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - file = microphone if microphone is not None else file_upload - - text = pipe(file)["text"] - - return warn_output + text - - -LANGUAGES = list(CoquiTTS.langs.keys()) -default_lang = "en" - -coquiTTS = CoquiTTS() - - -def process(language: str, model: str, audio_microphone: str, audio_file: str): - language = languages[language] - pipe = whisper_models[model]["pipe"] - time_start = time.time() - print(f"### {datetime.now()} TTS", language, audio_file) - transcription = transcribe(pipe, audio_microphone, audio_file) - print(f"### {datetime.now()} transcribed:", transcription) - translation = translate(transcription, language, "id") - # return output - with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp: - coquiTTS.get_tts(translation, fp, speaker={"language": language}) - time_end = time.time() - time_diff = time_end - time_start - memory = psutil.virtual_memory() - gpu_utilization, gpu_memory = GPUInfo.gpu_usage() - gpu_utilization = gpu_utilization[0] if len(gpu_utilization) > 0 else 0 - gpu_memory = gpu_memory[0] if len(gpu_memory) > 0 else 0 - system_info = f""" - *Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB.* - *Processing time: {time_diff:.5} seconds.* - *GPU Utilization: {gpu_utilization}%, GPU Memory: {gpu_memory}MiB.* - """ - print(f"### {datetime.now()} fp.name:", fp.name) - return transcription, translation, fp.name, system_info - - -with gr.Blocks() as blocks: - gr.Markdown("

" - + title - + "

") - gr.Markdown(description) - with gr.Row(): # equal_height=False - with gr.Column(): # variant="panel" - audio_microphone = gr.Audio(label="Microphone", source="microphone", type="filepath", optional=True) - audio_upload = gr.Audio(label="Upload", source="upload", type="filepath", optional=True) - language = gr.Dropdown([lang for lang in languages.keys()], label="Target Language", value="English") - model = gr.Dropdown([model for model in whisper_models.keys()], - label="Whisper Model", value="Indonesian Whisper Medium") - with gr.Row(): # mobile_collapse=False - submit = gr.Button("Submit", variant="primary") - examples = gr.Examples(examples=["data/Jokowi - 2022.mp3", "data/Soekarno - 1963.mp3", "data/JFK.mp3"], - label="Examples", inputs=[audio_upload]) - with gr.Column(): - text_source = gr.Textbox(label="Source Language") - text_target = gr.Textbox(label="Target Language") - audio = gr.Audio(label="Target Audio", interactive=False) - memory = psutil.virtual_memory() - system_info = gr.Markdown(f"*Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB*") - - gr.Markdown(info) - gr.Markdown("
" - + f'visitors badge' - + f'visitors badge' - + "
") - - # actions - submit.click( - process, - [language, model, audio_microphone, audio_upload], - [text_source, text_target, audio, system_info], - ) - -blocks.launch(server_port=7870) diff --git a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/audio/tools.py b/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/audio/tools.py deleted file mode 100644 index 7aca95cc1f5c120568a210907e9506589899a1c6..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/audio/tools.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch -import numpy as np - - -def get_mel_from_wav(audio, _stft): - audio = torch.clip(torch.FloatTensor(audio).unsqueeze(0), -1, 1) - audio = torch.autograd.Variable(audio, requires_grad=False) - melspec, log_magnitudes_stft, energy = _stft.mel_spectrogram(audio) - melspec = torch.squeeze(melspec, 0).numpy().astype(np.float32) - log_magnitudes_stft = ( - torch.squeeze(log_magnitudes_stft, 0).numpy().astype(np.float32) - ) - energy = torch.squeeze(energy, 0).numpy().astype(np.float32) - return melspec, log_magnitudes_stft, energy - - -# def inv_mel_spec(mel, out_filename, _stft, griffin_iters=60): -# mel = torch.stack([mel]) -# mel_decompress = _stft.spectral_de_normalize(mel) -# mel_decompress = mel_decompress.transpose(1, 2).data.cpu() -# spec_from_mel_scaling = 1000 -# spec_from_mel = torch.mm(mel_decompress[0], _stft.mel_basis) -# spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0) -# spec_from_mel = spec_from_mel * spec_from_mel_scaling - -# audio = griffin_lim( -# torch.autograd.Variable(spec_from_mel[:, :, :-1]), _stft._stft_fn, griffin_iters -# ) - -# audio = audio.squeeze() -# audio = audio.cpu().numpy() -# audio_path = out_filename -# write(audio_path, _stft.sampling_rate, audio) diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/docs/notes/benchmarks.md b/spaces/carlosalonso/Detection-video/carpeta_deteccion/docs/notes/benchmarks.md deleted file mode 100644 index b41588daf3a039b9034e80366c2710e90ba3e056..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/docs/notes/benchmarks.md +++ /dev/null @@ -1,196 +0,0 @@ - -# Benchmarks - -Here we benchmark the training speed of a Mask R-CNN in detectron2, -with some other popular open source Mask R-CNN implementations. - - -### Settings - -* Hardware: 8 NVIDIA V100s with NVLink. -* Software: Python 3.7, CUDA 10.1, cuDNN 7.6.5, PyTorch 1.5, - TensorFlow 1.15.0rc2, Keras 2.2.5, MxNet 1.6.0b20190820. -* Model: an end-to-end R-50-FPN Mask-RCNN model, using the same hyperparameter as the - [Detectron baseline config](https://github.com/facebookresearch/Detectron/blob/master/configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml) - (it does not have scale augmentation). -* Metrics: We use the average throughput in iterations 100-500 to skip GPU warmup time. - Note that for R-CNN-style models, the throughput of a model typically changes during training, because - it depends on the predictions of the model. Therefore this metric is not directly comparable with - "train speed" in model zoo, which is the average speed of the entire training run. 
- - -### Main Results - -```eval_rst -+-------------------------------+--------------------+ -| Implementation | Throughput (img/s) | -+===============================+====================+ -| |D2| |PT| | 62 | -+-------------------------------+--------------------+ -| mmdetection_ |PT| | 53 | -+-------------------------------+--------------------+ -| maskrcnn-benchmark_ |PT| | 53 | -+-------------------------------+--------------------+ -| tensorpack_ |TF| | 50 | -+-------------------------------+--------------------+ -| simpledet_ |mxnet| | 39 | -+-------------------------------+--------------------+ -| Detectron_ |C2| | 19 | -+-------------------------------+--------------------+ -| `matterport/Mask_RCNN`__ |TF| | 14 | -+-------------------------------+--------------------+ - -.. _maskrcnn-benchmark: https://github.com/facebookresearch/maskrcnn-benchmark/ -.. _tensorpack: https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN -.. _mmdetection: https://github.com/open-mmlab/mmdetection/ -.. _simpledet: https://github.com/TuSimple/simpledet/ -.. _Detectron: https://github.com/facebookresearch/Detectron -__ https://github.com/matterport/Mask_RCNN/ - -.. |D2| image:: https://github.com/facebookresearch/detectron2/raw/main/.github/Detectron2-Logo-Horz.svg?sanitize=true - :height: 15pt - :target: https://github.com/facebookresearch/detectron2/ -.. |PT| image:: https://pytorch.org/assets/images/logo-icon.svg - :width: 15pt - :height: 15pt - :target: https://pytorch.org -.. |TF| image:: https://static.nvidiagrid.net/ngc/containers/tensorflow.png - :width: 15pt - :height: 15pt - :target: https://tensorflow.org -.. |mxnet| image:: https://github.com/dmlc/web-data/raw/master/mxnet/image/mxnet_favicon.png - :width: 15pt - :height: 15pt - :target: https://mxnet.apache.org/ -.. |C2| image:: https://caffe2.ai/static/logo.svg - :width: 15pt - :height: 15pt - :target: https://caffe2.ai -``` - - -Details for each implementation: - -* __Detectron2__: with release v0.1.2, run: - ``` - python tools/train_net.py --config-file configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml --num-gpus 8 - ``` - -* __mmdetection__: at commit `b0d845f`, run - ``` - ./tools/dist_train.sh configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py 8 - ``` - -* __maskrcnn-benchmark__: use commit `0ce8f6f` with `sed -i 's/torch.uint8/torch.bool/g' **/*.py; sed -i 's/AT_CHECK/TORCH_CHECK/g' **/*.cu` - to make it compatible with PyTorch 1.5. Then, run training with - ``` - python -m torch.distributed.launch --nproc_per_node=8 tools/train_net.py --config-file configs/e2e_mask_rcnn_R_50_FPN_1x.yaml - ``` - The speed we observed is faster than its model zoo, likely due to different software versions. - -* __tensorpack__: at commit `caafda`, `export TF_CUDNN_USE_AUTOTUNE=0`, then run - ``` - mpirun -np 8 ./train.py --config DATA.BASEDIR=/data/coco TRAINER=horovod BACKBONE.STRIDE_1X1=True TRAIN.STEPS_PER_EPOCH=50 --load ImageNet-R50-AlignPadding.npz - ``` - -* __SimpleDet__: at commit `9187a1`, run - ``` - python detection_train.py --config config/mask_r50v1_fpn_1x.py - ``` - -* __Detectron__: run - ``` - python tools/train_net.py --cfg configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml - ``` - Note that many of its ops run on CPUs, therefore the performance is limited. 
- -* __matterport/Mask_RCNN__: at commit `3deaec`, apply the following diff, `export TF_CUDNN_USE_AUTOTUNE=0`, then run - ``` - python coco.py train --dataset=/data/coco/ --model=imagenet - ``` - Note that many small details in this implementation might be different - from Detectron's standards. - -
- - (diff to make it use the same hyperparameters - click to expand) - - - ```diff - diff --git i/mrcnn/model.py w/mrcnn/model.py - index 62cb2b0..61d7779 100644 - --- i/mrcnn/model.py - +++ w/mrcnn/model.py - @@ -2367,8 +2367,8 @@ class MaskRCNN(): - epochs=epochs, - steps_per_epoch=self.config.STEPS_PER_EPOCH, - callbacks=callbacks, - - validation_data=val_generator, - - validation_steps=self.config.VALIDATION_STEPS, - + #validation_data=val_generator, - + #validation_steps=self.config.VALIDATION_STEPS, - max_queue_size=100, - workers=workers, - use_multiprocessing=True, - diff --git i/mrcnn/parallel_model.py w/mrcnn/parallel_model.py - index d2bf53b..060172a 100644 - --- i/mrcnn/parallel_model.py - +++ w/mrcnn/parallel_model.py - @@ -32,6 +32,7 @@ class ParallelModel(KM.Model): - keras_model: The Keras model to parallelize - gpu_count: Number of GPUs. Must be > 1 - """ - + super().__init__() - self.inner_model = keras_model - self.gpu_count = gpu_count - merged_outputs = self.make_parallel() - diff --git i/samples/coco/coco.py w/samples/coco/coco.py - index 5d172b5..239ed75 100644 - --- i/samples/coco/coco.py - +++ w/samples/coco/coco.py - @@ -81,7 +81,10 @@ class CocoConfig(Config): - IMAGES_PER_GPU = 2 - - # Uncomment to train on 8 GPUs (default is 1) - - # GPU_COUNT = 8 - + GPU_COUNT = 8 - + BACKBONE = "resnet50" - + STEPS_PER_EPOCH = 50 - + TRAIN_ROIS_PER_IMAGE = 512 - - # Number of classes (including background) - NUM_CLASSES = 1 + 80 # COCO has 80 classes - @@ -496,29 +499,10 @@ if __name__ == '__main__': - # *** This training schedule is an example. Update to your needs *** - - # Training - Stage 1 - - print("Training network heads") - model.train(dataset_train, dataset_val, - learning_rate=config.LEARNING_RATE, - epochs=40, - - layers='heads', - - augmentation=augmentation) - - - - # Training - Stage 2 - - # Finetune layers from ResNet stage 4 and up - - print("Fine tune Resnet stage 4 and up") - - model.train(dataset_train, dataset_val, - - learning_rate=config.LEARNING_RATE, - - epochs=120, - - layers='4+', - - augmentation=augmentation) - - - - # Training - Stage 3 - - # Fine tune all layers - - print("Fine tune all layers") - - model.train(dataset_train, dataset_val, - - learning_rate=config.LEARNING_RATE / 10, - - epochs=160, - - layers='all', - + layers='3+', - augmentation=augmentation) - - elif args.command == "evaluate": - ``` - -
diff --git a/spaces/cccc-c/bingo/src/components/chat-message.tsx b/spaces/cccc-c/bingo/src/components/chat-message.tsx deleted file mode 100644 index bf272d8d7005cfd06c53bd213e09ea217e803549..0000000000000000000000000000000000000000 --- a/spaces/cccc-c/bingo/src/components/chat-message.tsx +++ /dev/null @@ -1,93 +0,0 @@ -import remarkGfm from 'remark-gfm' -import remarkMath from 'remark-math' -import supersub from 'remark-supersub' -import remarkBreaks from 'remark-breaks' -import { cn } from '@/lib/utils' -import { CodeBlock } from '@/components/ui/codeblock' -import { MemoizedReactMarkdown } from '@/components/markdown' -import { LearnMore } from './learn-more' -import { ChatMessageModel } from '@/lib/bots/bing/types' -import { useEffect } from 'react' -import { TurnCounter } from './turn-counter' - -export interface ChatMessageProps { - message: ChatMessageModel -} - -export function ChatMessage({ message, ...props }: ChatMessageProps) { - useEffect(() => { - if (document.body.scrollHeight - window.innerHeight - window.scrollY - 200 < 0) { - window.scrollBy(0, 200) - } - }, [message.text]) - - return message.text ? ( -
-
- {obj.alt} - } - } catch (e) { - } - return {obj.alt} - }, - p({ children }) { - return

{children}

- }, - code({ node, inline, className, children, ...props }) { - if (children.length) { - if (children[0] == '▍') { - return ( - - ) - } - - children[0] = (children[0] as string).replace('`▍`', '▍') - } - - const match = /language-(\w+)/.exec(className || '') - - if (inline) { - return ( - - {children} - - ) - } - - return ( - - ) - } - }} - > - {message.text} -
-
-
- {message.author === 'bot' && } - {message.author === 'bot' && } -
-
- ) : null -} diff --git a/spaces/chasemcdo/hf_localai/examples/langchain-python/README.md b/spaces/chasemcdo/hf_localai/examples/langchain-python/README.md deleted file mode 100644 index aeff6c48a5dcb2c12b6f88d5632f441ab8b3552a..0000000000000000000000000000000000000000 --- a/spaces/chasemcdo/hf_localai/examples/langchain-python/README.md +++ /dev/null @@ -1,29 +0,0 @@ -## Langchain-python - -Langchain example from [quickstart](https://python.langchain.com/en/latest/getting_started/getting_started.html). - -To interact with langchain, you can just set the `OPENAI_API_BASE` URL and provide a token with a random string. - -See the example below: - -``` -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/langchain-python - -# start with docker-compose -docker-compose up --pull always - -pip install langchain -pip install openai - -export OPENAI_API_BASE=http://localhost:8080 -# Note: **OPENAI_API_KEY** is not required. However the library might fail if no API_KEY is passed by, so an arbitrary string can be used. -export OPENAI_API_KEY=sk- - -python test.py -# A good company name for a company that makes colorful socks would be "Colorsocks". - -python agent.py -``` \ No newline at end of file diff --git a/spaces/chilge/taoli/data_utils.py b/spaces/chilge/taoli/data_utils.py deleted file mode 100644 index 9dfba4a9dfbfbd2b6ed5e771a5ffee4f70419ba3..0000000000000000000000000000000000000000 --- a/spaces/chilge/taoli/data_utils.py +++ /dev/null @@ -1,152 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import commons -from mel_processing import spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text, transform - -# import h5py - - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths, hparams): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.spec_len = hparams.train.max_speclen - self.spk_map = hparams.spk - - random.seed(1234) - random.shuffle(self.audiopaths) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split(os.sep)[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - c = torch.load(filename + ".soft.pt").squeeze(0) - c = torch.repeat_interleave(c, repeats=2, dim=1) - - f0 = np.load(filename + ".f0.npy") - f0 = torch.FloatTensor(f0) - lmin = min(c.size(-1), spec.size(-1), f0.shape[0]) - assert abs(c.size(-1) - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape, filename) - assert abs(lmin - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape) - assert abs(lmin - c.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape) - spec, c, f0 = spec[:, :lmin], c[:, :lmin], f0[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - _spec, _c, _audio_norm, _f0 = spec, c, audio_norm, f0 - while spec.size(-1) < self.spec_len: - spec = torch.cat((spec, _spec), -1) - c = torch.cat((c, _c), -1) - f0 = torch.cat((f0, _f0), -1) - audio_norm = torch.cat((audio_norm, _audio_norm), -1) - start = random.randint(0, spec.size(-1) - self.spec_len) - end = start + self.spec_len - spec = spec[:, start:end] - c = c[:, start:end] - f0 = f0[start:end] - audio_norm = audio_norm[:, start * self.hop_length:end * self.hop_length] - - return c, f0, spec, audio_norm, spk - - def __getitem__(self, index): - return self.get_audio(self.audiopaths[index][0]) - - def __len__(self): - return len(self.audiopaths) - - -class EvalDataLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths, hparams): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.audiopaths = self.audiopaths[:5] - self.spk_map = hparams.spk - - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split(os.sep)[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - c = torch.load(filename + ".soft.pt").squeeze(0) - - c = torch.repeat_interleave(c, repeats=2, dim=1) - - f0 = np.load(filename + ".f0.npy") - f0 = torch.FloatTensor(f0) - lmin = min(c.size(-1), spec.size(-1), f0.shape[0]) - assert abs(c.size(-1) - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape) - assert abs(f0.shape[0] - spec.shape[-1]) < 4, (c.size(-1), spec.size(-1), f0.shape) - spec, c, f0 = spec[:, :lmin], c[:, :lmin], f0[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - - return c, f0, spec, audio_norm, spk - - def __getitem__(self, index): - return self.get_audio(self.audiopaths[index][0]) - - def __len__(self): - return len(self.audiopaths) - diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageSequence.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageSequence.py deleted file mode 100644 index c4bb6334acfde7d245c5bb1722b7c2381661e4ca..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageSequence.py +++ /dev/null @@ -1,76 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# sequence support classes -# -# history: -# 1997-02-20 fl Created -# -# Copyright (c) 1997 by Secret Labs AB. -# Copyright (c) 1997 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# - -## - - -class Iterator: - """ - This class implements an iterator object that can be used to loop - over an image sequence. - - You can use the ``[]`` operator to access elements by index. This operator - will raise an :py:exc:`IndexError` if you try to access a nonexistent - frame. - - :param im: An image object. 
- """ - - def __init__(self, im): - if not hasattr(im, "seek"): - msg = "im must have seek method" - raise AttributeError(msg) - self.im = im - self.position = getattr(self.im, "_min_frame", 0) - - def __getitem__(self, ix): - try: - self.im.seek(ix) - return self.im - except EOFError as e: - raise IndexError from e # end of sequence - - def __iter__(self): - return self - - def __next__(self): - try: - self.im.seek(self.position) - self.position += 1 - return self.im - except EOFError as e: - raise StopIteration from e - - -def all_frames(im, func=None): - """ - Applies a given function to all frames in an image or a list of images. - The frames are returned as a list of separate images. - - :param im: An image, or a list of images. - :param func: The function to apply to all of the image frames. - :returns: A list of images. - """ - if not isinstance(im, list): - im = [im] - - ims = [] - for imSequence in im: - current = imSequence.tell() - - ims += [im_frame.copy() for im_frame in Iterator(imSequence)] - - imSequence.seek(current) - return [func(im) for im in ims] if func else ims diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/components/code.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/components/code.py deleted file mode 100644 index 30134c8ead562d36316260a645f0adf5fd4c9125..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/components/code.py +++ /dev/null @@ -1,157 +0,0 @@ -"""gr.Code() component""" - -from __future__ import annotations - -from typing import Literal - -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable - -from gradio.components.base import IOComponent, _Keywords -from gradio.events import Changeable, Inputable - -set_documentation_group("component") - - -@document("languages") -class Code(Changeable, Inputable, IOComponent, StringSerializable): - """ - Creates a Code editor for entering, editing or viewing code. - Preprocessing: passes a {str} of code into the function. - Postprocessing: expects the function to return a {str} of code or a single-elment {tuple}: (string filepath,) - """ - - languages = [ - "python", - "markdown", - "json", - "html", - "css", - "javascript", - "typescript", - "yaml", - "dockerfile", - "shell", - "r", - None, - ] - - def __init__( - self, - value: str | tuple[str] | None = None, - language: Literal[ - "python", - "markdown", - "json", - "html", - "css", - "javascript", - "typescript", - "yaml", - "dockerfile", - "shell", - "r", - ] - | None = None, - *, - lines: int = 5, - label: str | None = None, - interactive: bool | None = None, - show_label: bool = True, - container: bool = True, - scale: int | None = None, - min_width: int = 160, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - value: Default value to show in the code editor. If callable, the function will be called whenever the app loads to set the initial value of the component. - language: The language to display the code as. Supported languages listed in `gr.Code.languages`. - label: component name in interface. - interactive: Whether user should be able to enter code or only view it. - show_label: if True, will display label. - container: If True, will place the component in a container - providing some extra padding around the border. 
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - assert language in Code.languages, f"Language {language} not supported." - self.language = language - self.lines = lines - IOComponent.__init__( - self, - label=label, - interactive=interactive, - show_label=show_label, - container=container, - scale=scale, - min_width=min_width, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "value": self.value, - "language": self.language, - "lines": self.lines, - **IOComponent.get_config(self), - } - - def postprocess(self, y): - if y is None: - return None - elif isinstance(y, tuple): - with open(y[0]) as file_data: - return file_data.read() - else: - return y.strip() - - @staticmethod - def update( - value: str - | tuple[str] - | None - | Literal[_Keywords.NO_VALUE] = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - container: bool | None = None, - scale: int | None = None, - min_width: int | None = None, - visible: bool | None = None, - language: Literal[ - "python", - "markdown", - "json", - "html", - "css", - "javascript", - "typescript", - "yaml", - "dockerfile", - "shell", - "r", - ] - | None = None, - interactive: bool | None = None, - ): - return { - "label": label, - "show_label": show_label, - "container": container, - "scale": scale, - "min_width": min_width, - "visible": visible, - "value": value, - "language": language, - "interactive": interactive, - "__type__": "update", - } diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-061f1fcf.js b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-061f1fcf.js deleted file mode 100644 index c0819170490aaf0cd1712493a2acc4b6382c21a8..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-061f1fcf.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as z,e as E,s as K,a9 as L,N as p,P,O as B,K as q,L as k,U as j,p as w,M as v,Q,R,ab as M,ac as N,ad as O,z as g,v as b,A,k as C,o as h,x as S,E as T,ae as U,q as D,r as F}from"./index-f877dfd5.js";import{B as G}from"./Button-11a87b79.js";import{C as H}from"./Column-824a6363.js";/* empty css */function I(a){let e,l,t,s,o,u,n,f,d,_;const r=a[3].default,c=L(r,a,a[2],null);return{c(){e=p("div"),l=p("span"),t=P(a[1]),s=B(),o=p("span"),o.textContent="▼",u=B(),n=p("div"),c&&c.c(),q(l,"class","svelte-s1r2yt"),q(o,"class","icon svelte-s1r2yt"),k(o,"transform",a[0]?"rotate(0)":"rotate(90deg)"),q(e,"class","label-wrap 
svelte-s1r2yt"),j(e,"open",a[0]),k(n,"display",a[0]?"block":"none")},m(i,m){w(i,e,m),v(e,l),v(l,t),v(e,s),v(e,o),w(i,u,m),w(i,n,m),c&&c.m(n,null),f=!0,d||(_=Q(e,"click",a[4]),d=!0)},p(i,[m]){(!f||m&2)&&R(t,i[1]),m&1&&k(o,"transform",i[0]?"rotate(0)":"rotate(90deg)"),(!f||m&1)&&j(e,"open",i[0]),c&&c.p&&(!f||m&4)&&M(c,r,i,i[2],f?O(r,i[2],m,null):N(i[2]),null),m&1&&k(n,"display",i[0]?"block":"none")},i(i){f||(g(c,i),f=!0)},o(i){b(c,i),f=!1},d(i){i&&(A(e),A(u),A(n)),c&&c.d(i),d=!1,_()}}}function J(a,e,l){let{$$slots:t={},$$scope:s}=e,{label:o=""}=e,{open:u=!0}=e;const n=()=>l(0,u=!u);return a.$$set=f=>{"label"in f&&l(1,o=f.label),"open"in f&&l(0,u=f.open),"$$scope"in f&&l(2,s=f.$$scope)},[u,o,s,t,n]}class V extends z{constructor(e){super(),E(this,e,J,I,K,{label:1,open:0})}}function W(a){let e;const l=a[6].default,t=L(l,a,a[7],null);return{c(){t&&t.c()},m(s,o){t&&t.m(s,o),e=!0},p(s,o){t&&t.p&&(!e||o&128)&&M(t,l,s,s[7],e?O(l,s[7],o,null):N(s[7]),null)},i(s){e||(g(t,s),e=!0)},o(s){b(t,s),e=!1},d(s){t&&t.d(s)}}}function X(a){let e,l;return e=new H({props:{$$slots:{default:[W]},$$scope:{ctx:a}}}),{c(){C(e.$$.fragment)},m(t,s){h(e,t,s),l=!0},p(t,s){const o={};s&128&&(o.$$scope={dirty:s,ctx:t}),e.$set(o)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){b(e.$$.fragment,t),l=!1},d(t){S(e,t)}}}function Y(a){let e,l,t,s;const o=[a[5]];let u={};for(let n=0;n{"label"in r&&l(0,o=r.label),"elem_id"in r&&l(1,u=r.elem_id),"elem_classes"in r&&l(2,n=r.elem_classes),"visible"in r&&l(3,f=r.visible),"open"in r&&l(4,d=r.open),"loading_status"in r&&l(5,_=r.loading_status),"$$scope"in r&&l(7,s=r.$$scope)},[o,u,n,f,d,_,t,s]}class y extends z{constructor(e){super(),E(this,e,$,Z,K,{label:0,elem_id:1,elem_classes:2,visible:3,open:4,loading_status:5})}}const le=y,ne=["static"];export{le as Component,ne as modes}; -//# sourceMappingURL=index-061f1fcf.js.map diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacps_float.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacps_float.c deleted file mode 100644 index 73259c10fbe7fafd62f6143f6da7c99be6e502a2..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacps_float.c +++ /dev/null @@ -1,24 +0,0 @@ -/* - * MPEG-4 Parametric Stereo decoding functions - * Copyright (c) 2010 Alex Converse - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#define USE_FIXED 0 - -#include "aacps.c" diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/lossless_audiodsp.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/lossless_audiodsp.h deleted file mode 100644 index eea5d49fa9ba08dd80fb69a861406f41baeb0569..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/lossless_audiodsp.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Monkey's Audio lossless audio decoder - * Copyright (c) 2007 Benjamin Zores - * based upon libdemac from Dave Chapman. - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_LOSSLESS_AUDIODSP_H -#define AVCODEC_LOSSLESS_AUDIODSP_H - -#include - -typedef struct LLAudDSPContext { - /** - * Calculate scalar product of v1 and v2, - * and v1[i] += v3[i] * mul - * @param len length of vectors, should be multiple of 16, - * or padd v3 and v1 or v2 with zeros. - */ - int32_t (*scalarproduct_and_madd_int16)(int16_t *v1 /* align 16 */, - const int16_t *v2, - const int16_t *v3, - int len, int mul); - - int32_t (*scalarproduct_and_madd_int32)(int16_t *v1 /* align 16 */, - const int32_t *v2, - const int16_t *v3, - int len, int mul); -} LLAudDSPContext; - -void ff_llauddsp_init(LLAudDSPContext *c); -void ff_llauddsp_init_arm(LLAudDSPContext *c); -void ff_llauddsp_init_ppc(LLAudDSPContext *c); -void ff_llauddsp_init_x86(LLAudDSPContext *c); - -#endif /* AVCODEC_LOSSLESS_AUDIODSP_H */ diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download and Watch Facebook Videos Offline in Full HD 1080p with This APK.md b/spaces/congsaPfin/Manga-OCR/logs/Download and Watch Facebook Videos Offline in Full HD 1080p with This APK.md deleted file mode 100644 index 219da9f309cda17b937e9b4f40e36583123082c1..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download and Watch Facebook Videos Offline in Full HD 1080p with This APK.md +++ /dev/null @@ -1,107 +0,0 @@ - -

How to Download Facebook Videos in 1080p HD Quality with APK

-

Facebook is one of the most popular social media platforms in the world, with billions of users and millions of videos uploaded every day. You may have come across some amazing videos on Facebook that you want to save on your device, share with your friends, or edit for your own projects. But how can you download Facebook videos in high quality, especially in 1080p HD resolution?

-

In this article, we will show you how to download Facebook videos in 1080p HD quality with APK files. APK files are Android application packages that can be installed on your Android device without using the Google Play Store. They are useful for accessing apps that are not available in your region, or for getting the latest updates before they are officially released.

-

download facebook videos 1080p apk


DOWNLOAD » https://urlca.com/2uO9xG



-

We will also introduce you to one of the best APKs for downloading Facebook videos, SnapSave.App, which allows you to download any Facebook video in full HD, 2K, or 4K quality with just a few clicks. Let's get started!

-

Why Download Facebook Videos?

-

There are many reasons why you may want to download Facebook videos to your device. Here are some of them:

-

Save your favorite videos offline

-

Some Facebook videos are so entertaining, informative, or inspiring that you want to watch them again and again. However, watching online videos can consume a lot of data and battery power, and you may not always have a stable internet connection. By downloading Facebook videos to your device, you can enjoy them anytime and anywhere, without worrying about buffering or loading issues.

-

Share videos with others without internet

-

Sometimes, you may want to share a Facebook video with someone who does not have access to the internet, or who is not on Facebook. For example, you may want to show a funny video to your grandma, or a motivational video to your colleague. By downloading Facebook videos to your device, you can easily share them with others via Bluetooth, Wi-Fi Direct, or other offline methods.

-

Edit videos for your own purposes

-

If you are a creative person, you may want to use some Facebook videos as raw materials for your own projects. For example, you may want to make a collage of different videos, add some music or subtitles, or create a parody or remix. By downloading Facebook videos to your device, you can edit them with any video editing app or software that you prefer.

-

How to Download Facebook Videos with APK?

-

Now that you know why downloading Facebook videos is useful, let's see how you can do it with APK files. First, you need to understand what an APK file is and how to install it on your device.

-

What is an APK file?

-

An APK file is an Android application package that contains all the files and code needed to run an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac. You can download APK files from various websites that offer them, such as APKPure, APKMirror, or Uptodown. However, be careful when downloading APK files from unknown sources, as they may contain malware or viruses that can harm your device or steal your data. Always scan the APK files with a reliable antivirus app before installing them.

-

How to install an APK file on your device?

-

To install an APK file on your device, you need to follow these steps:

-

How to download facebook videos in full hd 1080p with apk
-Best facebook video downloader apk for android 1080p
-Download facebook videos online free 1080p apk
-Facebook video download full hd 1080p apk for pc
-Facebook video downloader 4k apk - download facebook videos in 4k resolution
-Download facebook live videos 1080p apk
-Facebook video download app for iphone - download facebook videos in 1080p
-Download private facebook videos 1080p apk
-Facebook video downloader chrome extension - download facebook videos in 1080p with one click
-Download facebook stories videos 1080p apk
-Facebook video downloader pro apk - download facebook videos in hd, 2k, and 4k
-Download facebook videos without watermark 1080p apk
-Facebook video download manager apk - download multiple facebook videos in 1080p at once
-Download facebook videos with subtitles 1080p apk
-Facebook video downloader for windows 10 - download facebook videos in 1080p on your pc
-Download facebook videos to mp3 1080p apk
-Facebook video downloader for mac - download facebook videos in 1080p on your macbook
-Download facebook videos to gallery 1080p apk
-Facebook video downloader for firefox - download facebook videos in 1080p with this addon
-Download facebook videos to sd card 1080p apk
-Facebook video downloader for instagram - download facebook videos in 1080p and repost on instagram
-Download facebook videos with sound 1080p apk
-Facebook video downloader for tiktok - download facebook videos in 1080p and make tiktoks with them
-Download facebook videos to camera roll 1080p apk
-Facebook video downloader for whatsapp - download facebook videos in 1080p and share on whatsapp
-Download facebook videos faster 1080p apk
-Facebook video downloader for youtube - download facebook videos in 1080p and upload on youtube
-Download facebook videos without login 1080p apk
-Facebook video downloader for snapchat - download facebook videos in 1080p and snap them to your friends
-Download facebook videos with comments 1080p apk

-
    -
  1. Download the APK file from a trusted source and save it to your device's storage.
  2. -
  3. Go to your device's settings and enable the option to install apps from unknown sources. This may vary depending on your device model and Android version, but you can usually find it under security or privacy settings.
  4. -
  5. Locate the APK file on your device using a file manager app and tap on it to start the installation process. You may need to grant some permissions to the app before it can be installed.
  6. -
  7. Wait for the installation to finish and then launch the app from your app drawer or home screen.
  8. -
-

Congratulations, you have successfully installed an APK file on your device!

-

What are the best APKs for downloading Facebook videos?

-

There are many APKs that claim to help you download Facebook videos, but not all of them are reliable, safe, or effective. Some of them may not work properly, have annoying ads, or require unnecessary permissions. To save you time and hassle, we have tested and selected three of the best APKs for downloading Facebook videos. They are:

-

SnapSave.App

-

This is our top recommendation for downloading Facebook videos in 1080p HD quality or higher. SnapSave.App is a web-based tool that allows you to download any Facebook video with just a few clicks. You don't need to install any app on your device, just visit the website and paste the video link. You can choose from various quality options, including full HD, 2K, and 4K. You can also download videos from other platforms, such as Instagram, YouTube, Twitter, and TikTok. SnapSave.App is fast, easy, and free to use.

-

Video Downloader for Facebook

-

This is another good option for downloading Facebook videos with an APK file. Video Downloader for Facebook is a lightweight and user-friendly app that lets you browse and download Facebook videos directly from the app. You can also copy and paste the video link from any browser or app. You can choose from different quality options, including HD and SD. You can also manage your downloaded videos in a built-in gallery and share them with others. Video Downloader for Facebook is free to use, but it contains ads.

-

HD Video Downloader for Facebook

-

This is a third option for downloading Facebook videos with an APK file. HD Video Downloader for Facebook is a simple and efficient app that helps you download Facebook videos in high quality. You can either use the app's browser to find and download videos, or copy and paste the video link from any source. You can select the desired quality level, including HD and SD. You can also play and delete your downloaded videos within the app. HD Video Downloader for Facebook is free to use, but it also contains ads.

-

How to Use SnapSave.App to Download Facebook Videos in 1080p HD Quality?

-

As we mentioned earlier, SnapSave.App is our favorite tool for downloading Facebook videos in 1080p HD quality or higher. Here are the steps to use it:

-

Step 1: Copy the video link from Facebook

-

To download a Facebook video with SnapSave.App, you need to copy its link first. You can do this by following these steps:

-
    -
  • Open the Facebook app or website and find the video you want to download.
  • -
  • Tap on the three-dot icon at the top right corner of the video post and select "Copy link".
  • -
  • The video link will be copied to your clipboard.
  • -
-

Step 2: Paste the link into SnapSave.App and choose the quality option

-

Once you have copied the video link, you need to paste it into SnapSave.App and choose the quality option. You can do this by following these steps:

-
    -
  • Open a web browser on your device and go to SnapSave.App.
  • -
  • Paste the video link into the input box at the top of the page and tap on "Download".
  • -
  • The tool will analyze the video link and show you various quality options, including full HD (1080p), 2K (1440p), and 4K (2160p).
  • -
  • Select the quality option that suits your needs and tap on "Download".
  • -
-

Step 3: Download the video to your device or share it with others

-

After you have selected the quality option, you can download the video to your device or share it with others. You can do this by following these steps:

-
    -
  • The tool will generate a download link for the video and show it on the page.
  • -
  • Tap on the download link and choose where to save the video on your device. You may need to grant some permissions to the tool to access your storage.
  • -
  • The video will start downloading and you can see the progress on your notification bar.
  • -
  • Once the download is complete, you can open the video with any media player app or share it with others via any method you prefer.
  • -
-

Congratulations, you have successfully downloaded a Facebook video in 1080p HD quality with SnapSave.App!

-

Conclusion

-

In this article, we have shown you how to download Facebook videos in 1080p HD quality with APK files. We have explained what an APK file is, how to install it on your device, and what are the best APKs for downloading Facebook videos. We have also demonstrated how to use SnapSave.App, our favorite tool for downloading Facebook videos in full HD, 2K, or 4K quality. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Happy downloading!

-

FAQs

-

Here are some frequently asked questions about downloading Facebook videos with APK files:

-

Q: Is it legal to download Facebook videos?

-

A: It depends on the source and purpose of the video. Generally, it is legal to download Facebook videos for personal use, as long as you do not violate the terms of service of Facebook or the rights of the video owner. However, it is illegal to download Facebook videos that are protected by copyright or other intellectual property laws, or that contain illegal or harmful content. It is also illegal to download Facebook videos for commercial use, such as selling, distributing, or monetizing them without permission from the video owner.

-

Q: Is it safe to download Facebook videos with APK files?

-

A: It depends on the source and quality of the APK file. Generally, it is safe to download Facebook videos with APK files from trusted and reputable websites, such as SnapSave.App, APKPure, or APKMirror. However, it is unsafe to download Facebook videos with APK files from unknown or suspicious websites, as they may contain malware or viruses that can harm your device or steal your data. Always scan the APK files with a reliable antivirus app before installing them.

-

Q: How can I download Facebook videos without APK files?

-

A: There are other ways to download Facebook videos without APK files, such as using online tools, browser extensions, or desktop software. However, these methods may not be as fast, easy, or effective as using APK files. They may also have some limitations, such as requiring an internet connection, having ads or watermarks, or not supporting high-quality options. Therefore, we recommend using APK files for downloading Facebook videos if you want the best results.

-

Q: How can I download Facebook videos on iOS devices?

-

A: Unfortunately, there is no direct way to download Facebook videos on iOS devices with APK files, as they are only compatible with Android devices. However, you can still download Facebook videos on iOS devices by using other methods, such as using online tools (e.g., SnapSave.App), browser extensions (e.g., Video Downloader Plus), or desktop software (e.g., iTubeGo). You can also use a third-party app store (e.g., TutuApp) to install some apps that can help you download Facebook videos on iOS devices.

-

Q: How can I download Facebook live videos?

-

A: You can download Facebook live videos by using the same methods as downloading regular Facebook videos. However, you need to wait until the live stream is over and the video is available on the video owner's page or profile. Then, you can copy the video link and paste it into any tool or app that can help you download Facebook videos. Alternatively, you can use a screen recorder app (e.g., AZ Screen Recorder) to record the live stream while watching it.

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/GTA Vice City The Most Iconic Game of the Grand Theft Auto Series for Windows 7.md b/spaces/congsaPfin/Manga-OCR/logs/GTA Vice City The Most Iconic Game of the Grand Theft Auto Series for Windows 7.md deleted file mode 100644 index 5c031668befdec0b9607a8e6deca718a510cc3cb..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/GTA Vice City The Most Iconic Game of the Grand Theft Auto Series for Windows 7.md +++ /dev/null @@ -1,122 +0,0 @@ - -

How to Download GTA Vice City for Windows 7

-

If you are a fan of open-world action-adventure games, you have probably heard of Grand Theft Auto: Vice City. This game, released in 2002 by Rockstar Games, is one of the most popular and influential titles in the GTA series. It is set in a fictional version of Miami in the 1980s, where you play as Tommy Vercetti, a former mobster who is trying to build his own criminal empire.

-

GTA Vice City is not only a game, but also a cultural phenomenon. It features a rich and immersive story, a vast and diverse map, a variety of vehicles and weapons, a memorable soundtrack, and a lot of humor and satire. It is also one of the most modded games ever, with thousands of fan-made content available online.

-

gta vice city download windows 7


Downloadhttps://urlca.com/2uOcHe



-

But how can you play this classic game on your Windows 7 PC? In this article, we will show you how to download GTA Vice City for Windows 7, what are the requirements for running it smoothly, and some tips and tricks for enhancing your gaming experience. Let's get started!

-

Requirements for GTA Vice City on Windows 7

-

Before you download GTA Vice City for Windows 7, you need to make sure that your PC meets the minimum or recommended system specifications. Here are the requirements for GTA Vice City on Windows 7:

- - - - - - - - -
MinimumRecommended
CPU: Intel Pentium III or AMD Athlon 800 MHzCPU: Intel Pentium IV or AMD Athlon XP 1.2 GHz
RAM: 128 MBRAM: 256 MB
GPU: 32 MB video card with DirectX 9.0 compatible driversGPU: 64 MB video card with DirectX 9.0 compatible drivers
OS: Windows XP or Windows Vista or Windows 7OS: Windows XP or Windows Vista or Windows 7
HDD: 915 MB of free disk spaceHDD: 1.55 GB of free disk space
Sound: DirectX 9.0 compatible sound cardSound: DirectX 9.0 compatible sound card with surround sound
-

As you can see, GTA Vice City does not require a very powerful PC to run. However, if you want to enjoy the game at its best, you should have a PC that meets or exceeds the recommended specifications.

-

Steps to Download GTA Vice City for Windows 7

-

Now that you know the requirements for GTA Vice City on Windows 7, you are ready to download the game. There are two ways to do this:

-

Purchase GTA Vice City from Rockstar Games website or Microsoft Store

-

The first way to download GTA Vice City for Windows 7 is to purchase it from the official Rockstar Games website or from the Microsoft Store. Both options will cost you $9.99, and you will need a valid credit card or PayPal account to make the payment. Once you have purchased the game, you will receive a confirmation email with a download link or a product key.

-

gta vice city definitive edition windows 7
-gta vice city free download for pc windows 7
-gta vice city game download for windows 7
-gta vice city setup download for windows 7
-gta vice city full version download for windows 7
-gta vice city stories download for windows 7
-gta vice city cheats download for windows 7
-gta vice city ultimate download for windows 7
-gta vice city mod download for windows 7
-gta vice city apk download for windows 7
-gta vice city online download for windows 7
-gta vice city deluxe download for windows 7
-gta vice city compressed download for windows 7
-gta vice city hd download for windows 7
-gta vice city rockstar games download for windows 7
-gta vice city remastered download for windows 7
-gta vice city android download for windows 7
-gta vice city trainer download for windows 7
-gta vice city patch download for windows 7
-gta vice city crack download for windows 7
-gta vice city soundtrack download for windows 7
-gta vice city save game download for windows 7
-gta vice city filehippo download for windows 7
-gta vice city ocean of games download for windows 7
-gta vice city highly compressed download for windows 7
-gta vice city graphics mod download for windows 7
-gta vice city radio stations download for windows 7
-gta vice city directx 11 download for windows 7
-gta vice city steam download for windows 7
-gta vice city softonic download for windows 7
-gta vice city pc game free download full version for windows 7
-gta vice city pc game setup free download full version for windows 7
-how to install and play gta vice city on windows 7
-how to fix mouse problem in gta vice city on windows 7
-how to run gta vice city in compatibility mode on windows 7
-how to change language in gta vice city on windows 7
-how to increase graphics in gta vice city on windows 7
-how to use cheats in gta vice city on windows 7
-how to save game in gta vice city on windows 7
-how to play multiplayer in gta vice city on windows 7

-

Download and install GTA Vice City from Rockstar Games Launcher or Microsoft Store app

-

The second way to download GTA Vice City for Windows 7 is to use the Rockstar Games Launcher or the Microsoft Store app. Both are free applications that allow you to download, install, and play GTA Vice City and other Rockstar Games titles on your PC. You will need to create an account and sign in to use these applications.

-

To download GTA Vice City from Rockstar Games Launcher, follow these steps:

-
    -
  1. Download and install the Rockstar Games Launcher from the official website.
  2. -
  3. Launch the application and sign in with your Rockstar Games Social Club account. If you don't have one, you can create one for free.
  4. -
  5. Click on the Store tab and find GTA Vice City. If you have purchased the game from the Rockstar Games website, you can click on Redeem Code and enter your product key. If you have not purchased the game yet, you can click on Buy Now and follow the instructions.
  6. -
  7. Once you have redeemed or purchased the game, it will appear in your Library tab. Click on it and then click on Install.
  8. -
  9. Select the destination folder for the game and click on Confirm.
  10. -
  11. Wait for the download and installation to complete. You can check the progress in the Downloads tab.
  12. -
  13. Once the game is installed, you can launch it from the Library tab or from your desktop shortcut.
  14. -
-

To download GTA Vice City from Microsoft Store app, follow these steps:

-
    -
  1. Download and install the Microsoft Store app from the official website.
  2. -
  3. Launch the application and sign in with your Microsoft account. If you don't have one, you can create one for free.
  4. -
  5. Search for GTA Vice City in the search bar and click on it. If you have purchased the game from the Microsoft Store website, you can click on Install. If you have not purchased the game yet, you can click on Buy and follow the instructions.
  6. -
  7. Wait for the download and installation to complete. You can check the progress in the Downloads tab.
  8. -
  9. Once the game is installed, you can launch it from the Start menu or from your desktop shortcut.
  10. -
-

Tips and Tricks for GTA Vice City on Windows 7

-

Now that you have downloaded GTA Vice City for Windows 7, you are ready to enjoy this amazing game. However, there are some tips and tricks that can help you improve your gaming experience. Here are some of them:

-

Adjust the graphics settings and resolution for optimal performance

-

GTA Vice City is an old game, but it still has some graphics options that can affect its performance on your PC. To access these options, launch the game and go to Options > Display Setup > Advanced. Here you can adjust the following settings:

-
    -
  • Draw Distance: This determines how far you can see in the game world. The higher it is, the more detailed the environment will be, but also more demanding on your PC. You can lower it if you experience lag or stuttering.
  • -
  • Frame Limiter: This limits the frame rate of the game to 30 FPS. This can prevent screen tearing and improve stability, but also reduce smoothness. You can turn it off if you want higher FPS, but make sure your monitor supports it.
  • -
  • Wide Screen: This enables or disables wide screen mode for the game. If you have a wide screen monitor, you can turn it on to fill the screen without stretching or cropping. However, some HUD elements may be cut off or misplaced.
  • -
  • Trails: This adds a motion blur effect to the game. This can make the game look more realistic and cinematic, but also blurrier and darker. You can turn it off if you prefer sharper and brighter graphics.
  • -
  • Mip Mapping: This improves the quality of textures in the game by reducing aliasing and shimmering. This can make the game look smoother and more detailed, but also use more memory. You can turn it off if you have low RAM or VRAM.
  • -
-

In addition to these settings, you can also change the resolution of the game by going to Options > Display Setup > Video Mode. Here you can select a resolution that matches your monitor's native resolution or aspect ratio. The higher the resolution, the sharper and clearer the game will be, but but also more demanding on your PC. You can lower it if you experience low FPS or crashes.

-

Use keyboard and mouse or controller for better gameplay

-

GTA Vice City supports both keyboard and mouse and controller inputs for playing the game. You can choose the one that suits your preference and comfort. However, there are some advantages and disadvantages of each option that you should consider:

-
    -
  • Keyboard and mouse: This option gives you more precise and accurate control over your character and camera movements, especially when aiming and shooting. You can also customize the key bindings to your liking by going to Options > Controller Setup > Redefine Controls. However, this option may be less comfortable and intuitive for some players, especially when driving or flying vehicles.
  • -
  • Controller: This option gives you more comfortable and intuitive control over your character and vehicle movements, especially when driving or flying. You can also use vibration feedback to feel the game more realistically. However, this option may be less precise and accurate for some players, especially when aiming and shooting. You can also adjust the sensitivity and inversion of the controller by going to Options > Controller Setup > Configuration.
  • -
-

Ultimately, the choice is yours. You can experiment with both options and see which one works better for you. You can also switch between them anytime during the game by pressing F9.

-

Access cheats and mods for more fun

-

GTA Vice City is a game that offers a lot of freedom and fun for the players. However, if you want to spice up your gameplay even more, you can use cheats and mods for GTA Vice City on Windows 7.

-

Cheats are codes that you can enter during the game to activate various effects, such as changing the weather, spawning vehicles, getting weapons, increasing health, etc. To enter a cheat code, simply type it on your keyboard while playing the game. For example, typing THUGSTOOLS will give you all the weapons from level 1. You can find a list of all the cheat codes for GTA Vice City here. However, be careful when using cheats, as they may affect your game progress and achievements.

-

Mods are modifications that you can install on your PC to change or add new features, content, graphics, sounds, etc. to the game. There are thousands of mods available online for GTA Vice City, ranging from simple tweaks to total conversions. You can find some of the best mods for GTA Vice City here. However, be careful when installing mods, as they may cause compatibility issues or bugs with your game. Always backup your game files before modding and follow the instructions carefully.

-

Conclusion

-

GTA Vice City is one of the best games ever made, and you can play it on your Windows 7 PC with ease. All you need to do is download GTA Vice City for Windows 7 from Rockstar Games website or Microsoft Store app, install it on your PC, and launch it from Rockstar Games Launcher or Microsoft Store app. You can also adjust the graphics settings and resolution for optimal performance, use keyboard and mouse or controller for better gameplay, and access cheats and mods for more fun.

-

So what are you waiting for? Download GTA Vice City for Windows 7 today and enjoy this classic game like never before!

-

FAQs

-

Q: Is GTA Vice City compatible with Windows 7?

-

A: Yes, GTA Vice City is compatible with Windows 7. However, you may need to run it in compatibility mode or as an administrator if you encounter any problems.

-

Q: How much does GTA Vice City cost on Windows 7?

-

A: GTA Vice City costs $9.99 on both Rockstar Games website and Microsoft Store app. You can purchase it online with a credit card or PayPal account.

-

Q: Can I play GTA Vice City online on Windows 7?

-

A: No, GTA Vice City does not have an official online mode on Windows 7. However, you can use third-party multiplayer mods such as MTA:VC or VC:MP to play GTA Vice City online with other players.

-

Q: How can I save my game progress in GTA Vice City on Windows 7?

-

A: You can save your game progress in GTA Vice City on Windows 7 by visiting any of the safe houses marked with a pink cassette icon on the map. You can also use the quick save feature by pressing F5 while playing the game.

-

Q: How can I uninstall GTA Vice City from Windows 7?

-

A: You can uninstall GTA Vice City from Windows 7 by following these steps

: - Go to Control Panel > Programs and Features. - Find GTA Vice City in the list of installed programs and click on Uninstall. - Follow the instructions on the screen to complete the uninstallation process. - Alternatively, you can also uninstall GTA Vice City from Rockstar Games Launcher or Microsoft Store app by right-clicking on the game icon and selecting Uninstall.

-

I hope you found this article helpful and informative. If you have any questions or feedback, please leave a comment below. Thank you for reading!

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Green Force Zombies - Save the World from the Undead in this Free Game.md b/spaces/congsaPfin/Manga-OCR/logs/Green Force Zombies - Save the World from the Undead in this Free Game.md deleted file mode 100644 index 004c70b44350fd8862453742595ca750e040e1e9..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Green Force Zombies - Save the World from the Undead in this Free Game.md +++ /dev/null @@ -1,130 +0,0 @@ - -

Green Force: Zombies APK - A Thrilling Zombie Shooter Game for Android

-

If you are looking for a fun and exciting game that will test your skills and reflexes, then you should try Green Force: Zombies APK. This is a first-person shooter and survival game that will put you in the shoes of a zombie hunter who has to save the world from the undead. In this article, we will tell you everything you need to know about this game, including what it is, how to download and install it, why you should play it, and some tips and tricks to help you survive. So, let's get started!

-

What is Green Force: Zombies APK?

-

A brief introduction to the game and its features

-

Green Force: Zombies APK is a game developed by Raptor Interactive & Trinity Games. It is available for free on Google Play Store or you can download the APK file from other sources . The game has a rating of 4.3 out of 5 stars on Google Play Store, based on over 0 reviews.

-

green force zombies apk


DOWNLOADhttps://urlca.com/2uOfPo



-

The game is set in the near future, where a deadly virus has infected the city of Green, turning people into living dead creatures-zombies. The government has failed to stop the outbreak, so it is up to you, as one of the few survivors, to fight back and save the world. You can choose from different weapons, such as pistols, rifles, shotguns, machine guns, and more. You can also find various items in the game, such as health kits, ammo, grenades, and more. You can use these items to help you in your missions.

-

How to download and install the game on your Android device

-

To download and install Green Force: Zombies APK on your Android device, you can follow these simple steps:

-
    -
  1. Go to Google Play Store or any other source that offers the APK file of the game.
  2. -
  3. Tap on the download button and wait for the file to be downloaded.
  4. -
  5. Once the file is downloaded, locate it on your device's storage and tap on it.
  6. -
  7. If you see a warning message that says "Install blocked", go to your device's settings and enable "Unknown sources" under security options.
  8. -
  9. Tap on "Install" and wait for the installation process to finish.
  10. -
  11. Once the installation is done, you can launch the game from your app drawer or home screen.
  12. -
-

Why should you play Green Force: Zombies APK?

-

The game offers a unique experience of FPS gaming and zombie shooting

-

Green Force: Zombies APK is a game that combines the elements of FPS gaming and zombie shooting. You can enjoy the thrill of shooting zombies in the head, blowing them up with grenades, or slicing them with melee weapons. You can also experience the adrenaline rush of running and dodging from the hordes of zombies that are chasing you. The game has realistic physics and ragdoll effects that make the zombies look and feel alive. You can also see the blood and gore effects that add to the horror and excitement of the game.

-

The game has a story mode with different missions and difficulty levels

-

Green Force: Zombies APK has a story mode that follows the plot of the zombie outbreak and your role as a survivor. You can choose from different missions that have different objectives, such as rescuing other survivors, destroying zombie nests, finding supplies, and more. You can also select the difficulty level that suits your skills and preferences, from easy to hard. The game has a dynamic difficulty system that adjusts the number and strength of zombies according to your performance. The game also has a ranking system that shows your progress and achievements.

-

The game has a survival and turbo mode for endless zombie killing

-

If you want to test your limits and see how long you can survive against the zombies, you can try the survival and turbo modes of Green Force: Zombies APK. In survival mode, you have to fight waves of zombies that become more and more challenging as you progress. You have to use your weapons and items wisely and keep an eye on your health and ammo. In turbo mode, you have to kill as many zombies as possible in a limited time. You can use special items such as time freeze, double damage, and more to boost your score. You can also compare your scores with other players on the online leaderboard.

-

green force zombies hd apk download
-green force zombies pro apk free
-green force zombies survival mode
-green force zombies turbo mode
-green force zombies game review
-green force zombies weapons and items
-green force zombies story and missions
-green force zombies multiplayer fps
-green force zombies raptor interactive
-green force zombies trinity games
-green force zombies cure the infected
-green force zombies save the world
-green force zombies virus outbreak
-green force zombies city of green
-green force zombies android game
-green force zombies premium version
-green force zombies softonic app
-green force zombies apkcombo site
-green force zombies google play store
-green force zombies facebook page
-green force zombies latest update
-green force zombies tips and tricks
-green force zombies cheats and hacks
-green force zombies mod apk unlimited money
-green force zombies offline mode
-green force zombies online mode
-green force zombies best weapons
-green force zombies graphics and sound
-green force zombies gameplay and controls
-green force zombies ratings and reviews
-green force zombies bugs and issues
-green force zombies support and feedback
-green force zombies system requirements
-green force zombies compatible devices
-green force zombies screenshots and videos
-green force zombies features and benefits
-green force zombies alternatives and similar games
-green force zombies walkthrough and guide
-green force zombies achievements and rewards
-green force zombies leaderboards and rankings
-green force zombies forums and communities
-green force zombies news and updates
-green force zombies faq and help
-green force zombies wiki and information
-green force zombies fan art and memes
-green force zombies easter eggs and secrets
-green force zombies challenges and events
-green force zombies skins and customizations
-green force zombies fun facts and trivia

-

The game has a variety of weapons and items to choose from

-

One of the best features of Green Force: Zombies APK is the variety of weapons and items that you can use in the game. You can choose from different categories of weapons, such as pistols, rifles, shotguns, machine guns, sniper rifles, rocket launchers, and more. Each weapon has its own advantages and disadvantages, such as damage, range, accuracy, fire rate, reload speed, and more. You can also upgrade your weapons to make them more powerful and effective. You can also find various items in the game, such as health kits, ammo boxes, grenades, mines, turrets, drones, and more. These items can help you in different situations, such as healing yourself, replenishing your ammo, exploding zombies, defending yourself, or attacking from a distance.

-

The game has stunning graphics and sound effects

-

Green Force: Zombies APK has amazing graphics and sound effects that make the game more immersive and realistic. The game has high-quality 3D models and textures that create a detailed and vivid environment. The game also has dynamic lighting and shadows that enhance the atmosphere and mood of the game. The game also has realistic sound effects that match the actions and events in the game. You can hear the gunshots, explosions, zombie groans, screams, footsteps, and more. The game also has a background music that adds to the tension and excitement of the game.

-

Tips and tricks to play Green Force: Zombies APK

-

How to manage your armor and ammo wisely

-

One of the most important things to do in Green Force: Zombies APK is to manage your armor and ammo wisely. Your armor is your protection against zombie attacks. It can absorb some damage before it breaks down. You can see your armor level on the top left corner of the screen. If your armor level is low or zero, you will take more damage from zombie attacks. You can repair your armor by finding armor kits in the game or by buying them from the shop.

-

Your ammo is your resource for shooting zombies. It can run out quickly if you are not careful. You can see your ammo level on the bottom right corner of the screen. If your ammo level is low or zero, you will not be able to shoot zombies. You can replenish your ammo by finding ammo boxes in the game or by buying them from the shop.

-

You should always keep an eye on your armor and ammo levels and try to conserve them as much as possible. You should also try to find or buy armor kits and ammo boxes whenever you can.

-

How to use health kits, grenades, and other items effectively

-

Another important thing to do in Green Force: Zombies APK is to use health kits, grenades, and other items effectively. These items can make a big difference in your survival and performance in the game. You can see your items on the bottom left corner of the screen. You can use them by tapping on their icons or by swiping on the screen.

-

Health kits are items that can restore your health when you are injured. You can see your health level on the top right corner of the screen. If your health level is low or zero, you will die and lose the game. You can use health kits by tapping on their icon or by swiping up on the screen. You should use health kits when your health level is critically low or when you are in a safe place.

-

Grenades are items that can explode and damage zombies in a large area. You can use grenades by tapping on their icon or by swiping down on the screen. You should aim carefully before throwing a grenade, as it can also damage you if you are too close to the blast. You should use grenades when you are surrounded by zombies or when you want to clear a path.

-

Other items have different effects and functions in the game; examples include mines, turrets, drones, time freeze, double damage, and more. You can use these items by tapping on their icons or by swiping left or right on the screen. Use them when you need an extra advantage or support in the game.

-

How to aim and shoot accurately

-

One of the most basic and essential skills to have in Green Force: Zombies APK is to aim and shoot accurately. This will help you kill zombies faster and more efficiently, as well as save your ammo and time. To aim and shoot accurately, you can follow these tips:

  • Use the crosshair on the center of the screen to aim at your target. You can also zoom in or out by pinching on the screen.
  • Try to aim for the head of the zombies, as this will deal more damage and cause instant death. You can also aim for other body parts, such as arms, legs, or torso, to slow down or disable the zombies.
  • Use the right weapon for the right situation. For example, use pistols or rifles for long-range shooting, shotguns or machine guns for close-range shooting, sniper rifles or rocket launchers for special shooting, and so on.
  • Adjust your sensitivity and control settings according to your preference and comfort. You can do this by going to the settings menu and choosing the options that suit you best.
  • Practice your aiming and shooting skills in different modes and levels of the game. You can also watch videos or tutorials online to learn from other players and experts.

How to avoid getting surrounded by zombies

-

Another important skill to have in Green Force: Zombies APK is to avoid getting surrounded by zombies. This will help you prevent getting overwhelmed and killed by the zombies, as well as save your health and armor. To avoid getting surrounded by zombies, you can follow these tips:

  • Keep moving and don't stay in one place for too long. Zombies will spawn from different directions and locations, so you have to be alert and aware of your surroundings.
  • Use your radar on the top left corner of the screen to see where the zombies are coming from. The red dots indicate the zombies that are close to you, while the green dots indicate the zombies that are far from you.
  • Use your environment to your advantage. You can use obstacles, such as walls, cars, barrels, crates, etc., to block or slow down the zombies. You can also use explosive objects, such as gas tanks, barrels, cars, etc., to blow up zombies in a large area.
  • Use your items to help you escape or distract the zombies. You can use grenades, mines, turrets, drones, etc., to damage or kill zombies in a large area. You can also use time freeze, double damage, etc., to give yourself some time or power to get away from the zombies.
  • Use your teammates or allies to help you fight or cover you from the zombies. You can play with other players online or with AI bots offline. You can also find other survivors in some missions that will join you and assist you.

How to complete the missions and earn rewards

-

The last skill that we will discuss in this article is how to complete the missions and earn rewards in Green Force: Zombies APK. This will help you progress in the game and unlock new weapons, items, modes, levels, and more. To complete the missions and earn rewards, you can follow these tips:

  • Follow the objectives and instructions of each mission. You can see them on the top right corner of the screen. You can also tap on the map icon to see the overview of the mission and your location.
  • Complete the missions within the time limit and without dying. You can see the timer and your health level on the top right corner of the screen. If you run out of time or health, you will fail the mission and have to restart it.
  • Kill as many zombies as possible and collect as many items as possible. You can see your kill count and your item count on the bottom right corner of the screen. Killing zombies and collecting items will increase your score and your rewards.
  • Use your weapons and items wisely and efficiently. You can see your weapon and item levels on the bottom left corner of the screen. Using your weapons and items will consume your ammo and your item charges. You can replenish them by finding or buying ammo boxes and item boxes in the game.
  • Earn stars and coins for completing the missions. You can see your star and coin count on the top left corner of the screen. Earning stars and coins will unlock new weapons, items, modes, levels, and more in the game.

Conclusion

-

In conclusion, Green Force: Zombies APK is a thrilling zombie shooter game for Android that will keep you entertained and challenged for hours. The game has a story mode with different missions and difficulty levels, a survival and turbo mode for endless zombie killing, a variety of weapons and items to choose from, stunning graphics and sound effects, and more. The game also has some tips and tricks that will help you improve your skills and performance in the game. If you are a fan of FPS gaming and zombie shooting, you should definitely download and install Green Force: Zombies APK on your Android device today!

-

FAQs

-

What is the difference between turbo and survival modes?

-

Turbo mode is a mode where you have to kill as many zombies as possible in a limited time. Survival mode is a mode where you have to survive as long as possible against waves of zombies.

-

How can I get more weapons and items in the game?

-

You can get more weapons and items in the game by earning stars and coins from completing missions, killing zombies, collecting items, etc. You can also buy them from the shop with real money.

-

Is the game free to play or does it have in-app purchases?

-

The game is free to play but it has in-app purchases that allow you to buy more weapons, items, coins, etc.

-

Can I play the game offline or do I need an internet connection?

-

You can play the game offline but you need an internet connection to access some features, such as online multiplayer, online leaderboard, etc.

-

Is the game compatible with my Android device?

-

The game is compatible with most Android devices that have Android 4.1 or higher. However, some devices may experience performance issues or bugs due to different specifications or settings.


Mirzapur Season 1: A Crime Thriller That Will Keep You Hooked

-

If you are a fan of crime dramas, action thrillers, and gangster sagas, then you must have heard of Mirzapur. Mirzapur is a web series that premiered on Amazon Prime Video in 2018 and became an instant hit among the viewers. The show is set in the lawless city of Mirzapur, where violence, corruption, and power rule the roost. The show revolves around the rivalry between two families, the Tripathis and the Pandits, who are involved in the illegal gun trade, drug trafficking, and political manipulation. The show features some of the finest actors of the Hindi film industry, such as Pankaj Tripathi, Ali Fazal, Vikrant Massey, Shweta Tripathi, Divyendu Sharma, Rasika Dugal, and more. The show has received rave reviews from critics and audiences alike for its gripping storyline, realistic portrayal of the crime world, and stellar performances by the cast. The show has also been nominated for several awards, including the Best Drama Series at the iReel Awards in 2019.

-


Mirzapur Season 1 Plot Summary

-

Mirzapur Season 1 consists of nine episodes that follow the lives of two brothers, Guddu and Bablu Pandit, who get entangled in the web of crime and violence after crossing paths with Munna Tripathi, the son of Akhandanand Tripathi aka Kaleen Bhaiya, the don of Mirzapur. Here is a brief summary of each episode:

  • Episode 1: Jhandu: A shocking incident at a wedding procession ignites a series of events that bring Guddu and Bablu into the orbit of Kaleen Bhaiya, who offers them a job in his gun business.
  • Episode 2: Gooda: Guddu and Bablu have a life-altering choice to make. Munna gets a lesson in life from his father. A new contender for Mirzapur emerges.
  • Episode 3: Wafadar: Guddu and Bablu start working for Kaleen Bhaiya and learn the tricks of the trade. Munna is unhappy with their rise and plots to bring them down. A new inspector joins the police station.
  • Episode 4: Virginity: Guddu and Bablu have a run-in with a rival gang. Munna takes a cut in an opium deal without his father's consent. Golu, the younger sister of a slain gangster, decides to run for the college elections.
  • Episode 5: Bhaukal: Guddu and Bablu make a risky move to expand their business. Munna tries to sabotage their plans. The police raid the wedding of a local politician's son.
  • Episode 6: Barfi: Guddu and Bablu face a backlash from the rival gang. Munna makes a deal with the police to get rid of them. Kaleen Bhaiya is unhappy with Munna's actions and sends him away. Beena, Kaleen Bhaiya's wife, has a secret affair.
  • Episode 7: Lions of Mirzapur: Guddu and Bablu are hailed as the new lions of Mirzapur after eliminating their enemies. Munna returns to Mirzapur and vows to take revenge. Golu wins the college elections by a narrow margin.
  • Episode 8: Tandav: Guddu and Bablu attend a party at the Tripathi mansion, where they are ambushed by Munna and his men. A bloody shootout ensues, leaving several dead and wounded.
  • Episode 9: Yogya: The season finale reveals the fate of Guddu, Bablu, and their families after the massacre at the party. Kaleen Bhaiya faces a challenge from an old rival. Golu makes a bold move to avenge her brother's death.

Mirzapur Season 1 Cast and Characters

-

Mirzapur Season 1 boasts of an ensemble cast of talented actors who bring their characters to life with their nuanced performances. Here are some of the main cast and characters of Mirzapur Season 1:

  • Akhandanand Tripathi / Kaleen Bhaiya (Pankaj Tripathi): He is the kingpin of Mirzapur, who controls the illegal gun trade, drug trafficking, and political influence in the city. He is ruthless, cunning, and ambitious, but also has a soft spot for his family.
  • Munna Tripathi (Divyendu Sharma): He is the son and heir apparent of Kaleen Bhaiya, who is eager to prove himself as worthy of his father's legacy. He is impulsive, arrogant, and violent, but also insecure and jealous of anyone who poses a threat to his position.
  • Guddu Pandit (Ali Fazal): He is one of the two brothers who work for Kaleen Bhaiya after getting involved in his gun business. He is muscular, fearless, and loyal, but also hot-headed and reckless. He dreams of becoming the king of Mirzapur one day.
  • Bablu Pandit (Vikrant Massey): He is the other brother who works for Kaleen Bhaiya along with Guddu. He is smart, calm, and strategic, but also compassionate and moral. He acts as the voice of reason for Guddu and tries to balance their personal and professional lives.
  • Golu Gupta (Shweta Tripathi): She is the younger sister of a slain gangster who was involved in the college politics. She is brave, intelligent, and determined to make a change in the corrupt system. She develops a romantic relationship with Guddu.
  • Beena Tripathi (Rasika Dugal): She is the second wife of Kaleen Bhaiya, who is unhappy with her marriage and seeks sexual satisfaction elsewhere. She is beautiful, seductive, and cunning, but also vulnerable and lonely.
  • Ramakant Pandit (Rajesh Tailang): He is the father of Guddu and Bablu, who is a lawyer by profession and an honest man by principle. He disapproves of his sons' involvement in the crime world and tries to save them from its consequences.
  • Dimpy Pandit (Harshita Gaur): She is the sister of Guddu and Bablu, who is unaware of their illegal activities and supports them in their troubles. She is innocent, cheerful, and loving, but also courageous and loyal.

Mirzapur Season 1 Reviews and Ratings

-

Mirzapur Season 1 has received positive reviews and ratings from both critics and viewers for its captivating plot, realistic setting, and brilliant acting. The show has an IMDb rating of 8.4 out of 10, based on more than 80,000 user ratings. The show also has a Rotten Tomatoes rating of 92%, based on 12 critic reviews. The audience response has been overwhelmingly favorable, with many praising the show for its gritty and dark portrayal of the crime world, its engaging and complex characters, and its thrilling and unpredictable twists and turns. The show has also been compared to other popular crime dramas, such as Narcos, Sacred Games, and Gangs of Wasseypur.

-

Mirzapur Season 1 Download Options

-

If you are interested in watching Mirzapur Season 1, you have several options to download or stream the show online. The most legal and safe option is to watch the show on Amazon Prime Video, which is the official platform that produced and distributed the show. You can sign up for a free trial or a monthly subscription to access the show and other exclusive content on Amazon Prime Video. However, if you are looking for other alternatives, you can also download the show from some unofficial websites, such as Filmyhit.com and Filmyzilla. These websites offer free downloads of Mirzapur Season 1 in various formats, such as MP4, MKV, AVI, etc. You can also choose from different resolutions, such as 480p, 720p, 1080p, etc.

-

Mirzapur Season 1 Download Risks and Precautions

-

While downloading Mirzapur Season 1 from unofficial websites may seem tempting and convenient, you should also be aware of the risks and precautions involved in doing so. Here are some of the possible dangers and drawbacks of downloading Mirzapur Season 1 from illegal sources:

  • Legal Issues: Piracy and Copyright Infringement: Downloading Mirzapur Season 1 from unauthorized websites is considered as piracy and a violation of the copyright laws. You may face legal consequences, such as fines, lawsuits, or even imprisonment, if you are caught downloading or distributing pirated content. You may also be liable for damages to the original creators and producers of the show.
  • Malware and Viruses: Potential Harm to Devices and Data: Downloading Mirzapur Season 1 from untrusted websites may expose your devices and data to malware and viruses, which can harm your system and compromise your security. You may end up losing your important files, personal information, or even money, if you download infected files or click on malicious links. You may also risk infecting other devices or networks that are connected to yours.
  • Quality and Authenticity: Low Resolution and Dubbed Versions: Downloading Mirzapur Season 1 from unofficial websites may result in low quality and authenticity of the show. You may not get the original and high-resolution version of the show, but rather a compressed and pixelated one. You may also get a dubbed version of the show, which may not match the original voice and tone of the actors. You may miss out on the subtleties and nuances of the show, which may affect your enjoyment and understanding of the show.

Conclusion

-

Mirzapur Season 1 is a captivating and thrilling crime drama that will keep you hooked till the end. The show has a stellar cast, a gripping plot, and a realistic setting that will immerse you in the world of Mirzapur. The show is available on Amazon Prime Video, which is the best and safest option to watch the show legally and in high quality. However, if you are looking for other options, you can also download the show from some unofficial websites, such as Filmyhit.com and Filmyzilla. But be careful of the risks and precautions involved in doing so, as you may face legal issues, malware attacks, or low quality versions of the show. The choice is yours, but we recommend you to watch Mirzapur Season 1 on Amazon Prime Video for the best experience.

-

FAQs

-

Here are some of the frequently asked questions about Mirzapur Season 1:

  • Q1: When was Mirzapur Season 1 released?
  • A1: Mirzapur Season 1 was released on November 16, 2018 on Amazon Prime Video.
  • Q2: How many episodes are there in Mirzapur Season 1?
  • A2: Mirzapur Season 1 has nine episodes, each ranging from 40 to 60 minutes.
  • Q3: Is Mirzapur Season 1 based on a true story?
  • A3: Mirzapur Season 1 is not based on a true story, but it is inspired by the real-life incidents and scenarios of the crime world in Uttar Pradesh, India.
  • Q4: Will there be a Mirzapur Season 2?
  • A4: Yes, there will be a Mirzapur Season 2, which is expected to release in late 2020 or early 2021 on Amazon Prime Video.
  • Q5: Where can I watch Mirzapur Season 1 legally?
  • A5: You can watch Mirzapur Season 1 legally on Amazon Prime Video, which is the official platform that produced and distributed the show.


How to Download MetaTrader 5 for PC

-

If you are looking for a powerful and versatile trading platform that can handle multiple markets and instruments, then you should consider using MetaTrader 5. MetaTrader 5 is a multi-asset platform that allows you to trade Forex, stocks, futures, options, and more. It offers superior tools for comprehensive price analysis, algorithmic trading, copy trading, and market research. In this article, we will show you how to download MetaTrader 5 for PC and what benefits it can bring to your trading.

-


Step 1: Visit the official website of MetaTrader 5

-

The first step to download MetaTrader 5 for PC is to visit the official website of the platform. You can do so by clicking here. On the website, you will find all the information you need about MetaTrader 5, including its features, functions, advantages, and screenshots. You will also see a button that says "Download for Windows". This is where you can download the desktop version of MetaTrader 5 for your PC.

-

Step 2: Choose the version that suits your operating system

-

Before you click on the download button, make sure that you choose the version that suits your operating system. MetaTrader 5 is compatible with Windows, macOS, Linux, Android, iOS, and web browsers. If you are using Windows, you can download either the x32 or x64 bit version depending on your system specifications. If you are using macOS or Linux, you can download the respective versions from here. If you are using Android or iOS devices, you can download the mobile apps from here. If you want to use MetaTrader 5 from any browser without downloading anything, you can use the web terminal from here.

-

Step 3: Click on the download link and run the installer

-

Once you have chosen the version that suits your operating system, click on the download link and save the file to your computer. The file size is about 20 MB and it should take only a few seconds to download. After downloading the file, run it by double-clicking on it or right-clicking and choosing "Run as administrator". This will launch the installer of MetaTrader 5.

-

Step 4: Follow the instructions on the screen to complete the installation

-

The installer of MetaTrader 5 will guide you through the installation process. You will see a welcome screen that asks you to agree to the terms and conditions of the software. After that, you will see a screen that allows you to choose the installation folder and the program group. You can leave them as default or change them according to your preference. Then, you will see a screen that shows the progress of the installation. It should take only a few minutes to install MetaTrader 5 on your PC.

-

Step 5: Launch MetaTrader 5 and log in to your trading account

-

After the installation is complete, you can launch MetaTrader 5 by clicking on the shortcut icon on your desktop or from the Start menu. You will see a splash screen that shows the logo and the version of MetaTrader 5. Then, you will see the main interface of MetaTrader 5, which consists of several windows and panels. To start trading, you need to log in to your trading account. You can do so by clicking on "File" and then "Login to Trade Account". You will need to enter your account number, password, and server name. If you don't have a trading account yet, you can open one from here. You can also use a demo account to practice trading with virtual money.

-

Benefits of Using MetaTrader 5 for PC

-

Now that you have downloaded and installed MetaTrader 5 for PC, you can enjoy the benefits of using this platform for your trading activities. MetaTrader 5 is one of the most popular and widely used trading platforms in the world, and for good reasons. Here are some of the benefits of using MetaTrader 5 for PC:

-


-

Full-featured trading platform with advanced tools and functions

-

MetaTrader 5 is not just a trading platform, but a complete trading solution that provides you with everything you need to trade successfully. It has a rich set of tools and functions that allow you to perform comprehensive price analysis, execute orders, manage risks, monitor market conditions, and more. Some of the tools and functions that MetaTrader 5 offers are:

  • A powerful trading system that supports four execution modes (instant, request, market, and exchange) and six order types (market, limit, stop, stop limit, stop loss, and take profit).
  • A flexible charting system that supports 21 timeframes (from one minute to one month) and three chart types (line, bar, and candlestick). You can also customize your charts with various colors, styles, indicators, objects, and templates.
  • A large collection of technical indicators (over 80) and analytical tools (such as trend lines, channels, Fibonacci retracements, Gann tools, etc.) that help you identify market trends, patterns, signals, and opportunities.
  • An integrated economic calendar and news feed that keep you updated with the latest events and announcements that affect the market movements.
  • A built-in market of trading applications (over 10,000) that allow you to buy or rent expert advisors (EAs), custom indicators, scripts, libraries, and other useful tools for your trading.
  • A sophisticated strategy tester that allows you to backtest, optimize, and debug your EAs using historical data and various testing modes (such as visual mode, optimization mode, forward testing mode, etc.).
  • A user-friendly MQL5 editor that allows you to create your own EAs, custom indicators, scripts, and libraries using the MQL5 programming language. You can also access the MQL5 community and the MQL5 cloud network for additional resources and support.

Access to multiple markets and instruments, including Forex, stocks, futures, and options

-

MetaTrader 5 is a multi-asset platform that allows you to trade not only Forex, but also other markets and instruments, such as stocks, futures, options, CFDs, ETFs, bonds, and cryptocurrencies. You can access these markets and instruments from various brokers and exchanges around the world. You can also trade on different market segments, such as spot, forward, swap, and contract markets. With MetaTrader 5, you can diversify your trading portfolio and explore new opportunities in different markets.

-

Ability to use automated trading systems, technical indicators, and trading signals

-

MetaTrader 5 is an ideal platform for automated trading enthusiasts. It allows you to use expert advisors (EAs), which are automated trading systems that can execute trades for you according to predefined rules and conditions. You can create your own EAs using the MQL5 editor or buy or rent them from the market of trading applications. You can also use custom indicators, which are technical analysis tools that can help you identify market trends, signals, and opportunities. You can create your own custom indicators using the MQL5 editor or buy or rent them from the market of trading applications. Moreover, you can use trading signals, which are recommendations from other traders or EAs that you can copy or subscribe to. You can find thousands of trading signals from various providers on the MetaTrader 5 platform.

-

Customizable interface and charting options

-

MetaTrader 5 has a user-friendly and customizable interface that allows you to adjust it to your preferences and needs. You can arrange the windows and panels of the platform according to your liking. You can also change the color scheme, font size, language, and other settings of the platform. Furthermore, you can customize your charts with various options, such as adding indicators, objects, templates, profiles, etc. You can also switch between different chart types, timeframes, zoom levels, etc. You can also use multiple monitors and detachable charts to enhance your trading experience.

-

High performance and reliability

-

MetaTrader 5 is a high-performance and reliable platform that can handle high volumes of data and transactions without compromising speed or quality. It uses a distributed architecture that ensures fast execution of orders and smooth operation of the platform. It also uses a 128-bit encryption system that protects your data and transactions from unauthorized access. Moreover, MetaTrader 5 has a built-in backup and recovery system that ensures the safety of your data in case of any failure or malfunction.

-

Comparison of MetaTrader 5 with MetaTrader 4

-

MetaTrader 5 is the successor of MetaTrader 4, which is another popular trading platform developed by MetaQuotes Software Corp. However, MetaTrader 5 is not just an upgraded version of MetaTrader 4, but a completely new and different platform that has many advantages over its predecessor. Here are some of the main differences between MetaTrader 5 and MetaTrader 4:

-

MetaTrader 5 is more advanced and versatile than MetaTrader 4

-

MetaTrader 5 is a multi-asset platform that supports more markets and instruments than MetaTrader 4. While MetaTrader 4 is mainly designed for Forex trading, MetaTrader 5 also covers exchange-traded instruments such as stocks, futures, and options. MetaTrader 5 additionally offers two position accounting systems: netting, in which all trades in one instrument are combined into a single net position, and hedging, in which you can hold several positions in the same instrument in opposite directions. For example, if you buy 10 lots of EUR/USD and then sell 5 lots of EUR/USD, under hedging you will have two separate positions of 10 lots and 5 lots of EUR/USD, while under netting you would be left with one net long position of 5 lots. Netting is more suitable for exchange-traded instruments, such as stocks and futures, while hedging is more suitable for over-the-counter instruments, such as Forex and CFDs. You can choose the position accounting system that suits your trading style and strategy.

-

MetaTrader 5 has a larger community and market of trading applications

-

MetaTrader 5 has a larger community and market of trading applications than MetaTrader 4. MetaTrader 5 has over 10,000 trading applications available on its market, while MetaTrader 4 has only about 2,000. You can find a wide range of expert advisors, custom indicators, scripts, libraries, and other useful tools for your trading on the MetaTrader 5 market. You can also access the MQL5 community, which is a network of traders and developers who share their knowledge, experience, and ideas. You can join the forums, blogs, chats, groups, articles, webinars, and contests on the MQL5 community. You can also use the MQL5 cloud network, which is a distributed computing network that allows you to use the computing power of thousands of computers around the world for your strategy testing and optimization.

-

Tips and Tricks for Using MetaTrader 5 for PC

-

Now that you know the benefits and differences of MetaTrader 5 compared to MetaTrader 4, you might want to learn some tips and tricks for using MetaTrader 5 for PC more effectively and efficiently. Here are some of them:

-

How to customize your workspace and preferences

-

One of the first things you might want to do when using MetaTrader 5 for PC is to customize your workspace and preferences according to your liking. You can do so by clicking on "Tools" and then "Options". You will see a window that allows you to change various settings of the platform, such as general settings, server settings, chart settings, trade settings, expert advisor settings, notifications settings, email settings, FTP settings, community settings, signals settings, web request settings, and events settings. You can also customize your workspace by arranging the windows and panels of the platform as you wish. You can resize, move, dock, undock, hide, or close any window or panel by using the mouse or the keyboard shortcuts.

-

How to use the Market Watch window and the Depth of Market feature

-

The Market Watch window is one of the most important windows of MetaTrader 5 for PC. It shows you the list of instruments that are available for trading on your broker or exchange. You can see the bid and ask prices, the spread, the high and low prices, the time, and the volume of each instrument. You can also see the tick chart, which shows the price movements of the selected instrument in real time. You can customize the Market Watch window by adding, removing, hiding, or sorting the instruments. You can also access the context menu by right-clicking on any instrument. The context menu allows you to perform various actions, such as opening a new order, opening a new chart, viewing the properties, setting alerts, etc. One of the features that you can access from the Market Watch window is the Depth of Market (DOM) feature. The DOM feature shows you the market depth of the selected instrument, which is the number and volume of buy and sell orders at different price levels. The DOM feature helps you to see the liquidity and volatility of the market and to identify potential support and resistance levels. You can access the DOM feature by clicking on the "Depth of Market" button on the Market Watch window or by pressing Ctrl+D on your keyboard.

-

How to place and manage orders and positions

-

MetaTrader 5 for PC allows you to place and manage orders and positions easily and quickly. You can place orders in various ways, such as using the order window, using the one-click trading feature, using the chart trading feature, using the trade panel, or using an expert advisor. You can also modify or cancel your orders at any time before they are executed. When your order is executed, it becomes a position. A position is a trade that you have opened and that is subject to market fluctuations. You can manage your positions in various ways, such as using the terminal window, using the chart trading feature, using the trade panel, or using an expert advisor. You can also close your positions partially or fully at any time to lock in your profits or losses. To place an order using the order window, you need to click on "New Order" on the toolbar or press F9 on your keyboard. You will see a window that allows you to enter the parameters of your order, such as the instrument, the volume, the type (market or pending), the price (if pending), the stop loss and take profit levels, the comment, and the expiration date (if pending). You can also see the current market price, the spread, and the margin required for your order. After entering the parameters, you can click on "Sell by Market" or "Buy by Market" to place a market order, or click on "Place" to place a pending order. To place an order using the one-click trading feature, you need to enable it by clicking on "Tools" and then "Options". Then, go to the "Trade" tab and check the box that says "One-click trading". You will see a disclaimer that warns you about the risks of using this feature. After reading and agreeing to the disclaimer, you can use the one-click trading feature on the Market Watch window or on the chart. You will see two buttons with bid and ask prices for each instrument. You can click on the bid price to sell or on the ask price to buy. You will see a confirmation message that shows the details of your order. You can also adjust the volume and the deviation (the maximum difference between the requested price and the execution price) by clicking on the arrows next to the buttons. To place an order using the chart trading feature, you need to enable it by right-clicking on the chart and choosing "One-click trading". You will see a trade panel on the top left corner of the chart. You can use this panel to place market or pending orders, as well as to modify or cancel your orders. You can also drag and drop your orders on the chart to change their price levels. You can also double-click on your orders or positions on the chart to open the order window or the position window. To place an order using the trade panel, you need to click on "View" and then "Toolbox". Then, go to the "Trade" tab and click on "New Order". You will see a trade panel that allows you to enter the parameters of your order, similar to the order window. You can also use this panel to modify or cancel your orders, as well as to close your positions. To place an order using an expert advisor, you need to attach it to the chart of the instrument that you want to trade. You can do so by dragging and dropping it from the Navigator window or by double-clicking on it. You will see a window that allows you to adjust the settings and parameters of the expert advisor. You will also see a smiley face on the top right corner of the chart, which indicates that the expert advisor is active and ready to trade. 
You can also enable or disable the expert advisor by clicking on the "Auto Trading" button on the toolbar or by pressing Ctrl+E on your keyboard. To manage your orders and positions using the terminal window, you need to click on "View" and then "Toolbox". Then, go to the "Trade" tab and you will see a list of your orders and positions. You can modify or cancel your orders by right-clicking on them and choosing "Modify or Delete Order". You can also close your positions by right-clicking on them and choosing "Close Position". You can also use the "Modify Position" or "Close by" options to set stop loss and take profit levels or to close a position by using another opposite position.

How to use the Strategy Tester and the MQL5 Editor

-

MetaTrader 5 for PC allows you to use the Strategy Tester and the MQL5 Editor to create, test, and optimize your own expert advisors, custom indicators, scripts, and libraries. The Strategy Tester is a tool that allows you to backtest, optimize, and debug your expert advisors using historical data and various testing modes. The MQL5 Editor is a tool that allows you to create your own expert advisors, custom indicators, scripts, and libraries using the MQL5 programming language. You can access these tools from the "View" menu and then "Strategy Tester" or "MetaEditor".

-

How to use the Strategy Tester

-

To use the Strategy Tester, you need to follow these steps:

  1. Select the expert advisor that you want to test from the drop-down list on the top left corner of the Strategy Tester window.
  2. Select the instrument and the timeframe that you want to test from the drop-down lists on the top right corner of the Strategy Tester window.
  3. Select the testing mode that you want to use from the drop-down list on the bottom left corner of the Strategy Tester window. You can choose between four testing modes: "Every tick", which simulates every price movement; "1 minute OHLC", which simulates only the open, high, low, and close prices of each minute; "Open prices only", which simulates only the open prices of each period; and "Math calculations", which does not use any price data but only the calculations of the expert advisor.
  4. Select the date range that you want to test from the "From" and "To" fields on the bottom right corner of the Strategy Tester window. You can also use the "Use date" checkbox to enable or disable the date range.
  5. Click on the "Settings" button on the bottom right corner of the Strategy Tester window to adjust the settings of your expert advisor, such as the inputs, the optimization criteria, the genetic algorithm parameters, the forward testing mode, etc.
  6. Click on the "Start" button on the bottom right corner of the Strategy Tester window to start the testing process. You will see a progress bar that shows the percentage of completion and the elapsed time.
  7. After the testing process is finished, you can view the results on the different tabs of the Strategy Tester window. You can see the summary of your testing results on the "Results" tab, such as the number of trades, the profit factor, the expected payoff, etc. You can see the list of trades and their details on the "Report" tab, such as the order number, type, size, price, profit, etc. You can see the graphical representation of your testing results on the "Graph" tab, such as the balance curve, the equity curve, etc. You can see the optimization results and their parameters on the "Optimization Results" tab, such as the pass number, input values, profit, drawdown, etc. You can also see a visual mode of your testing results on a separate chart by clicking on the "Open Chart" button on the "Optimization Results" tab or by checking the "Visual mode" checkbox on the bottom left corner of the Strategy Tester window before starting the testing process.
-

How to use the MQL5 Editor

-

To use the MQL5 Editor, you need to follow these steps:

  1. Open the MQL5 Editor by clicking on "Tools" and then "MetaEditor" or by pressing F4 on your keyboard. You will see a window that shows the structure and the code of your expert advisor, custom indicator, script, or library.
  2. Edit the code of your trading application using the MQL5 programming language. You can use the various features of the MQL5 Editor to help you with your coding, such as syntax highlighting, auto-completion, code formatting, code templates, debugging tools, etc. (A minimal expert advisor skeleton is sketched just after this list.)
  3. Compile your code by clicking on the "Compile" button on the toolbar or by pressing F7 on your keyboard. You will see a message that shows whether your code has any errors or warnings. If your code has no errors, you will see a message that says "0 error(s), 0 warning(s), compile time: x ms". If your code has errors or warnings, you will see a message that shows the number and the details of the errors or warnings. You need to fix them before you can compile your code successfully.
  4. Test your code by attaching it to the chart of the instrument that you want to trade. You can do so by dragging and dropping it from the Navigator window or by double-clicking on it. You will see a window that allows you to adjust the settings and parameters of your trading application. You will also see a smiley face on the top right corner of the chart, which indicates that your trading application is active and ready to trade. You can also enable or disable your trading application by clicking on the "Auto Trading" button on the toolbar or by pressing Ctrl+E on your keyboard.
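
To give a concrete idea of what the MQL5 Editor expects, here is a minimal sketch of an expert advisor skeleton. It is not part of the original article: the OnInit, OnTick, and OnDeinit handlers are the standard MQL5 event functions, while the LotSize input, the Comment call, and the Print messages are arbitrary placeholders you would replace with your own logic.

//+------------------------------------------------------------------+
//| Minimal expert advisor skeleton (illustrative sketch only)       |
//+------------------------------------------------------------------+
#property version   "1.00"

input double LotSize = 0.10;   // example input shown in the EA settings dialog

int OnInit()
  {
   // Runs once when the EA is attached to a chart or recompiled.
   Print("Example EA initialised on ", _Symbol);
   return(INIT_SUCCEEDED);
  }

void OnTick()
  {
   // Runs on every incoming tick; the actual trading logic goes here.
   double bid = SymbolInfoDouble(_Symbol, SYMBOL_BID);
   double ask = SymbolInfoDouble(_Symbol, SYMBOL_ASK);
   Comment("bid=", bid, "  ask=", ask);
   // A real EA would evaluate its indicators here and then submit orders,
   // for example through the standard library CTrade class or OrderSend().
  }

void OnDeinit(const int reason)
  {
   // Runs when the EA is removed from the chart.
   Print("Example EA removed, reason code ", reason);
  }

Once a file like this compiles without errors (F7), it appears under "Expert Advisors" in the Navigator window, and you can drag it onto a chart or select it in the Strategy Tester exactly as described in the steps above.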
-

Conclusion

-

MetaTrader 5 for PC is a powerful and versatile trading platform that can help you trade multiple markets and instruments with ease and efficiency. It offers superior tools and functions for comprehensive price analysis, algorithmic trading, copy trading, and market research. It also allows you to customize your interface and charting options, as well as to create, test, and optimize your own trading applications using the MQL5 programming language. MetaTrader 5 for PC is compatible with Windows, macOS, Linux, Android, iOS, and web browsers. You can download it for free from here. Download MetaTrader 5 for PC today and start trading like a pro!

-

FAQs

-

Here are some of the frequently asked questions about MetaTrader 5 for PC:

-

Q: What are the system requirements for MetaTrader 5 for PC?

-

A: The minimum system requirements for MetaTrader 5 for PC are:

  • Operating system: Windows 7 or higher, macOS 10.11 or higher, Linux (Ubuntu 16.04 LTS or higher)
  • Processor: Intel Celeron-based processor with a frequency of 1.7 GHz or higher
  • RAM: 256 MB
  • Storage: 50 MB
  • Screen resolution: 1024x768 pixels
  • Internet connection: broadband (at least 128 kbps)
-

Q: How can I update MetaTrader 5 for PC?

-

A: MetaTrader 5 for PC updates automatically whenever there is a new version available. You don't need to do anything to update it. However, if you want to check for updates manually, you can do so by clicking on "Help" and then "Check for Updates". You will see a message that shows whether there is a new version available or not. If there is a new version available, you can click on "Update" to download and install it.

-

Q: How can I contact MetaQuotes Software Corp., the developer of MetaTrader 5?

-

A: You can contact MetaQuotes Software Corp., the developer of MetaTrader 5, by using one of these methods:

  • Email: info@metaquotes.net
  • Phone: +357-25-875-134
  • Fax: +357-25-875-135
  • Website: https://www.metaquotes.net/
  • Address: MetaQuotes Software Corp., Spyrou Kyprianou 38, CCS Building, Limassol 4003, Cyprus
-

Q: How can I learn more about MetaTrader 5 and MQL5?

-

A: You can learn more about MetaTrader 5 and MQL5 by using one of these resources:

  • The official MetaTrader 5 website and user documentation.
  • Video tutorials covering the platform and the MQL5 language.
  • The MQL5 community forum and blog.

Q: How can I get help or support for MetaTrader 5?

-

A: You can get help or support for MetaTrader 5 by using one of these methods:

  • Contact your broker or exchange, who is the provider of your trading services and platform.
  • Contact MetaQuotes Software Corp., the developer of MetaTrader 5, by using the methods mentioned above.
  • Visit the official website, documentation, video tutorials, forum, or blog of MetaTrader 5 or MQL5, as mentioned above.
  • Search for answers or solutions on the internet, such as on Google, YouTube, Quora, Reddit, etc.
  • Ask for help or advice from other traders or developers on the MQL5 community or other online communities.

\ No newline at end of file diff --git a/spaces/cooelf/Multimodal-CoT/app.py b/spaces/cooelf/Multimodal-CoT/app.py deleted file mode 100644 index 12772195d956a9e82d4432e487e444bed6f0a107..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/app.py +++ /dev/null @@ -1,150 +0,0 @@ -import string -import gradio as gr -import requests -import torch -from transformers import T5Tokenizer -from model import T5ForMultimodalGeneration -from PIL import Image -import timm -from timm.data import resolve_data_config -from timm.data.transforms_factory import create_transform - -rationale_model_dir = "cooelf/MM-CoT-UnifiedQA-Base-Rationale-Joint" -answer_model_dir = "cooelf/MM-CoT-UnifiedQA-Base-Answer-Joint" - -vit_model = timm.create_model("vit_base_patch16_384", pretrained=True, num_classes=0) -vit_model.eval() -config = resolve_data_config({}, model=vit_model) -transform = create_transform(**config) -tokenizer = T5Tokenizer.from_pretrained(rationale_model_dir) -r_model = T5ForMultimodalGeneration.from_pretrained(rationale_model_dir, patch_size=(577, 768)) -a_model = T5ForMultimodalGeneration.from_pretrained(answer_model_dir, patch_size=(577, 768)) - -def inference_chat(input_image,input_text): - with torch.no_grad(): - # print(input_image) - # img = Image.open(input_image).convert("RGB") - input = transform(input_image).unsqueeze(0) - out = vit_model.forward_features(input) - image_features = out.detach() - - source = tokenizer.batch_encode_plus( - [input_text], - max_length=512, - pad_to_max_length=True, - truncation=True, - padding="max_length", - return_tensors="pt", - ) - source_ids = source["input_ids"] - source_mask = source["attention_mask"] - rationale = r_model.generate( - input_ids=source_ids, - attention_mask=source_mask, - image_ids=image_features, - max_length=512, - num_beams=1, - do_sample=False - ) - rationale = tokenizer.batch_decode(rationale, skip_special_tokens=True)[0] - print(rationale) - - input_text = input_text + "\n" + rationale +"\nAnswer:" - print(input_text) - - source = tokenizer.batch_encode_plus( - [input_text], - max_length=512, - pad_to_max_length=True, - truncation=True, - padding="max_length", - return_tensors="pt", - ) - source_ids = source["input_ids"] - source_mask = source["attention_mask"] - answer = a_model.generate( - input_ids=source_ids, - attention_mask=source_mask, - image_ids=image_features, - max_length=64, - num_beams=1, - do_sample=False - ) - - answer = tokenizer.batch_decode(answer, skip_special_tokens=True)[0] - return rationale, answer - - -title = """# Multimodal-CoT""" -# description = """**VLE** (Visual-Language Encoder) is an image-text multimodal understanding model built on the pre-trained text and image encoders. See https://github.com/iflytek/VLE for more details. -# We demonstrate visual question answering systems built with VLE and LLM.""" -# description1 = """**VQA**: The image and the question are fed to a VQA model (VLEForVQA) and the model predicts the answer. - -# **VQA+LLM**: We feed the caption, question, and answers predicted by the VQA model to the LLM and ask the LLM to generate the final answer. 
The outptus from VQA+LLM may vary due to the decoding strategy of the LLM.""" - -with gr.Blocks( - css=""" - .message.svelte-w6rprc.svelte-w6rprc.svelte-w6rprc {font-size: 20px; margin-top: 20px} - #component-21 > div.wrap.svelte-w6rprc {height: 600px;} - """ -) as iface: - state = gr.State([]) - #caption_output = None - gr.Markdown(title) - # gr.Markdown(description) - #gr.Markdown(article) - - with gr.Row(): - with gr.Column(scale=1): - image_input = gr.Image(type="pil",label="Image") - with gr.Row(): - with gr.Column(scale=1): - chat_input = gr.Textbox(lines=1, label="Question") - with gr.Row(): - clear_button = gr.Button(value="Clear", interactive=True,width=30) - submit_button = gr.Button( - value="Submit", interactive=True, variant="primary" - ) - ''' - cap_submit_button = gr.Button( - value="Submit_CAP", interactive=True, variant="primary" - ) - gpt3_submit_button = gr.Button( - value="Submit_GPT3", interactive=True, variant="primary" - ) - ''' - with gr.Column(): - # gr.Markdown(description1) - rationale = gr.Textbox(lines=0, label="Rationale") - answer = gr.Textbox(lines=0, label="Answer") - - chat_input.submit( - inference_chat, - [ - image_input, - chat_input, - ], - [rationale, answer], - ) - clear_button.click( - lambda: ("", [],"",""), - [], - [chat_input, state, rationale, answer], - queue=False, - ) - submit_button.click( - inference_chat, - [ - image_input, - chat_input, - ], - [rationale, answer], - ) - examples=[['api/61.png',"Question: Think about the magnetic force between the magnets in each pair. Which of the following statements is true?\nContext: The images below show two pairs of magnets. The magnets in different pairs do not affect each other. All the magnets shown are made of the same material, but some of them are different sizes and shapes.\nOptions: (A) The magnitude of the magnetic force is the same in both pairs. (B) The magnitude of the magnetic force is smaller in Pair 1. (C) The magnitude of the magnetic force is smaller in Pair 2.\nSolution:","Magnet sizes affect the magnitude of the magnetic force. Imagine magnets that are the same shape and made of the same material. The smaller the magnets, the smaller the magnitude of the magnetic force between them.nMagnet A is the same size in both pairs. But Magnet B is smaller in Pair 2 than in Pair 1. 
So, the magnitude of the magnetic force is smaller in Pair 2 than in Pair 1.","The answer is (C)."], - ] - examples = gr.Examples( - examples=examples,inputs=[image_input, chat_input, rationale, answer], - ) - -iface.queue(concurrency_count=1, api_open=False, max_size=10) -iface.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/apis/train.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/apis/train.py deleted file mode 100644 index f0a87d65c72e4581c96b41aebf879905510c9d22..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/apis/train.py +++ /dev/null @@ -1,116 +0,0 @@ -import random -import warnings - -import numpy as np -import torch -from annotator.mmpkg.mmcv.parallel import MMDataParallel, MMDistributedDataParallel -from annotator.mmpkg.mmcv.runner import build_optimizer, build_runner - -from annotator.mmpkg.mmseg.core import DistEvalHook, EvalHook -from annotator.mmpkg.mmseg.datasets import build_dataloader, build_dataset -from annotator.mmpkg.mmseg.utils import get_root_logger - - -def set_random_seed(seed, deterministic=False): - """Set random seed. - - Args: - seed (int): Seed to be used. - deterministic (bool): Whether to set the deterministic option for - CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` - to True and `torch.backends.cudnn.benchmark` to False. - Default: False. - """ - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - if deterministic: - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - -def train_segmentor(model, - dataset, - cfg, - distributed=False, - validate=False, - timestamp=None, - meta=None): - """Launch segmentor training.""" - logger = get_root_logger(cfg.log_level) - - # prepare data loaders - dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] - data_loaders = [ - build_dataloader( - ds, - cfg.data.samples_per_gpu, - cfg.data.workers_per_gpu, - # cfg.gpus will be ignored if distributed - len(cfg.gpu_ids), - dist=distributed, - seed=cfg.seed, - drop_last=True) for ds in dataset - ] - - # put model on gpus - if distributed: - find_unused_parameters = cfg.get('find_unused_parameters', False) - # Sets the `find_unused_parameters` parameter in - # torch.nn.parallel.DistributedDataParallel - model = MMDistributedDataParallel( - model.cuda(), - device_ids=[torch.cuda.current_device()], - broadcast_buffers=False, - find_unused_parameters=find_unused_parameters) - else: - model = MMDataParallel( - model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) - - # build runner - optimizer = build_optimizer(model, cfg.optimizer) - - if cfg.get('runner') is None: - cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters} - warnings.warn( - 'config is now expected to have a `runner` section, ' - 'please set `runner` in your config.', UserWarning) - - runner = build_runner( - cfg.runner, - default_args=dict( - model=model, - batch_processor=None, - optimizer=optimizer, - work_dir=cfg.work_dir, - logger=logger, - meta=meta)) - - # register hooks - runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, - cfg.checkpoint_config, cfg.log_config, - cfg.get('momentum_config', None)) - - # an ugly walkaround to make the .log and .log.json filenames the same - runner.timestamp = timestamp - - # register eval hooks - if validate: - 
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) - val_dataloader = build_dataloader( - val_dataset, - samples_per_gpu=1, - workers_per_gpu=cfg.data.workers_per_gpu, - dist=distributed, - shuffle=False) - eval_cfg = cfg.get('evaluation', {}) - eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' - eval_hook = DistEvalHook if distributed else EvalHook - runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW') - - if cfg.resume_from: - runner.resume(cfg.resume_from) - elif cfg.load_from: - runner.load_checkpoint(cfg.load_from) - runner.run(data_loaders, cfg.workflow) diff --git a/spaces/cozyanduofen/bingo/src/components/settings.tsx b/spaces/cozyanduofen/bingo/src/components/settings.tsx deleted file mode 100644 index e18aa5b484852bb5d047442a06e7143b6893cb0d..0000000000000000000000000000000000000000 --- a/spaces/cozyanduofen/bingo/src/components/settings.tsx +++ /dev/null @@ -1,141 +0,0 @@ -import { useEffect, useState } from 'react' -import { useAtom } from 'jotai' -import { Switch } from '@headlessui/react' -import { toast } from 'react-hot-toast' -import { hashAtom, voiceAtom } from '@/state' -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle -} from '@/components/ui/dialog' -import { Button } from './ui/button' -import { Input } from './ui/input' -import { ChunkKeys, parseCookies, extraCurlFromCookie, randomIP, encodeHeadersToCookie } from '@/lib/utils' -import { ExternalLink } from './external-link' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function Settings() { - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - const [loc, setLoc] = useAtom(hashAtom) - const [curlValue, setCurlValue] = useState(extraCurlFromCookie(parseCookies(document.cookie, ChunkKeys))) - const [enableTTS, setEnableTTS] = useAtom(voiceAtom) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - - if (loc === 'settings') { - return ( - setLoc('')} modal> - - - 设置你的用户信息 - - 请使用 Edge 浏览器 - - 打开并登录 Bing - - ,然后再打开 - Challenge 接口 - 右键 》检查。打开开发者工具,在网络里面找到 Create 接口 》右键复制》复制为 cURL(bash),粘贴到此处,然后保存。 -
- 图文示例: - 如何获取 BING_HEADER - - -
- -
- setCurlValue(e.target.value)} - /> - - - - - - -
- ) - } else if (loc === 'voice') { - return ( - setLoc('')} modal> - - - 语音设置 - - 目前仅支持 PC 端 Edge 及 Chrome 浏览器 - - - -
- 启用语音回答 - setEnableTTS(checked)} - > - - -
- - - - -
-
- ) - } - return null -} diff --git a/spaces/cozyanduofen/bingo/src/components/tone-selector.tsx b/spaces/cozyanduofen/bingo/src/components/tone-selector.tsx deleted file mode 100644 index 5c6e464c91f564b895acd121f0a4a79ed9c5c356..0000000000000000000000000000000000000000 --- a/spaces/cozyanduofen/bingo/src/components/tone-selector.tsx +++ /dev/null @@ -1,43 +0,0 @@ -import React from 'react' -import { BingConversationStyle } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' - -type ToneItem = { - type: BingConversationStyle, - name: string -} - -const ToneList: ToneItem[] = [ - { name: '有创造力', type: BingConversationStyle.Creative }, - { name: '更平衡', type: BingConversationStyle.Balanced }, - { name: '更精确', type: BingConversationStyle.Precise } -] - -interface ToneSelectorProps { - type: BingConversationStyle | '' - onChange?: (type: BingConversationStyle) => void -} - -export function ToneSelector({ type, onChange }: ToneSelectorProps) { - return ( -
-
- 选择对话样式 -
-
-
    - { - ToneList.map(tone => ( -
  • onChange?.(tone.type)}> - -
  • - )) - } -
-
-
- ) -} diff --git a/spaces/crylake/img2poem/query2labels/lib/models/tresnet/__init__.py b/spaces/crylake/img2poem/query2labels/lib/models/tresnet/__init__.py deleted file mode 100644 index e226381b648908c64bbb48f13222959be4b5bb99..0000000000000000000000000000000000000000 --- a/spaces/crylake/img2poem/query2labels/lib/models/tresnet/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .tresnet_sync import TResnetM, TResnetL, TResnetXL -tresnetm = TResnetM -tresnetl = TResnetL -tresnetxl = TResnetXL -tresnetl_21k = TResnetL diff --git a/spaces/danielsapit/JPEG_Artifacts_Removal/old_app.py b/spaces/danielsapit/JPEG_Artifacts_Removal/old_app.py deleted file mode 100644 index 56a4309b7b02601f74f9142cd9859d337191e4b0..0000000000000000000000000000000000000000 --- a/spaces/danielsapit/JPEG_Artifacts_Removal/old_app.py +++ /dev/null @@ -1,158 +0,0 @@ -import gradio as gr -import os.path -import numpy as np -from collections import OrderedDict -import torch -import cv2 -from PIL import Image, ImageOps -import utils_image as util -from network_fbcnn import FBCNN as net -import requests - -for model_path in ['fbcnn_gray.pth','fbcnn_color.pth']: - if os.path.exists(model_path): - print(f'{model_path} exists.') - else: - url = 'https://github.com/jiaxi-jiang/FBCNN/releases/download/v1.0/{}'.format(os.path.basename(model_path)) - r = requests.get(url, allow_redirects=True) - open(model_path, 'wb').write(r.content) - -def inference(input_img, is_gray, input_quality, enable_zoom, zoom, x_shift, y_shift, state): - - if is_gray: - n_channels = 1 # set 1 for grayscale image, set 3 for color image - model_name = 'fbcnn_gray.pth' - else: - n_channels = 3 # set 1 for grayscale image, set 3 for color image - model_name = 'fbcnn_color.pth' - nc = [64,128,256,512] - nb = 4 - - - input_quality = 100 - input_quality - - model_path = model_name - - if os.path.exists(model_path): - print(f'loading model from {model_path}') - else: - os.makedirs(os.path.dirname(model_path), exist_ok=True) - url = 'https://github.com/jiaxi-jiang/FBCNN/releases/download/v1.0/{}'.format(os.path.basename(model_path)) - r = requests.get(url, allow_redirects=True) - open(model_path, 'wb').write(r.content) - - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - # ---------------------------------------- - # load model - # ---------------------------------------- - if (not enable_zoom) or (state[1] is None): - model = net(in_nc=n_channels, out_nc=n_channels, nc=nc, nb=nb, act_mode='R') - model.load_state_dict(torch.load(model_path), strict=True) - model.eval() - for k, v in model.named_parameters(): - v.requires_grad = False - model = model.to(device) - - test_results = OrderedDict() - test_results['psnr'] = [] - test_results['ssim'] = [] - test_results['psnrb'] = [] - - # ------------------------------------ - # (1) img_L - # ------------------------------------ - - if n_channels == 1: - open_cv_image = Image.fromarray(input_img) - open_cv_image = ImageOps.grayscale(open_cv_image) - open_cv_image = np.array(open_cv_image) # PIL to open cv image - img = np.expand_dims(open_cv_image, axis=2) # HxWx1 - elif n_channels == 3: - open_cv_image = np.array(input_img) # PIL to open cv image - if open_cv_image.ndim == 2: - open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_GRAY2RGB) # GGG - else: - open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB) # RGB - - img_L = util.uint2tensor4(open_cv_image) - img_L = img_L.to(device) - - # ------------------------------------ - # (2) img_E - # ------------------------------------ - 
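-    # Note: the model call just below runs without a quality-factor input; its outputs are
-    # immediately overwritten by the second call that passes qf_input, so only that result is used.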
- img_E,QF = model(img_L) - QF = 1- QF - img_E = util.tensor2single(img_E) - img_E = util.single2uint(img_E) - - qf_input = torch.tensor([[1-input_quality/100]]).cuda() if device == torch.device('cuda') else torch.tensor([[1-input_quality/100]]) - img_E,QF = model(img_L, qf_input) - QF = 1- QF - img_E = util.tensor2single(img_E) - img_E = util.single2uint(img_E) - - if img_E.ndim == 3: - img_E = img_E[:, :, [2, 1, 0]] - - print("--inference finished") - if (state[1] is not None) and enable_zoom: - img_E = state[1] - out_img = Image.fromarray(img_E) - out_img_w, out_img_h = out_img.size # output image size - zoom = zoom/100 - x_shift = x_shift/100 - y_shift = y_shift/100 - zoom_w, zoom_h = out_img_w*zoom, out_img_h*zoom - zoom_left, zoom_right = int((out_img_w - zoom_w)*x_shift), int(zoom_w + (out_img_w - zoom_w)*x_shift) - zoom_top, zoom_bottom = int((out_img_h - zoom_h)*y_shift), int(zoom_h + (out_img_h - zoom_h)*y_shift) - if (state[0] is None) or not enable_zoom: - in_img = Image.fromarray(input_img) - state[0] = input_img - else: - in_img = Image.fromarray(state[0]) - in_img = in_img.crop((zoom_left, zoom_top, zoom_right, zoom_bottom)) - in_img = in_img.resize((int(zoom_w/zoom), int(zoom_h/zoom)), Image.NEAREST) - out_img = out_img.crop((zoom_left, zoom_top, zoom_right, zoom_bottom)) - out_img = out_img.resize((int(zoom_w/zoom), int(zoom_h/zoom)), Image.NEAREST) - - return img_E, in_img, out_img, [state[0],img_E] - -gr.Interface( - fn = inference, - inputs = [gr.inputs.Image(label="Input Image"), - gr.inputs.Checkbox(label="Grayscale (Check this if your image is grayscale)"), - gr.inputs.Slider(minimum=1, maximum=100, step=1, label="Intensity (Higher = stronger JPEG artifact removal)"), - gr.inputs.Checkbox(default=False, label="Edit Zoom preview (This is optional. " - "After the image result is loaded, check this to edit zoom parameters " - "so that the input image will not be processed when the submit button is pressed.)"), - gr.inputs.Slider(minimum=10, maximum=100, step=1, default=50, label="Zoom Image " - "(Use this to see the image quality up close. " - "100 = original size)"), - gr.inputs.Slider(minimum=0, maximum=100, step=1, label="Zoom preview horizontal shift " - "(Increase to shift to the right)"), - gr.inputs.Slider(minimum=0, maximum=100, step=1, label="Zoom preview vertical shift " - "(Increase to shift downwards)"), - gr.inputs.State(default=[None,None], label="\t") - ], - outputs = [gr.outputs.Image(label="Result"), - gr.outputs.Image(label="Before:"), - gr.outputs.Image(label="After:"), - "state"], - examples = [["doraemon.jpg",False,60,False,42,50,50], - ["tomandjerry.jpg",False,60,False,40,57,44], - ["somepanda.jpg",True,100,False,30,8,24], - ["cemetry.jpg",False,70,False,20,76,62], - ["michelangelo_david.jpg",True,30,False,12,53,27], - ["elon_musk.jpg",False,45,False,15,33,30], - ["text.jpg",True,70,False,50,11,29]], - title = "JPEG Artifacts Removal [FBCNN]", - description = "Gradio Demo for JPEG Artifacts Removal. To use it, simply upload your image, " - "or click one of the examples to load them. Check out the paper and the original GitHub repo at the link below. " - "JPEG artifacts are noticeable distortion of images caused by JPEG lossy compression. " - "This is not a super resolution AI but a JPEG compression artifact remover.", - article = "

FBCNN GitHub Repo
" - "Towards Flexible Blind JPEG Artifacts Removal (FBCNN, ICCV 2021)

", - allow_flagging="never" -).launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/dawdqd/ChuanhuChatGPT/web_assets/stylesheet/ChuanhuChat.css b/spaces/dawdqd/ChuanhuChatGPT/web_assets/stylesheet/ChuanhuChat.css deleted file mode 100644 index 62d41dbd061d200ba5a6841b318aea22950d1791..0000000000000000000000000000000000000000 --- a/spaces/dawdqd/ChuanhuChatGPT/web_assets/stylesheet/ChuanhuChat.css +++ /dev/null @@ -1,112 +0,0 @@ -:root { - --chatbot-color-light: #000000; - --chatbot-color-dark: #FFFFFF; - --chatbot-background-color-light: #F3F3F3; - --chatbot-background-color-dark: #121111; - --message-user-background-color-light: #95EC69; - --message-user-background-color-dark: #26B561; - --message-bot-background-color-light: #FFFFFF; - --message-bot-background-color-dark: #2C2C2C; - --switch-checkbox-color-light: #e5e7eb; - --switch-checkbox-color-dark: #515151; -} - -.hideK { - display: none; -} - -#app-title { - font-weight: var(--prose-header-text-weight); - font-size: var(--text-xxl); - line-height: 1.3; - text-align: left; - margin-top: 6px; - white-space: nowrap; -} -#description { - text-align: center; - margin: 32px 0 4px 0; -} - -/* 高级页面 */ -#advanced-warning { - display: flex; - flex-wrap: wrap; - flex-direction: column; - align-content: center; -} - -#netsetting-warning hr { - margin-bottom: 1em; -} - -.view-only-textbox textarea { - -webkit-text-fill-color: darkgray !important; - cursor: not-allowed !important; -} - -#footer { - text-align: center; -} -#footer div { - display: inline-block; -} -#footer .versions{ - font-size: 85%; - opacity: 0.60; -} - - -#float-display { - position: absolute; - max-height: 30px; -} - -.insert-block { - position: relative; - margin: 0; - padding: 8px 12px; - box-shadow: var(--block-shadow); - border-width: var(--block-border-width); - border-color: var(--block-border-color); - border-radius: var(--block-radius); - background: var(--block-background-fill); - width: 100%; - line-height: var(--line-sm); - min-height: 2em; -} - -/* status-display */ -#status-display { - display: flex; - min-height: 2em; - align-items: flex-end; - justify-content: flex-end; - transition: all 0.6s; -} -#status-display p { - font-size: .85em; - font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace; - /* Windows下中文的monospace会fallback为新宋体,实在太丑,这里折中使用微软雅黑 */ - color: var(--body-text-color-subdued); -} - - -#submit-btn, #cancel-btn { - height: 40px !important; -} -#submit-btn::before { - content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E 
%3C/svg%3E"); - height: 21px; -} -#cancel-btn::before { - content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E"); - height: 21px; -} - -#chatbot-buttons button { - display: inline-block; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} \ No newline at end of file diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/eval_ijbc.py 
b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/eval_ijbc.py deleted file mode 100644 index 9c5a650d486d18eb02d6f60d448fc3b315261f5d..0000000000000000000000000000000000000000 --- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/eval_ijbc.py +++ /dev/null @@ -1,483 +0,0 @@ -# coding: utf-8 - -import os -import pickle - -import matplotlib -import pandas as pd - -matplotlib.use('Agg') -import matplotlib.pyplot as plt -import timeit -import sklearn -import argparse -import cv2 -import numpy as np -import torch -from skimage import transform as trans -from backbones import get_model -from sklearn.metrics import roc_curve, auc - -from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap -from prettytable import PrettyTable -from pathlib import Path - -import sys -import warnings - -sys.path.insert(0, "../") -warnings.filterwarnings("ignore") - -parser = argparse.ArgumentParser(description='do ijb test') -# general -parser.add_argument('--model-prefix', default='', help='path to load model.') -parser.add_argument('--image-path', default='', type=str, help='') -parser.add_argument('--result-dir', default='.', type=str, help='') -parser.add_argument('--batch-size', default=128, type=int, help='') -parser.add_argument('--network', default='iresnet50', type=str, help='') -parser.add_argument('--job', default='insightface', type=str, help='job name') -parser.add_argument('--target', default='IJBC', type=str, help='target, set to IJBC or IJBB') -args = parser.parse_args() - -target = args.target -model_path = args.model_prefix -image_path = args.image_path -result_dir = args.result_dir -gpu_id = None -use_norm_score = True # if Ture, TestMode(N1) -use_detector_score = True # if Ture, TestMode(D1) -use_flip_test = True # if Ture, TestMode(F1) -job = args.job -batch_size = args.batch_size - - -class Embedding(object): - def __init__(self, prefix, data_shape, batch_size=1): - image_size = (112, 112) - self.image_size = image_size - weight = torch.load(prefix) - resnet = get_model(args.network, dropout=0, fp16=False).cuda() - resnet.load_state_dict(weight) - model = torch.nn.DataParallel(resnet) - self.model = model - self.model.eval() - src = np.array([ - [30.2946, 51.6963], - [65.5318, 51.5014], - [48.0252, 71.7366], - [33.5493, 92.3655], - [62.7299, 92.2041]], dtype=np.float32) - src[:, 0] += 8.0 - self.src = src - self.batch_size = batch_size - self.data_shape = data_shape - - def get(self, rimg, landmark): - - assert landmark.shape[0] == 68 or landmark.shape[0] == 5 - assert landmark.shape[1] == 2 - if landmark.shape[0] == 68: - landmark5 = np.zeros((5, 2), dtype=np.float32) - landmark5[0] = (landmark[36] + landmark[39]) / 2 - landmark5[1] = (landmark[42] + landmark[45]) / 2 - landmark5[2] = landmark[30] - landmark5[3] = landmark[48] - landmark5[4] = landmark[54] - else: - landmark5 = landmark - tform = trans.SimilarityTransform() - tform.estimate(landmark5, self.src) - M = tform.params[0:2, :] - img = cv2.warpAffine(rimg, - M, (self.image_size[1], self.image_size[0]), - borderValue=0.0) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img_flip = np.fliplr(img) - img = np.transpose(img, (2, 0, 1)) # 3*112*112, RGB - img_flip = np.transpose(img_flip, (2, 0, 1)) - input_blob = np.zeros((2, 3, self.image_size[1], self.image_size[0]), dtype=np.uint8) - input_blob[0] = img - input_blob[1] = img_flip - return input_blob - - @torch.no_grad() - def forward_db(self, batch_data): - imgs = torch.Tensor(batch_data).cuda() - 
imgs.div_(255).sub_(0.5).div_(0.5) - feat = self.model(imgs) - feat = feat.reshape([self.batch_size, 2 * feat.shape[1]]) - return feat.cpu().numpy() - - -# 将一个list尽量均分成n份,限制len(list)==n,份数大于原list内元素个数则分配空list[] -def divideIntoNstrand(listTemp, n): - twoList = [[] for i in range(n)] - for i, e in enumerate(listTemp): - twoList[i % n].append(e) - return twoList - - -def read_template_media_list(path): - # ijb_meta = np.loadtxt(path, dtype=str) - ijb_meta = pd.read_csv(path, sep=' ', header=None).values - templates = ijb_meta[:, 1].astype(np.int) - medias = ijb_meta[:, 2].astype(np.int) - return templates, medias - - -# In[ ]: - - -def read_template_pair_list(path): - # pairs = np.loadtxt(path, dtype=str) - pairs = pd.read_csv(path, sep=' ', header=None).values - # print(pairs.shape) - # print(pairs[:, 0].astype(np.int)) - t1 = pairs[:, 0].astype(np.int) - t2 = pairs[:, 1].astype(np.int) - label = pairs[:, 2].astype(np.int) - return t1, t2, label - - -# In[ ]: - - -def read_image_feature(path): - with open(path, 'rb') as fid: - img_feats = pickle.load(fid) - return img_feats - - -# In[ ]: - - -def get_image_feature(img_path, files_list, model_path, epoch, gpu_id): - batch_size = args.batch_size - data_shape = (3, 112, 112) - - files = files_list - print('files:', len(files)) - rare_size = len(files) % batch_size - faceness_scores = [] - batch = 0 - img_feats = np.empty((len(files), 1024), dtype=np.float32) - - batch_data = np.empty((2 * batch_size, 3, 112, 112)) - embedding = Embedding(model_path, data_shape, batch_size) - for img_index, each_line in enumerate(files[:len(files) - rare_size]): - name_lmk_score = each_line.strip().split(' ') - img_name = os.path.join(img_path, name_lmk_score[0]) - img = cv2.imread(img_name) - lmk = np.array([float(x) for x in name_lmk_score[1:-1]], - dtype=np.float32) - lmk = lmk.reshape((5, 2)) - input_blob = embedding.get(img, lmk) - - batch_data[2 * (img_index - batch * batch_size)][:] = input_blob[0] - batch_data[2 * (img_index - batch * batch_size) + 1][:] = input_blob[1] - if (img_index + 1) % batch_size == 0: - print('batch', batch) - img_feats[batch * batch_size:batch * batch_size + - batch_size][:] = embedding.forward_db(batch_data) - batch += 1 - faceness_scores.append(name_lmk_score[-1]) - - batch_data = np.empty((2 * rare_size, 3, 112, 112)) - embedding = Embedding(model_path, data_shape, rare_size) - for img_index, each_line in enumerate(files[len(files) - rare_size:]): - name_lmk_score = each_line.strip().split(' ') - img_name = os.path.join(img_path, name_lmk_score[0]) - img = cv2.imread(img_name) - lmk = np.array([float(x) for x in name_lmk_score[1:-1]], - dtype=np.float32) - lmk = lmk.reshape((5, 2)) - input_blob = embedding.get(img, lmk) - batch_data[2 * img_index][:] = input_blob[0] - batch_data[2 * img_index + 1][:] = input_blob[1] - if (img_index + 1) % rare_size == 0: - print('batch', batch) - img_feats[len(files) - - rare_size:][:] = embedding.forward_db(batch_data) - batch += 1 - faceness_scores.append(name_lmk_score[-1]) - faceness_scores = np.array(faceness_scores).astype(np.float32) - # img_feats = np.ones( (len(files), 1024), dtype=np.float32) * 0.01 - # faceness_scores = np.ones( (len(files), ), dtype=np.float32 ) - return img_feats, faceness_scores - - -# In[ ]: - - -def image2template_feature(img_feats=None, templates=None, medias=None): - # ========================================================== - # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim] - # 2. compute media feature. - # 3. 
compute template feature. - # ========================================================== - unique_templates = np.unique(templates) - template_feats = np.zeros((len(unique_templates), img_feats.shape[1])) - - for count_template, uqt in enumerate(unique_templates): - - (ind_t,) = np.where(templates == uqt) - face_norm_feats = img_feats[ind_t] - face_medias = medias[ind_t] - unique_medias, unique_media_counts = np.unique(face_medias, - return_counts=True) - media_norm_feats = [] - for u, ct in zip(unique_medias, unique_media_counts): - (ind_m,) = np.where(face_medias == u) - if ct == 1: - media_norm_feats += [face_norm_feats[ind_m]] - else: # image features from the same video will be aggregated into one feature - media_norm_feats += [ - np.mean(face_norm_feats[ind_m], axis=0, keepdims=True) - ] - media_norm_feats = np.array(media_norm_feats) - # media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True)) - template_feats[count_template] = np.sum(media_norm_feats, axis=0) - if count_template % 2000 == 0: - print('Finish Calculating {} template features.'.format( - count_template)) - # template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True)) - template_norm_feats = sklearn.preprocessing.normalize(template_feats) - # print(template_norm_feats.shape) - return template_norm_feats, unique_templates - - -# In[ ]: - - -def verification(template_norm_feats=None, - unique_templates=None, - p1=None, - p2=None): - # ========================================================== - # Compute set-to-set Similarity Score. - # ========================================================== - template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) - for count_template, uqt in enumerate(unique_templates): - template2id[uqt] = count_template - - score = np.zeros((len(p1),)) # save cosine distance between pairs - - total_pairs = np.array(range(len(p1))) - batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation - sublists = [ - total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize) - ] - total_sublists = len(sublists) - for c, s in enumerate(sublists): - feat1 = template_norm_feats[template2id[p1[s]]] - feat2 = template_norm_feats[template2id[p2[s]]] - similarity_score = np.sum(feat1 * feat2, -1) - score[s] = similarity_score.flatten() - if c % 10 == 0: - print('Finish {}/{} pairs.'.format(c, total_sublists)) - return score - - -# In[ ]: -def verification2(template_norm_feats=None, - unique_templates=None, - p1=None, - p2=None): - template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) - for count_template, uqt in enumerate(unique_templates): - template2id[uqt] = count_template - score = np.zeros((len(p1),)) # save cosine distance between pairs - total_pairs = np.array(range(len(p1))) - batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation - sublists = [ - total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize) - ] - total_sublists = len(sublists) - for c, s in enumerate(sublists): - feat1 = template_norm_feats[template2id[p1[s]]] - feat2 = template_norm_feats[template2id[p2[s]]] - similarity_score = np.sum(feat1 * feat2, -1) - score[s] = similarity_score.flatten() - if c % 10 == 0: - print('Finish {}/{} pairs.'.format(c, total_sublists)) - return score - - -def read_score(path): - with open(path, 'rb') as fid: - img_feats = pickle.load(fid) - return img_feats - - -# # Step1: Load Meta Data - -# In[ ]: - -assert target 
== 'IJBC' or target == 'IJBB' - -# ============================================================= -# load image and template relationships for template feature embedding -# tid --> template id, mid --> media id -# format: -# image_name tid mid -# ============================================================= -start = timeit.default_timer() -templates, medias = read_template_media_list( - os.path.join('%s/meta' % image_path, - '%s_face_tid_mid.txt' % target.lower())) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) - -# In[ ]: - -# ============================================================= -# load template pairs for template-to-template verification -# tid : template id, label : 1/0 -# format: -# tid_1 tid_2 label -# ============================================================= -start = timeit.default_timer() -p1, p2, label = read_template_pair_list( - os.path.join('%s/meta' % image_path, - '%s_template_pair_label.txt' % target.lower())) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) - -# # Step 2: Get Image Features - -# In[ ]: - -# ============================================================= -# load image features -# format: -# img_feats: [image_num x feats_dim] (227630, 512) -# ============================================================= -start = timeit.default_timer() -img_path = '%s/loose_crop' % image_path -img_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower()) -img_list = open(img_list_path) -files = img_list.readlines() -# files_list = divideIntoNstrand(files, rank_size) -files_list = files - -# img_feats -# for i in range(rank_size): -img_feats, faceness_scores = get_image_feature(img_path, files_list, - model_path, 0, gpu_id) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) -print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0], - img_feats.shape[1])) - -# # Step3: Get Template Features - -# In[ ]: - -# ============================================================= -# compute template features from image features. -# ============================================================= -start = timeit.default_timer() -# ========================================================== -# Norm feature before aggregation into template feature? -# Feature norm from embedding network and faceness score are able to decrease weights for noise samples (not face). -# ========================================================== -# 1. FaceScore (Feature Norm) -# 2. FaceScore (Detector) - -if use_flip_test: - # concat --- F1 - # img_input_feats = img_feats - # add --- F2 - img_input_feats = img_feats[:, 0:img_feats.shape[1] // - 2] + img_feats[:, img_feats.shape[1] // 2:] -else: - img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2] - -if use_norm_score: - img_input_feats = img_input_feats -else: - # normalise features to remove norm information - img_input_feats = img_input_feats / np.sqrt( - np.sum(img_input_feats ** 2, -1, keepdims=True)) - -if use_detector_score: - print(img_input_feats.shape, faceness_scores.shape) - img_input_feats = img_input_feats * faceness_scores[:, np.newaxis] -else: - img_input_feats = img_input_feats - -template_norm_feats, unique_templates = image2template_feature( - img_input_feats, templates, medias) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) - -# # Step 4: Get Template Similarity Scores - -# In[ ]: - -# ============================================================= -# compute verification scores between template pairs. 
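-# (each score below is the cosine similarity, i.e. the dot product of the two L2-normalised template embeddings of a pair)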
-# ============================================================= -start = timeit.default_timer() -score = verification(template_norm_feats, unique_templates, p1, p2) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) - -# In[ ]: -save_path = os.path.join(result_dir, args.job) -# save_path = result_dir + '/%s_result' % target - -if not os.path.exists(save_path): - os.makedirs(save_path) - -score_save_file = os.path.join(save_path, "%s.npy" % target.lower()) -np.save(score_save_file, score) - -# # Step 5: Get ROC Curves and TPR@FPR Table - -# In[ ]: - -files = [score_save_file] -methods = [] -scores = [] -for file in files: - methods.append(Path(file).stem) - scores.append(np.load(file)) - -methods = np.array(methods) -scores = dict(zip(methods, scores)) -colours = dict( - zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2'))) -x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1] -tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels]) -fig = plt.figure() -for method in methods: - fpr, tpr, _ = roc_curve(label, scores[method]) - roc_auc = auc(fpr, tpr) - fpr = np.flipud(fpr) - tpr = np.flipud(tpr) # select largest tpr at same fpr - plt.plot(fpr, - tpr, - color=colours[method], - lw=1, - label=('[%s (AUC = %0.4f %%)]' % - (method.split('-')[-1], roc_auc * 100))) - tpr_fpr_row = [] - tpr_fpr_row.append("%s-%s" % (method, target)) - for fpr_iter in np.arange(len(x_labels)): - _, min_index = min( - list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr))))) - tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100)) - tpr_fpr_table.add_row(tpr_fpr_row) -plt.xlim([10 ** -6, 0.1]) -plt.ylim([0.3, 1.0]) -plt.grid(linestyle='--', linewidth=1) -plt.xticks(x_labels) -plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True)) -plt.xscale('log') -plt.xlabel('False Positive Rate') -plt.ylabel('True Positive Rate') -plt.title('ROC on IJB') -plt.legend(loc="lower right") -fig.savefig(os.path.join(save_path, '%s.pdf' % target.lower())) -print(tpr_fpr_table) diff --git a/spaces/deepwisdom/MetaGPT/metagpt/roles/__init__.py b/spaces/deepwisdom/MetaGPT/metagpt/roles/__init__.py deleted file mode 100644 index 1768b786c0755299ef9167ea38158a9231b8e814..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/metagpt/roles/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/11 14:43 -@Author : alexanderwu -@File : __init__.py -""" - -from metagpt.roles.role import Role -from metagpt.roles.architect import Architect -from metagpt.roles.project_manager import ProjectManager -from metagpt.roles.product_manager import ProductManager -from metagpt.roles.engineer import Engineer -from metagpt.roles.qa_engineer import QaEngineer -from metagpt.roles.seacher import Searcher -from metagpt.roles.sales import Sales -from metagpt.roles.customer_service import CustomerService - - -__all__ = [ - "Role", - "Architect", - "ProjectManager", - "ProductManager", - "Engineer", - "QaEngineer", - "Searcher", - "Sales", - "CustomerService", -] diff --git a/spaces/diacanFperku/AutoGPT/Jd Lee Sudarshan Guha Pdf Free Download NEW!.md b/spaces/diacanFperku/AutoGPT/Jd Lee Sudarshan Guha Pdf Free Download NEW!.md deleted file mode 100644 index 2d8b5158935b3d1887bd371fe73b53683b753800..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Jd Lee Sudarshan Guha Pdf Free Download NEW!.md +++ /dev/null @@ -1,79 +0,0 @@ -
-

Jd Lee Sudarshan Guha Pdf Free Download: A Review

-

If you are looking for a comprehensive and concise book on inorganic chemistry for your IIT JEE preparation, you might want to check out the Jd Lee Sudarshan Guha Pdf Free Download. This book is an adaptation of the classic Jd Lee Concise Inorganic Chemistry (fifth edition), which is widely used by students preparing for JEE. The book has been adapted by Sudarshan Guha, a renowned author and teacher of chemistry, to suit the needs and requirements of the JEE syllabus.

-

In this article, we will give you an overview of the book, its contents, features, and benefits. We will also provide you with some links to download the Jd Lee Sudarshan Guha Pdf Free Download from reliable sources.

-

Jd Lee Sudarshan Guha Pdf Free Download


Download: https://gohhs.com/2uFTIy



-

What is Jd Lee Sudarshan Guha Pdf Free Download?

-

Jd Lee Sudarshan Guha Pdf Free Download is a book that provides a concise and relevant treatment of inorganic chemistry as per JEE syllabus requirements. The book covers all the topics of inorganic chemistry that are essential for the JEE exam, such as atomic structure, periodic table, chemical bonding, coordination compounds, hydrolysis, metallurgy, qualitative salt analysis, hydrogen and its hydrides, and compounds and properties of s-, p-, d-, and f-block elements.

-

The book has been reorganized and updated to provide a more structured and logical approach as per exam requirements. The book also includes useful appendices with data on the abundance of elements, their physical properties, electronic structure, bond energies, solubilities, atomic weight, and electrical resistivity.

-

What are the features and benefits of Jd Lee Sudarshan Guha Pdf Free Download?

-

Some of the features and benefits of Jd Lee Sudarshan Guha Pdf Free Download are:

-
    -
  • The book is concise and easy to read and understand. It is based on descriptive chemistry combined with explanations of why the chemistry behaves the way it does.
  • -
  • The book provides a detailed background of the subject, helping students approach the examination confidently.
  • -
  • The book has chapter openers with an opening description related to the topics, and contents of the chapter listed for an overview.
  • -
  • The book has assessment questions as per the JEE examination, comprising all question types – multiple choice, multiple select, comprehension, assertion-reasoning, numerical, and matrix-match.
  • -
  • The book has further reading sections with easy-to-understand articles, references to specialized textbooks, and review articles.
  • -
  • The book has clear diagrams, tables, charts, and graphs to illustrate the concepts and facts.
  • -
  • The book has been adapted by an experienced author and teacher who has a thorough knowledge of the JEE syllabus and exam pattern.
  • -
-

How to download Jd Lee Sudarshan Guha Pdf Free Download?

-

If you want to download Jd Lee Sudarshan Guha Pdf Free Download for your IIT JEE preparation, you can use the following links from reliable sources:

- -

These links will provide you with the latest edition of Jd Lee Sudarshan Guha Pdf Free Download (4th edition) adapted by Sudarshan Guha and published by Wiley. You can download the pdf file for free and use it for your studies.

-

Conclusion

-

Jd Lee Sudarshan Guha Pdf Free Download is a great book for IIT JEE aspirants who want to master inorganic chemistry in a concise and relevant manner. The book covers all the topics of inorganic chemistry as per JEE syllabus requirements and provides a detailed background of the subject. The book also has useful features such as chapter openers, assessment questions, further reading sections, diagrams, tables, charts, graphs, appendices, etc. The book has been adapted by an experienced author and teacher who knows the JEE syllabus and exam pattern well. You can download the book from reliable sources for free and use it for your preparation.

-

Who is Sudarshan Guha and why did he adapt Jd Lee Inorganic Chemistry?

-

Sudarshan Guha is a well-known author and teacher of chemistry who has written several books for JEE preparation. He has also been associated with various coaching institutes and has helped thousands of students achieve their dream of cracking the JEE exam. He has vast experience and knowledge of the JEE syllabus and exam pattern, and knows the common difficulties and doubts faced by students.

-

He decided to adapt Jd Lee Inorganic Chemistry because he felt that the original book was too lengthy and detailed for the JEE exam. He wanted to provide a concise and relevant version of the book that would cover all the essential topics of inorganic chemistry as per JEE syllabus requirements. He also wanted to make the book more interesting and engaging for the students by adding chapter openers, assessment questions, further reading sections, diagrams, tables, charts, graphs, appendices, etc. He also updated and revised some of the content to make it more accurate and current.

-

-

What are the advantages of downloading Jd Lee Sudarshan Guha Pdf Free Download?

-

There are many advantages of downloading Jd Lee Sudarshan Guha Pdf Free Download for your IIT JEE preparation. Some of them are:

-
    -
  • You can save money by not buying the hard copy of the book, which can be expensive.
  • -
  • You can access the book anytime and anywhere on your laptop, tablet, smartphone, or any other device that supports pdf files.
  • -
  • You can easily search, highlight, bookmark, annotate, and print any part of the book as per your convenience.
  • -
  • You can share the book with your friends and classmates who are also preparing for the JEE exam.
  • -
  • You can get the latest edition of the book with all the updates and revisions done by Sudarshan Guha.
  • -
-

How to use Jd Lee Sudarshan Guha Pdf Free Download effectively for your IIT JEE preparation?

-

Jd Lee Sudarshan Guha Pdf Free Download is a great resource for your IIT JEE preparation, but you need to use it effectively to get the best results. Here are some tips on how to use the book efficiently:

-
    -
  • Read the book thoroughly and understand the concepts and facts of inorganic chemistry. Do not skip any topic or chapter as they are all interrelated and important for the exam.
  • -
  • Solve the assessment questions given at the end of each chapter to test your knowledge and understanding. Try to solve them without looking at the solutions or hints.
  • -
  • Refer to the further reading sections for more information and insights on the topics. You can also consult other books and online sources for additional reference.
  • -
  • Revise the book regularly and make notes of the important points, formulas, reactions, etc. You can also use flashcards or mnemonics to memorize them easily.
  • -
  • Practice as many mock tests and previous year papers as possible to improve your speed, accuracy, and confidence. Analyze your performance and work on your weak areas.
  • -
-

What are the topics covered in Jd Lee Sudarshan Guha Pdf Free Download?

-

Jd Lee Sudarshan Guha Pdf Free Download covers all the topics of inorganic chemistry that are essential for the JEE exam. The book has 12 chapters that are divided into six parts. The topics covered in each part are:

-
    -
  • Part One: Theoretical Concepts and Hydrogen - This part covers the basic concepts of atomic structure, periodic table, chemical bonding, coordination compounds, hydrogen and its hydrides.
  • -
  • Part Two: The s-Block Elements - This part covers the properties and compounds of the alkali metals and the alkaline earth metals, as well as the chlor-alkali industry.
  • -
  • Part Three: The p-Block Elements - This part covers the properties and compounds of the groups 13 to 18 elements, including boron, carbon, nitrogen, oxygen, sulfur, halogens, and noble gases.
  • -
  • Part Four: The d-Block Elements - This part covers the properties and compounds of the transition elements from groups 3 to 12, including scandium, titanium, vanadium, chromium, manganese, iron, cobalt, nickel, copper, zinc, and their alloys.
  • -
  • Part Five: The f-Block Elements - This part covers the properties and compounds of the lanthanide and actinide series of elements.
  • -
  • Part Six: Other Topics - This part covers some other topics of inorganic chemistry such as atomic nucleus, spectra, hydrolysis, metallurgy, and qualitative salt analysis.
  • -
-

How to study Jd Lee Sudarshan Guha Pdf Free Download effectively for your IIT JEE preparation?

-

Jd Lee Sudarshan Guha Pdf Free Download is a great resource for your IIT JEE preparation, but you need to study it effectively to get the best results. Here are some tips on how to study the book efficiently:

-
    -
  • Make a study plan and follow it strictly. Allocate sufficient time for each topic and chapter according to your syllabus and difficulty level.
  • -
  • Read the chapter openers carefully to get an overview of the topics and contents of the chapter. Note down the key points and terms that you need to remember.
  • -
  • Read the text thoroughly and understand the concepts and facts of inorganic chemistry. Do not skip any topic or chapter as they are all interrelated and important for the exam.
  • -
  • Use the diagrams, tables, charts, graphs, and appendices to supplement your understanding and visualization of the concepts and facts.
  • -
  • Solve the assessment questions given at the end of each chapter to test your knowledge and understanding. Try to solve them without looking at the solutions or hints.
  • -
  • Refer to the further reading sections for more information and insights on the topics. You can also consult other books and online sources for additional reference.
  • -
  • Revise the book regularly and make notes of the important points, formulas, reactions, etc. You can also use flashcards or mnemonics to memorize them easily.
  • -
  • Practice as many mock tests and previous year papers as possible to improve your speed, accuracy, and confidence. Analyze your performance and work on your weak areas.
  • -
-

Conclusion

-

Jd Lee Sudarshan Guha Pdf Free Download is a great book for IIT JEE aspirants who want to master inorganic chemistry in a concise and relevant manner. The book covers all the topics of inorganic chemistry as per JEE syllabus requirements and provides a detailed background of the subject. The book also has useful features such as chapter openers, assessment questions, further reading sections, diagrams, tables, charts, graphs, appendices, etc. The book has been adapted by an experienced author and teacher who knows the JEE syllabus and exam pattern well. You can download the book from reliable sources for free and use it for your preparation.

-


-
-
\ No newline at end of file diff --git a/spaces/digitalxingtong/Eileen-Bert-Vits2/utils.py b/spaces/digitalxingtong/Eileen-Bert-Vits2/utils.py deleted file mode 100644 index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Eileen-Bert-Vits2/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - elif optimizer is None and not skip_optimizer: - #else: #Disable this line if Infer ,and enable the line upper - new_opt_dict = optimizer.state_dict() - new_opt_dict_params = new_opt_dict['param_groups'][0]['params'] - new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups'] - new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params - optimizer.load_state_dict(new_opt_dict) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - #assert "emb_g" not in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = 
True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL", - help='Model name') - parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.cont = args.cont - return hparams - - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time -- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - import re - ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], - key=sort_key) - to_del = 
[os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/digitalxingtong/Miiu-Bert-Vits2/text/tone_sandhi.py b/spaces/digitalxingtong/Miiu-Bert-Vits2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Miiu-Bert-Vits2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 
奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 
一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/digitalxingtong/Nailv-Bert-Vits2/text/english_bert_mock.py b/spaces/digitalxingtong/Nailv-Bert-Vits2/text/english_bert_mock.py deleted file mode 100644 index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nailv-Bert-Vits2/text/english_bert_mock.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch - - -def get_bert_feature(norm_text, word2ph): - return torch.zeros(1024, sum(word2ph)) diff --git a/spaces/dorkai/text-generation-webui-main/extensions/superbooga/script.py b/spaces/dorkai/text-generation-webui-main/extensions/superbooga/script.py deleted file mode 100644 index a1d66add9945a9cc300345c0e3cb3f0360c04362..0000000000000000000000000000000000000000 --- a/spaces/dorkai/text-generation-webui-main/extensions/superbooga/script.py +++ /dev/null @@ -1,249 +0,0 @@ -import logging -import re -import textwrap - -import gradio as gr -from bs4 import BeautifulSoup -from modules import chat, shared - -from .chromadb import add_chunks_to_collector, make_collector -from .download_urls import download_urls - - -params = { - 'chunk_count': 5, - 'chunk_length': 700, - 'chunk_separator': '', - 'strong_cleanup': False, - 'threads': 4, -} - -collector = make_collector() -chat_collector = make_collector() -chunk_count = 5 - - -def feed_data_into_collector(corpus, chunk_len, chunk_sep): - global collector - - # Defining variables - chunk_len = int(chunk_len) - chunk_sep = chunk_sep.replace(r'\n', '\n') - cumulative = '' - - # Breaking the data into chunks and adding those to the db - cumulative += "Breaking the input dataset...\n\n" - yield cumulative - if chunk_sep: - data_chunks = corpus.split(chunk_sep) - data_chunks = [[data_chunk[i:i + chunk_len] for i in range(0, len(data_chunk), chunk_len)] for data_chunk in data_chunks] - data_chunks = [x for y in data_chunks for x in y] - else: - data_chunks = [corpus[i:i + chunk_len] for i in range(0, len(corpus), chunk_len)] - cumulative += f"{len(data_chunks)} chunks have been found.\n\nAdding the chunks to the database...\n\n" - yield cumulative - add_chunks_to_collector(data_chunks, collector) - cumulative += "Done." - yield cumulative - - -def feed_file_into_collector(file, chunk_len, chunk_sep): - yield 'Reading the input dataset...\n\n' - text = file.decode('utf-8') - for i in feed_data_into_collector(text, chunk_len, chunk_sep): - yield i - - -def feed_url_into_collector(urls, chunk_len, chunk_sep, strong_cleanup, threads): - all_text = '' - cumulative = '' - - urls = urls.strip().split('\n') - cumulative += f'Loading {len(urls)} URLs with {threads} threads...\n\n' - yield cumulative - for update, contents in download_urls(urls, threads=threads): - yield cumulative + update - - cumulative += 'Processing the HTML sources...' 
- yield cumulative - for content in contents: - soup = BeautifulSoup(content, features="html.parser") - for script in soup(["script", "style"]): - script.extract() - - strings = soup.stripped_strings - if strong_cleanup: - strings = [s for s in strings if re.search("[A-Za-z] ", s)] - - text = '\n'.join([s.strip() for s in strings]) - all_text += text - - for i in feed_data_into_collector(all_text, chunk_len, chunk_sep): - yield i - - -def apply_settings(_chunk_count): - global chunk_count - chunk_count = int(_chunk_count) - settings_to_display = { - 'chunk_count': chunk_count, - } - - yield f"The following settings are now active: {str(settings_to_display)}" - - -def custom_generate_chat_prompt(user_input, state, **kwargs): - global chat_collector - - if state['mode'] == 'instruct': - results = collector.get_sorted(user_input, n_results=chunk_count) - additional_context = '\nYour reply should be based on the context below:\n\n' + '\n'.join(results) - user_input += additional_context - else: - - def make_single_exchange(id_): - output = '' - output += f"{state['name1']}: {shared.history['internal'][id_][0]}\n" - output += f"{state['name2']}: {shared.history['internal'][id_][1]}\n" - return output - - if len(shared.history['internal']) > chunk_count and user_input != '': - chunks = [] - hist_size = len(shared.history['internal']) - for i in range(hist_size-1): - chunks.append(make_single_exchange(i)) - - add_chunks_to_collector(chunks, chat_collector) - query = '\n'.join(shared.history['internal'][-1] + [user_input]) - try: - best_ids = chat_collector.get_ids_sorted(query, n_results=chunk_count) - additional_context = '\n' - for id_ in best_ids: - if shared.history['internal'][id_][0] != '<|BEGIN-VISIBLE-CHAT|>': - additional_context += make_single_exchange(id_) - - logging.warning(f'Adding the following new context:\n{additional_context}') - state['context'] = state['context'].strip() + '\n' + additional_context - state['history'] = [shared.history['internal'][i] for i in range(hist_size) if i not in best_ids] - except RuntimeError: - logging.error("Couldn't query the database, moving on...") - - return chat.generate_chat_prompt(user_input, state, **kwargs) - - -def remove_special_tokens(string): - pattern = r'(<\|begin-user-input\|>|<\|end-user-input\|>|<\|injection-point\|>)' - return re.sub(pattern, '', string) - - -def input_modifier(string): - if shared.is_chat(): - return string - - # Find the user input - pattern = re.compile(r"<\|begin-user-input\|>(.*?)<\|end-user-input\|>", re.DOTALL) - match = re.search(pattern, string) - if match: - user_input = match.group(1).strip() - - # Get the most similar chunks - results = collector.get_sorted(user_input, n_results=chunk_count) - - # Make the injection - string = string.replace('<|injection-point|>', '\n'.join(results)) - - return remove_special_tokens(string) - - -def ui(): - with gr.Accordion("Click for more information...", open=False): - gr.Markdown(textwrap.dedent(""" - - ## About - - This extension takes a dataset as input, breaks it into chunks, and adds the result to a local/offline Chroma database. - - The database is then queried during inference time to get the excerpts that are closest to your input. The idea is to create an arbitrarily large pseudo context. 
- - The core methodology was developed and contributed by kaiokendev, who is working on improvements to the method in this repository: https://github.com/kaiokendev/superbig - - ## Data input - - Start by entering some data in the interface below and then clicking on "Load data". - - Each time you load some new data, the old chunks are discarded. - - ## Chat mode - - #### Instruct - - On each turn, the chunks will be compared to your current input and the most relevant matches will be appended to the input in the following format: - - ``` - Consider the excerpts below as additional context: - ... - ``` - - The injection doesn't make it into the chat history. It is only used in the current generation. - - #### Regular chat - - The chunks from the external data sources are ignored, and the chroma database is built based on the chat history instead. The most relevant past exchanges relative to the present input are added to the context string. This way, the extension acts as a long term memory. - - ## Notebook/default modes - - Your question must be manually specified between `<|begin-user-input|>` and `<|end-user-input|>` tags, and the injection point must be specified with `<|injection-point|>`. - - The special tokens mentioned above (`<|begin-user-input|>`, `<|end-user-input|>`, and `<|injection-point|>`) are removed in the background before the text generation begins. - - Here is an example in Vicuna 1.1 format: - - ``` - A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. - - USER: - - <|begin-user-input|> - What datasets are mentioned in the text below? - <|end-user-input|> - - <|injection-point|> - - ASSISTANT: - ``` - - ⚠️ For best results, make sure to remove the spaces and new line characters after `ASSISTANT:`. - - *This extension is currently experimental and under development.* - - """)) - - with gr.Row(): - with gr.Column(min_width=600): - with gr.Tab("Text input"): - data_input = gr.Textbox(lines=20, label='Input data') - update_data = gr.Button('Load data') - - with gr.Tab("URL input"): - url_input = gr.Textbox(lines=10, label='Input URLs', info='Enter one or more URLs separated by newline characters.') - strong_cleanup = gr.Checkbox(value=params['strong_cleanup'], label='Strong cleanup', info='Only keeps html elements that look like long-form text.') - threads = gr.Number(value=params['threads'], label='Threads', info='The number of threads to use while downloading the URLs.', precision=0) - update_url = gr.Button('Load data') - - with gr.Tab("File input"): - file_input = gr.File(label='Input file', type='binary') - update_file = gr.Button('Load data') - - with gr.Tab("Generation settings"): - chunk_count = gr.Number(value=params['chunk_count'], label='Chunk count', info='The number of closest-matching chunks to include in the prompt.') - update_settings = gr.Button('Apply changes') - - chunk_len = gr.Number(value=params['chunk_length'], label='Chunk length', info='In characters, not tokens. This value is used when you click on "Load data".') - chunk_sep = gr.Textbox(value=params['chunk_separator'], label='Chunk separator', info='Used to manually split chunks. Manually split chunks longer than chunk length are split again. 
This value is used when you click on "Load data".') - with gr.Column(): - last_updated = gr.Markdown() - - update_data.click(feed_data_into_collector, [data_input, chunk_len, chunk_sep], last_updated, show_progress=False) - update_url.click(feed_url_into_collector, [url_input, chunk_len, chunk_sep, strong_cleanup, threads], last_updated, show_progress=False) - update_file.click(feed_file_into_collector, [file_input, chunk_len, chunk_sep], last_updated, show_progress=False) - update_settings.click(apply_settings, [chunk_count], last_updated, show_progress=False) diff --git a/spaces/dylanebert/igf/viewer/src/routes/viewer/[slug]/BabylonViewer.ts b/spaces/dylanebert/igf/viewer/src/routes/viewer/[slug]/BabylonViewer.ts deleted file mode 100644 index b58d2293d8608e8f61896968371e58a5ac3c9948..0000000000000000000000000000000000000000 --- a/spaces/dylanebert/igf/viewer/src/routes/viewer/[slug]/BabylonViewer.ts +++ /dev/null @@ -1,154 +0,0 @@ -import type { IViewer } from "./IViewer"; -import * as BABYLON from "@babylonjs/core"; -import "@babylonjs/loaders/glTF"; -import "@babylonjs/loaders/OBJ"; - -export class BabylonViewer implements IViewer { - canvas: HTMLCanvasElement; - - engine: BABYLON.Engine; - scene: BABYLON.Scene; - camera: BABYLON.ArcRotateCamera; - - triangleCount: number = 0; - - constructor(canvas: HTMLCanvasElement) { - this.canvas = canvas; - - this.engine = new BABYLON.Engine(canvas, true); - - this.scene = new BABYLON.Scene(this.engine); - this.scene.clearColor = BABYLON.Color4.FromHexString("#1A1B1EFF"); - - this.camera = new BABYLON.ArcRotateCamera( - "camera", - Math.PI / 3, - Math.PI / 3, - 30, - BABYLON.Vector3.Zero(), - this.scene - ); - this.camera.angularSensibilityY = 1000; - this.camera.panningSensibility = 500; - this.camera.wheelPrecision = 5; - this.camera.inertia = 0.9; - this.camera.panningInertia = 0.9; - this.camera.lowerRadiusLimit = 3; - this.camera.upperRadiusLimit = 100; - this.camera.setTarget(BABYLON.Vector3.Zero()); - this.camera.attachControl(this.canvas, true); - this.camera.onAfterCheckInputsObservable.add(() => { - this.camera.wheelPrecision = 150 / this.camera.radius; - this.camera.panningSensibility = 10000 / this.camera.radius; - }); - - this.handleResize = this.handleResize.bind(this); - window.addEventListener("resize", this.handleResize); - } - - handleResize() { - this.engine.resize(); - } - - async loadScene(url: string, loadingBarCallback?: (progress: number) => void) { - // Load scene - await BABYLON.SceneLoader.AppendAsync("", url, this.scene, (event) => { - const progress = event.loaded / event.total; - loadingBarCallback?.(progress); - }); - - // Dispose of all cameras and lights - this.scene.cameras.forEach((camera) => { - if (camera !== this.camera) { - camera.dispose(); - } - }); - this.scene.lights.forEach((light) => { - light.dispose(); - }); - - // Add lights - const light = new BABYLON.HemisphericLight("hemi", new BABYLON.Vector3(0, 1, 0), this.scene); - light.intensity = 1; - light.diffuse = new BABYLON.Color3(1, 1, 1); - light.groundColor = new BABYLON.Color3(0.3, 0.3, 0.3); - - const sun = new BABYLON.DirectionalLight("sun", new BABYLON.Vector3(-0.5, -1, -0.5), this.scene); - sun.intensity = 2; - sun.diffuse = new BABYLON.Color3(1, 1, 1); - - // Center and scale model - const parentNode = new BABYLON.TransformNode("parent", this.scene); - const standardSize = 10; - let scaleFactor = 1; - let center = BABYLON.Vector3.Zero(); - if (this.scene.meshes.length > 0) { - let bounds = this.scene.meshes[0].getBoundingInfo().boundingBox; - 
let min = bounds.minimumWorld; - let max = bounds.maximumWorld; - - for (let i = 1; i < this.scene.meshes.length; i++) { - bounds = this.scene.meshes[i].getBoundingInfo().boundingBox; - min = BABYLON.Vector3.Minimize(min, bounds.minimumWorld); - max = BABYLON.Vector3.Maximize(max, bounds.maximumWorld); - } - - const extent = max.subtract(min).scale(0.5); - const size = extent.length(); - - center = BABYLON.Vector3.Center(min, max); - - scaleFactor = standardSize / size; - } - this.triangleCount = 0; - this.scene.meshes.forEach((mesh) => { - mesh.setParent(parentNode); - if (mesh.getTotalVertices() > 0) { - this.triangleCount += mesh.getTotalIndices() / 3; - } - }); - parentNode.position = center.scale(-1 * scaleFactor); - parentNode.scaling.scaleInPlace(scaleFactor); - - // Run render loop - this.engine.runRenderLoop(() => { - this.scene.render(); - }); - } - - dispose() { - if (this.scene) { - this.scene.dispose(); - } - if (this.engine) { - this.engine.dispose(); - } - window.removeEventListener("resize", this.handleResize); - } - - async capture(): Promise { - if (!this.engine || !this.camera) return null; - const cachedColor = this.scene.clearColor; - this.scene.clearColor = BABYLON.Color4.FromHexString("#00000000"); - let data = await new Promise((resolve) => { - BABYLON.Tools.CreateScreenshotUsingRenderTarget(this.engine, this.camera, 512, (result) => { - resolve(result); - }); - }); - this.scene.clearColor = cachedColor; - return data; - } - - setRenderMode(mode: string) { - this.scene.forceWireframe = mode === "wireframe"; - } - - getStats(): { name: string; value: any }[] { - const fps = this.engine.getFps().toFixed(); - const triangleCount = this.triangleCount.toLocaleString(); - return [ - { name: "FPS", value: fps }, - { name: "Triangles", value: triangleCount }, - ]; - } -} diff --git a/spaces/echozf/dfsg/README.md b/spaces/echozf/dfsg/README.md deleted file mode 100644 index cbdef5e53b3671b7e750454980d74aa1b6778e59..0000000000000000000000000000000000000000 --- a/spaces/echozf/dfsg/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Dfsg -emoji: 📚 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: bigscience-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/eddydpan/clip-recycling/app.py b/spaces/eddydpan/clip-recycling/app.py deleted file mode 100644 index aafe03ed9c07e9cef62a6e8da5c0c982664973ad..0000000000000000000000000000000000000000 --- a/spaces/eddydpan/clip-recycling/app.py +++ /dev/null @@ -1,70 +0,0 @@ -import gradio as gr -import torch -from PIL import Image -import open_clip - -model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') -tokenizer = open_clip.get_tokenizer('ViT-B-32') - -material_list = [] -disposal_list = [] - -with open("results.txt", "r") as f: - results = f.readlines() - for line in results: - disposal = line.split("\'")[1] - disposal_list.append(disposal.strip()) - - material = line.split(" [")[0] - material_list.append(material.strip()) # Trim any leading/trailing whitespace - f.close() - -text = tokenizer(material_list) - -def process_image(image_input, material_input): - - results = {} - float_values = [] - image = preprocess(image_input).unsqueeze(0) - - with torch.no_grad(), torch.cuda.amp.autocast(): - image_features = model.encode_image(image) - text_features = model.encode_text(text) - image_features /= image_features.norm(dim=-1, keepdim=True) - 
text_features /= text_features.norm(dim=-1, keepdim=True) - text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1) - - #print("Label probs:", text_probs) - - counter = 0 - for row in text_probs: - for column in row: - float_values.append(float(column)) - results[float(column)] = [ material_list[counter], disposal_list[counter] ] - counter += 1 - - sorted_float_values = sorted(float_values, reverse=True) - # print(sorted_float_values) - - - index = -1 - for i in range(len(material_list)): - if material_list[i] == material_input: - index = i - break - if index == -1: - material_accuracy = None - else: - material_accuracy = disposal_list[index] + " - " + material_list[index] + ": " + str(float_values[index]) - first = results[sorted_float_values[0]][1] + " - " + results[sorted_float_values[0]][0] + ": " + str(sorted_float_values[0]) - second = results[sorted_float_values[1]][1] + " - " + results[sorted_float_values[1]][0] + ": " + str(sorted_float_values[1]) - third = results[sorted_float_values[2]][1] + " - " + results[sorted_float_values[2]][0] + ": " + str(sorted_float_values[2]) - - return [first, second, third, material_accuracy] - - -inputs = [gr.inputs.Image(type="pil"), gr.inputs.Dropdown(material_list)] -outputs = [gr.outputs.Textbox(label="Top Result"), gr.outputs.Textbox(label="Second Result"), gr.outputs.Textbox(label="Third Result"), gr.outputs.Textbox(label="Material Accuracy")] - -interface = gr.Interface(fn=process_image, inputs=inputs, outputs=outputs) -interface.launch() diff --git a/spaces/editing-images/project/static/css/bulma-carousel.min.css b/spaces/editing-images/project/static/css/bulma-carousel.min.css deleted file mode 100644 index 4d4b7d103e0013f64e4dedd2ad0b2947cc0d11a5..0000000000000000000000000000000000000000 --- a/spaces/editing-images/project/static/css/bulma-carousel.min.css +++ /dev/null @@ -1 +0,0 @@ -@-webkit-keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.slider{position:relative;width:100%}.slider-container{display:flex;flex-wrap:nowrap;flex-direction:row;overflow:hidden;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);min-height:100%}.slider-container.is-vertical{flex-direction:column}.slider-container .slider-item{flex:none}.slider-container .slider-item .image.is-covered img{-o-object-fit:cover;object-fit:cover;-o-object-position:center center;object-position:center center;height:100%;width:100%}.slider-container .slider-item .video-container{height:0;padding-bottom:0;padding-top:56.25%;margin:0;position:relative}.slider-container .slider-item .video-container.is-1by1,.slider-container .slider-item .video-container.is-square{padding-top:100%}.slider-container .slider-item .video-container.is-4by3{padding-top:75%}.slider-container .slider-item .video-container.is-21by9{padding-top:42.857143%}.slider-container .slider-item .video-container embed,.slider-container .slider-item .video-container iframe,.slider-container .slider-item .video-container object{position:absolute;top:0;left:0;width:100%!important;height:100%!important}.slider-navigation-next,.slider-navigation-previous{display:flex;justify-content:center;align-items:center;position:absolute;width:42px;height:42px;background:#fff center center no-repeat;background-size:20px 20px;border:1px solid #fff;border-radius:25091983px;box-shadow:0 
2px 5px #3232321a;top:50%;margin-top:-20px;left:0;cursor:pointer;transition:opacity .3s,-webkit-transform .3s;transition:transform .3s,opacity .3s;transition:transform .3s,opacity .3s,-webkit-transform .3s}.slider-navigation-next:hover,.slider-navigation-previous:hover{-webkit-transform:scale(1.2);transform:scale(1.2)}.slider-navigation-next.is-hidden,.slider-navigation-previous.is-hidden{display:none;opacity:0}.slider-navigation-next svg,.slider-navigation-previous svg{width:25%}.slider-navigation-next{left:auto;right:0;background:#fff center center no-repeat;background-size:20px 20px}.slider-pagination{display:none;justify-content:center;align-items:center;position:absolute;bottom:0;left:0;right:0;padding:.5rem 1rem;text-align:center}.slider-pagination .slider-page{background:#fff;width:10px;height:10px;border-radius:25091983px;display:inline-block;margin:0 3px;box-shadow:0 2px 5px #3232321a;transition:-webkit-transform .3s;transition:transform .3s;transition:transform .3s,-webkit-transform .3s;cursor:pointer}.slider-pagination .slider-page.is-active,.slider-pagination .slider-page:hover{-webkit-transform:scale(1.4);transform:scale(1.4)}@media screen and (min-width:800px){.slider-pagination{display:flex}}.hero.has-carousel{position:relative}.hero.has-carousel+.hero-body,.hero.has-carousel+.hero-footer,.hero.has-carousel+.hero-head{z-index:10;overflow:hidden}.hero.has-carousel .hero-carousel{position:absolute;top:0;left:0;bottom:0;right:0;height:auto;border:none;margin:auto;padding:0;z-index:0}.hero.has-carousel .hero-carousel .slider{width:100%;max-width:100%;overflow:hidden;height:100%!important;max-height:100%;z-index:0}.hero.has-carousel .hero-carousel .slider .has-background{max-height:100%}.hero.has-carousel .hero-carousel .slider .has-background .is-background{-o-object-fit:cover;object-fit:cover;-o-object-position:center center;object-position:center center;height:100%;width:100%}.hero.has-carousel .hero-body{margin:0 3rem;z-index:10} \ No newline at end of file diff --git a/spaces/eson/tokenizer-arena/vocab/prompt_clue/__init__.py b/spaces/eson/tokenizer-arena/vocab/prompt_clue/__init__.py deleted file mode 100644 index 4e9e5df8e3ad9516f906abc5ea68c226a9da93f7..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/prompt_clue/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ - -import os -from transformers import AutoTokenizer, T5Tokenizer - - -os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" - -CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) -TOKENIZER_DIR = os.path.join(CURRENT_DIR, "PromptCLUE-base-v1-5") - - -# tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_DIR) -tokenizer = T5Tokenizer.from_pretrained(TOKENIZER_DIR) diff --git a/spaces/evaluate-comparison/mcnemar/app.py b/spaces/evaluate-comparison/mcnemar/app.py deleted file mode 100644 index 6d5b0970c9d00743c31656a2d5c993402d3e9263..0000000000000000000000000000000000000000 --- a/spaces/evaluate-comparison/mcnemar/app.py +++ /dev/null @@ -1,6 +0,0 @@ -import evaluate -from evaluate.utils import launch_gradio_widget - - -module = evaluate.load("mcnemar", module_type="comparison") -launch_gradio_widget(module) diff --git a/spaces/evaluate-measurement/regard/regard.py b/spaces/evaluate-measurement/regard/regard.py deleted file mode 100644 index 5189f0b938e465e0b0554d0a14c5e96e8f1b4a66..0000000000000000000000000000000000000000 --- a/spaces/evaluate-measurement/regard/regard.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright 2020 The HuggingFace Evaluate Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" Regard measurement. """ - -from collections import defaultdict -from operator import itemgetter -from statistics import mean - -import datasets -from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline - -import evaluate - - -logger = evaluate.logging.get_logger(__name__) - - -_CITATION = """ -@article{https://doi.org/10.48550/arxiv.1909.01326, - doi = {10.48550/ARXIV.1909.01326}, - url = {https://arxiv.org/abs/1909.01326}, - author = {Sheng, Emily and Chang, Kai-Wei and Natarajan, Premkumar and Peng, Nanyun}, - title = {The Woman Worked as a Babysitter: On Biases in Language Generation}, - publisher = {arXiv}, - year = {2019} -} - -""" - -_DESCRIPTION = """\ -Regard aims to measure language polarity towards and social perceptions of a demographic (e.g. gender, race, sexual orientation). -""" - -_KWARGS_DESCRIPTION = """ -Compute the regard of the input sentences. - -Args: - `data` (list of str): prediction/candidate sentences, e.g. sentences describing a given demographic group. - `references` (list of str) (optional): reference/comparison sentences, e.g. sentences describing a different demographic group to compare against. - `aggregation` (str) (optional): determines the type of aggregation performed. - If set to `None`, the difference between the regard scores for the two categories is returned. - Otherwise: - - 'average' : returns the average regard for each category (negative, positive, neutral, other) for each group - - 'maximum': returns the maximum regard for each group - -Returns: - With only `data` as input (default config): - `regard` : the regard scores of each string in the input list (if no aggregation is specified) - `average_regard`: the average regard for each category (negative, positive, neutral, other) (if `aggregation` = `average`) - `max_regard`: the maximum regard across all input strings (if `aggregation` = `maximum`) - With `data` and `references` as input (`compare` config): - `regard_difference`: the difference between the regard scores for the two groups (if no aggregation is specified) - `average_data_regard` and 'average_references_regard': the average regard for each category (negative, positive, neutral, other) (if `aggregation` = `average`) - `max_data_regard` and 'max_references_regard': the maximum regard for each group (if `aggregation` = `maximum`) - -Examples: - -Example 1 (single input): ->>> regard = evaluate.load("regard") ->>> group1 = ['xyz are described as mean', 'xyz are thought of as being too ambitious'] ->>> results = regard.compute(data = group1) ->>> for d in results['regard']: -... 
print({l['label']: round(l['score'],2) for l in d}) -{'neutral': 0.95, 'positive': 0.02, 'negative': 0.02, 'other': 0.01} -{'negative': 0.97, 'other': 0.02, 'neutral': 0.01, 'positive': 0.0} - -Example 2 (comparison mode): ->>> regard = evaluate.load("regard", "compare") ->>> group1 = ['xyz are described as mean', 'xyz are thought of as being too ambitious'] ->>> group2 = ['xyz are known for making too much noise', 'xyz are described as often violent'] ->>> results = regard.compute(data = group1, references = group2) ->>> print({k: round(v, 2) for k, v in results['regard_difference'].items()}) -{'neutral': 0.46, 'positive': 0.01, 'negative': -0.46, 'other': -0.01} - -Example 3 (returns the maximum regard score per category): ->>> regard = evaluate.load("regard", "compare") ->>> group1 = ['xyz are described as mean', 'xyz are thought of as being too ambitious'] ->>> group2 = ['xyz are known for making too much noise', 'xyz are described as often violent'] ->>> results = regard.compute(data = group1, references = group2, aggregation = "maximum") ->>> print({k: round(v, 2) for k, v in results['max_data_regard'].items()}) -{'neutral': 0.95, 'positive': 0.02, 'negative': 0.97, 'other': 0.02} ->>> print({k: round(v, 2) for k, v in results['max_references_regard'].items()}) -{'negative': 0.98, 'other': 0.04, 'neutral': 0.03, 'positive': 0.0} - -Example 4 (returns the average regard score): ->>> regard = evaluate.load("regard", "compare") ->>> group1 = ['xyz are described as mean', 'xyz are thought of as being too ambitious'] ->>> group2 = ['xyz are known for making too much noise', 'xyz are described as often violent'] ->>> results = regard.compute(data = group1, references = group2, aggregation = "average") ->>> print({k: round(v, 2) for k, v in results['average_data_regard'].items()}) -{'neutral': 0.48, 'positive': 0.01, 'negative': 0.5, 'other': 0.01} ->>> print({k: round(v, 2) for k, v in results['average_references_regard'].items()}) -{'negative': 0.96, 'other': 0.02, 'neutral': 0.02, 'positive': 0.0} -""" - - -def regard(group, regard_classifier): - group_scores = defaultdict(list) - group_regard = regard_classifier(group) - for pred in group_regard: - for pred_score in pred: - group_scores[pred_score["label"]].append(pred_score["score"]) - return group_regard, dict(group_scores) - - -@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) -class Regard(evaluate.Measurement): - def _info(self): - if self.config_name not in ["compare", "default"]: - raise KeyError("You should supply a configuration name selected in " '["config", "default"]') - return evaluate.MeasurementInfo( - module_type="measurement", - description=_DESCRIPTION, - citation=_CITATION, - inputs_description=_KWARGS_DESCRIPTION, - features=datasets.Features( - { - "data": datasets.Value("string", id="sequence"), - "references": datasets.Value("string", id="sequence"), - } - if self.config_name == "compare" - else { - "data": datasets.Value("string", id="sequence"), - } - ), - codebase_urls=[], - reference_urls=[], - ) - - def _download_and_prepare(self, dl_manager): - regard_tokenizer = AutoTokenizer.from_pretrained("sasha/regardv3") - regard_model = AutoModelForSequenceClassification.from_pretrained("sasha/regardv3") - self.regard_classifier = pipeline( - "text-classification", model=regard_model, top_k=4, tokenizer=regard_tokenizer, truncation=True - ) - - def _compute( - self, - data, - references=None, - aggregation=None, - ): - if self.config_name == "compare": - pred_scores, pred_regard = 
regard(data, self.regard_classifier) - ref_scores, ref_regard = regard(references, self.regard_classifier) - pred_mean = {k: mean(v) for k, v in pred_regard.items()} - pred_max = {k: max(v) for k, v in pred_regard.items()} - ref_mean = {k: mean(v) for k, v in ref_regard.items()} - ref_max = {k: max(v) for k, v in ref_regard.items()} - if aggregation == "maximum": - return { - "max_data_regard": pred_max, - "max_references_regard": ref_max, - } - elif aggregation == "average": - return {"average_data_regard": pred_mean, "average_references_regard": ref_mean} - else: - return {"regard_difference": {key: pred_mean[key] - ref_mean.get(key, 0) for key in pred_mean}} - else: - pred_scores, pred_regard = regard(data, self.regard_classifier) - pred_mean = {k: mean(v) for k, v in pred_regard.items()} - pred_max = {k: max(v) for k, v in pred_regard.items()} - if aggregation == "maximum": - return {"max_regard": pred_max} - elif aggregation == "average": - return {"average_regard": pred_mean} - else: - return {"regard": pred_scores} diff --git a/spaces/falterWliame/Face_Mask_Detection/Enter Password For The Encrypted File Setup AutoCAD OEM 2016 Activation.md b/spaces/falterWliame/Face_Mask_Detection/Enter Password For The Encrypted File Setup AutoCAD OEM 2016 Activation.md deleted file mode 100644 index 4b132a201363d6ed6e0e31f35daca01273b77bc6..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Enter Password For The Encrypted File Setup AutoCAD OEM 2016 Activation.md +++ /dev/null @@ -1,6 +0,0 @@ -

Enter Password For The Encrypted File Setup AutoCAD OEM 2016 Activation


Download ---> https://urlca.com/2uDdST



- -Then start Outlook, you will need to enter password again, you will get asked to ... edit a Word/Excel/PowerPoint data file, or receive encrypted connection error) it ... to configure. by wildjuvenile. error code 80090034 encryption failed outlook: 0. ... and 2016 / 0x4004f00c office 2010 could not be activated / error 0x4004f00c ... 4d29de3e1b
-
-
-

diff --git a/spaces/falterWliame/Face_Mask_Detection/Marvelous Designer 2 Crack Id And Passwordl !!LINK!!.md b/spaces/falterWliame/Face_Mask_Detection/Marvelous Designer 2 Crack Id And Passwordl !!LINK!!.md deleted file mode 100644 index 7b9a5ea64a32464c88fe3dff02eb0d9d641b2c3e..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Marvelous Designer 2 Crack Id And Passwordl !!LINK!!.md +++ /dev/null @@ -1,11 +0,0 @@ -
-

Marvelous Designer Crack is a powerful software application that enables the user to create and simulate realistic 3D virtual garments. It was created by CLO Virtual Fashion, a company based in Seoul, Korea that specializes in 3D fashion technology. The first official version of CLO Virtual Fashion was released in 2022 under the name CLO3D. Although it had some initial issues such as instability and the absence of basic functions like Undo, it managed to attract early users who recognized the potential of this innovative tool.

- -

CLO3D was designed to provide a fast and easy way to create 3D garments without the need for sewing or pattern making skills. Users can import 2D patterns or draw them directly on the 3D model, and then adjust the fit and style using various tools and parameters. CLO3D also allows users to preview the draping and animation of the garments in real time, as well as export them to other 3D software or render them with high-quality effects.

-

Marvelous Designer 2 Crack Id And Passwordl


Downloadhttps://urlca.com/2uDcnb



- -

Since its launch, CLO3D has been updated and improved with new features and functions, such as layer cloning, fabric presets, avatar customization, and more. It has also been renamed to Marvelous Designer Crack to reflect its enhanced capabilities and appeal to a wider range of users. Marvelous Designer Crack is now used by many professionals and enthusiasts in the fashion, film, game, and animation industries, as well as by hobbyists and students who want to express their creativity and imagination through 3D clothing design.

- -

Marvelous Designer Crack is not only a software application, but also a platform for sharing and learning. Users can access the online community where they can showcase their work, get feedback, exchange tips and tricks, and download free resources. Users can also enroll in online courses and tutorials that teach them how to use Marvelous Designer Crack effectively and efficiently. Marvelous Designer Crack aims to empower users with the ability to create stunning 3D virtual garments that can be used for various purposes and projects.

d5da3c52bf
-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Geometry Dash World Hile APK and Enjoy the Rhythm-based Action Platforming.md b/spaces/fatiXbelha/sd/Download Geometry Dash World Hile APK and Enjoy the Rhythm-based Action Platforming.md deleted file mode 100644 index c9fb09334f9cc8ba975ed4a5d625a0466409ce0c..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Geometry Dash World Hile APK and Enjoy the Rhythm-based Action Platforming.md +++ /dev/null @@ -1,123 +0,0 @@ - -

Geometry Dash World Hile Apk Indir: How to Download and Play the Game with Cheats

-

If you are a fan of Geometry Dash, you might have heard of Geometry Dash World, a spin-off of the popular rhythm-based action platformer game. Geometry Dash World is a free-to-play game that offers new levels, music, monsters, and secrets for you to discover. But what if you want to play the game with cheats? In this article, we will show you how to download and play Geometry Dash World hile apk, a modified version of the game that allows you to use cheats and hacks.

-

geometry dash world hile apk indir


Download File: https://urllie.com/2uNFB1



-

What is Geometry Dash World?

-

Geometry Dash World is a game developed by RobTop Games, the creator of Geometry Dash. It was released in December 2016 as a spin-off of the original game. Geometry Dash World features two worlds with five levels each, as well as online levels created by the community. The game also introduces new icons, colors, vaults, quests, rewards, and chests for you to unlock.

-

A rhythm-based action platformer game

-

The gameplay of Geometry Dash World is similar to that of Geometry Dash. You control a geometric icon that jumps, flies, and flips through various obstacles and hazards. The game is synchronized with the music, so you have to time your moves to the rhythm. It is very challenging and demands quick reflexes and precision.

-

A spin-off of Geometry Dash with new features and levels

-

Geometry Dash World is not a sequel or an update of Geometry Dash. It is a separate game that showcases some of the new features that will be included in the upcoming version 2.1 of Geometry Dash. These features include new icons, colors, vaults, quests, rewards, and chests. The game also has new levels with different themes and music from Dex Arson, Waterflame, and F-777.

-


-

A free-to-play game available for Android and iOS devices

-

Geometry Dash World is a free-to-play game that you can download from Google Play or App Store. You do not need to pay anything to play the game, but you can purchase optional in-app items such as diamonds or keys. The game is compatible with most Android and iOS devices, but it may not run smoothly on older or low-end devices.

-

How to Download Geometry Dash World Hile Apk?

-

If you want to play Geometry Dash World with cheats, you will need to download a hile apk. A hile apk is a modified version of the original apk file that has been hacked or altered to enable cheats and hacks. A hile apk can give you unlimited resources, unlock all items, remove ads, or bypass restrictions in the game.

-

What is a hile apk?

-

An apk file is the format used by Android devices to install applications. A hile apk file is an apk file that has been modified by hackers or modders to enable cheats and hacks in the game. A hile apk file can be downloaded from various websites or forums that offer such files.

-

Where to find and download a hile apk for Geometry Dash World?

-

There are many websites and forums that offer hile apk files for various games, including Geometry Dash World. However, not all of them are safe or reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Some of them may also provide fake or outdated hile apk files that do not work or cause errors in the game. Therefore, you should be careful and cautious when looking for and downloading a hile apk file for Geometry Dash World.

-

One of the websites that claims to offer a working and safe hile apk file for Geometry Dash World is [Geometry Dash World Hile Apk Indir]. This website provides a download link for the hile apk file, as well as a video tutorial on how to install and use it. The website also claims that the hile apk file has the following features:

- -
    -
  • Unlimited diamonds and orbs
  • -
  • Unlock all icons, colors, and vaults
  • -
  • Unlock all levels and online levels
  • -
  • Remove all ads and pop-ups
  • -
  • No root or jailbreak required
  • -
-

We cannot verify the authenticity or safety of this website or the hile apk file it provides. We do not endorse or recommend using any hile apk files for Geometry Dash World or any other games. Use them at your own risk and discretion.

-

How to install and run a hile apk on your device?

-

If you have decided to download and use a hile apk file for Geometry Dash World, you will need to follow these steps to install and run it on your device:

- -
    -
  1. Make sure you have enough storage space on your device and a stable internet connection.
  2. -
  3. Uninstall the original Geometry Dash World game from your device if you have it installed.
  4. -
  5. Enable the installation of unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
  6. -
  7. Download the hile apk file from the website or forum of your choice. Make sure it is compatible with your device and has the latest version of the game.
  8. -
  9. Locate the downloaded hile apk file on your device and tap on it to install it.
  10. -
  11. Wait for the installation process to finish and then launch the game from your app drawer or home screen.
  12. -
  13. Enjoy playing Geometry Dash World with cheats and hacks!
  14. -
-

How to Play Geometry Dash World with Cheats?

-

Now that you have installed and run the hile apk file for Geometry Dash World, you might be wondering how to play the game with cheats. Cheats are special codes or commands that can alter the gameplay or give you an advantage in the game. Cheats can help you complete levels faster, unlock items easier, or have more fun in the game.

-

What are the benefits of using cheats in Geometry Dash World?

-

Some of the benefits of using cheats in Geometry Dash World are:

- -
    -
  • You can get unlimited diamonds and orbs, which are the main currencies in the game. You can use them to buy icons, colors, keys, chests, and other items in the shop.
  • -
  • You can unlock all icons, colors, and vaults, which are cosmetic features that let you customize your icon's appearance and style.
  • -
  • You can unlock all levels and online levels, which are the main challenges in the game. You can play any level you want without having to complete the previous ones or meet any requirements.
  • -
  • You can remove all ads and pop-ups, which are annoying interruptions that can distract you from playing the game or make you wait for a certain time before resuming the game.
  • -
  • You can have more fun and excitement in the game by trying out different cheats and hacks that can change the gameplay or add new elements to the game.
  • -
-

What are some of the cheats available for Geometry Dash World?

-

Some of the cheats available for Geometry Dash World are:

- -
    -
  • NoClip: This cheat allows you to fly through any obstacle or hazard without dying or crashing. You can activate this cheat by tapping on the screen twice while playing a level.
  • -
  • SpeedHack: This cheat allows you to change the speed of the game. You can make it faster or slower depending on your preference. You can activate this cheat by swiping up or down on the screen while playing a level.
  • -
  • InfiniteJump: This cheat allows you to jump infinitely without falling or landing. You can activate this cheat by tapping on the screen once while playing a level.
  • -
  • GodMode: This cheat makes you invincible and immune to any damage or death. You can activate this cheat by tapping on the screen three times while playing a level.
  • -
  • AutoComplete: This cheat allows you to automatically complete any level without playing it. You can activate this cheat by tapping on the screen four times while playing a level.
  • -
-

These are just some of the cheats available for Geometry Dash World. There may be more cheats and hacks that you can find or create using the hile apk file. However, you should be careful and responsible when using cheats and hacks in the game. Do not use them to ruin the game experience for yourself or others. Do not use them to gain an unfair advantage or to harm other players. Do not use them to violate the terms and conditions of the game or the platform. Use them at your own risk and discretion.

-

How to activate and use cheats in Geometry Dash World?

-

To activate and use cheats in Geometry Dash World, you will need to follow these steps:

- -
    -
  1. Launch the game from your device and select a level or an online level that you want to play.
  2. -
  3. While playing the level, perform the corresponding gesture or action on the screen to activate the cheat that you want to use.
  4. -
  5. Enjoy playing the level with the cheat activated. You can deactivate the cheat by performing the same gesture or action again.
  6. -
  7. Repeat steps 2 and 3 for any other cheats that you want to use in the game.
  8. -
-

Conclusion

-

Geometry Dash World is a fun and challenging game that tests your skills and reflexes in a rhythm-based action platformer. However, if you want to play the game with cheats, you will need to download and install a hile apk file that enables cheats and hacks in the game. A hile apk file is a modified version of the original apk file that has been hacked or altered by hackers or modders. You can find and download a hile apk file for Geometry Dash World from various websites or forums that offer such files, but you should be careful and cautious when doing so. Some of them may be unsafe or unreliable, and some of them may not work or cause errors in the game. You should also be careful and responsible when using cheats and hacks in the game. Do not use them to ruin the game experience for yourself or others. Do not use them to gain an unfair advantage or to harm other players. Do not use them to violate the terms and conditions of the game or the platform. Use them at your own risk and discretion.

-

FAQs

-

Here are some of the frequently asked questions about Geometry Dash World hile apk indir:

- -

Q: Is Geometry Dash World hile apk indir legal?

-

A: No, Geometry Dash World hile apk indir is not legal. It is a form of piracy and hacking that violates the intellectual property rights of RobTop Games, the developer of Geometry Dash World. It also violates the terms and conditions of Google Play and App Store, the platforms that distribute Geometry Dash World. Using Geometry Dash World hile apk indir may result in legal actions or penalties from RobTop Games or Google Play or App Store.

--

Q: Is Geometry Dash World hile apk indir safe?

-

A: No, Geometry Dash World hile apk indir is not safe. It may contain viruses, malware, or spyware that can harm your device or steal your personal information. It may also provide fake or outdated hile apk files that do not work or cause errors in the game. It may also expose you to security risks or cyberattacks from hackers or modders who created or distributed the hile apk files.

--

Q: Is Geometry Dash World hile apk indir worth it?

-

A: No, Geometry Dash World hile apk indir is not worth it. It may ruin the game experience for yourself or others by making it too easy or boring. It may also take away the satisfaction and achievement of completing levels legitimately and fairly. It may also cause you to lose interest or motivation in playing the game or improving your skills and reflexes.

--

Q: How can I play Geometry Dash World without cheats?

-

A: You can play Geometry Dash World without cheats by downloading and installing the original Geometry Dash World game from Google Play or App Store. You can then play the game normally and enjoy its features and challenges. You can also improve your skills and reflexes by practicing and replaying levels, watching tutorials and tips from other players, and joining online communities and forums related to Geometry Dash World.

--

Q: How can I get more diamonds and orbs in Geometry Dash World?

-

A: You can get more diamonds and orbs in Geometry Dash World by playing the game regularly and completing levels, quests, and achievements. You can also collect diamonds and orbs from chests, vaults, and daily rewards. You can also watch ads or purchase diamonds or keys with real money to get more diamonds and orbs.

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Ludo Vectors for Free - High-Quality Images and PSD Files.md b/spaces/fatiXbelha/sd/Download Ludo Vectors for Free - High-Quality Images and PSD Files.md deleted file mode 100644 index 2babc3c3aecac1d3ea3d9c6a5fc69fde45b9755c..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Ludo Vectors for Free - High-Quality Images and PSD Files.md +++ /dev/null @@ -1,98 +0,0 @@ - -

Ludo Vector APK Download: How to Play and Win the Classic Board Game Online

-

Ludo is one of the most popular board games in the world, enjoyed by millions of people of all ages. It is a simple yet exciting game, where you have to race your four tokens from start to finish according to the rolls of a single die. But what if you want to play Ludo online with your friends and family, without having to buy a physical board or pieces? Well, that's where Ludo Vector comes in.

-

ludo vector apk download


Download File ☆☆☆ https://urllie.com/2uNCxD



-

Ludo Vector is an online version of the classic board game Ludo, with stunning graphics, smooth gameplay, and various features and modes. You can play with your friends and family, or against the computer, or even with players from around the world. You can also customize your tokens, board, dice, and background, and choose from different themes and styles. Ludo Vector is a fun and addictive game that will keep you entertained for hours.

-

In this article, we will show you how to download Ludo Vector APK on your Android device, how to play Ludo Vector online, what are the features and modes of Ludo Vector, what are the reviews and ratings of Ludo Vector, and what are some tips and tricks to always win in Ludo Vector. So, let's get started!

-

How to download Ludo Vector APK on your Android device

-

If you want to play Ludo Vector on your Android device, you will need to download the APK file from a trusted source. APK stands for Android Package Kit, and it is a file format that allows you to install apps that are not available on the Google Play Store. However, you need to be careful when downloading APK files, as some of them may contain viruses or malware that can harm your device.

-

-

One of the best sources for downloading Ludo Vector APK is Freepik, a website that offers free vectors, photos, and PSD files for personal and commercial use. You can find thousands of Ludo vectors on Freepik, which you can use to create your own custom board and pieces for Ludo Vector. To download Ludo Vector APK from Freepik, follow these steps (an optional way to check the downloaded file before installing it is sketched after the list):

-
    -
  1. Go to Freepik and search for "Ludo vector".
  2. Choose a vector that you like and click on it.
  3. Click on the "Download" button at the bottom right corner of the screen.
  4. Select the "Free License" option and click on "Download".
  5. You will be redirected to a page where you have to enter your email address and password to create a free account on Freepik.
  6. After creating your account, you will receive an email with a link to download the vector file.
  7. Click on the link and save the file on your device.
  8. Open the file manager app on your device and locate the downloaded file.
  9. Tap on the file and select "Install".
  10. You may need to enable "Unknown Sources" in your device settings to allow the installation of apps from sources other than the Google Play Store.
  11. Wait for the installation to complete and then open Ludo Vector on your device.
-
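If the page you download from publishes a checksum for the file (many do not), you can verify the APK before installing it. The snippet below is a minimal, generic sketch in Python: the file name and the expected SHA-256 value are placeholders, not details taken from Freepik or from the game itself.

```python
import hashlib

# Placeholder values -- replace with the real file path and the checksum
# published by the download page, if one is provided at all.
APK_PATH = "ludo_vector.apk"
EXPECTED_SHA256 = "0000000000000000000000000000000000000000000000000000000000000000"

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the SHA-256 digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of_file(APK_PATH)
    if actual == EXPECTED_SHA256.lower():
        print("Checksum matches the published value.")
    else:
        print("Checksum mismatch -- do not install this file.")
        print("Got:     ", actual)
        print("Expected:", EXPECTED_SHA256.lower())
```

A matching checksum only proves the file was not corrupted or swapped in transit; it does not make a modified APK safe, so scanning the file with an antivirus app is still worth doing.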

How to play Ludo Vector online with your friends and family

-

Playing Ludo Vector online with your friends and family is very easy and fun. You can either create a private room or join an existing one. To create a private room, follow these steps:

-
    -
  1. Open Ludo Vector on your device and tap on the "Online" option.
  2. Select the "Create Room" option.
  3. Select the number of players (2-4) and the game mode (Classic or Quick).
  4. Select the board theme (Wooden or Marble) and the token color (Red, Blue, Green, or Yellow).
  5. Select the dice …

    Ludo Vector game tips and tricks to always win

    -

    Ludo Vector is a game of luck and strategy, where you have to use your skills and tactics to beat your opponents. Here are some tips and tricks that can help you always win in Ludo Vector:

    -
      -
    • Use the special dice wisely: In the quick mode, you can use the special dice that have different effects, such as doubling your move, skipping your turn, swapping your token with another player's token, or moving your token to any spot on the board. However, you have to use them wisely, as they can also backfire on you. For example, if you swap your token with another player's token, you may end up in a worse position than before. Or if you move your token to any spot on the board, you may land on a spot where your opponent can capture you. So, use the special dice only when you are sure that they will benefit you and not harm you.
    • Protect your tokens: One of the main objectives of Ludo Vector is to protect your tokens from being captured by your opponents. You can do this by moving your tokens to safe spots, such as the star spots or the home spots. You can also move your tokens in pairs or groups, so that they can support each other and block your opponent's moves. You can also avoid moving your tokens to risky spots, such as the spots near your opponent's home base or the spots where your opponent can easily reach you.
    • Be aggressive: Another main objective of Ludo Vector is to capture your opponent's tokens and send them back to their home base. You can do this by being aggressive and attacking your opponent whenever you get a chance. You can also use the special dice to move your token to a spot where you can capture your opponent's token. You can also try to capture your opponent's token when they are close to the center of the board, so that they have to start all over again.
    • Be flexible: Ludo Vector is a game of uncertainty, where anything can happen at any time. You have to be flexible and adapt to the changing situations of the game. You have to be ready to change your strategy and plan according to the rolls of the dice, the moves of your opponents, and the effects of the special dice. You have to be prepared for any surprises and challenges that may come your way.
    -

    Conclusion: Summarize the main points and benefits of Ludo Vector

    -

    Ludo Vector is an online version of the classic board game Ludo, with stunning graphics, smooth gameplay, and various features and modes. You can play with your friends and family, or against the computer, or even with players from around the world. You can also customize your tokens, board, dice, and background, and choose from different themes and styles. Ludo Vector is a fun and addictive game that will keep you entertained for hours.

    -

    If you want to play Ludo Vector on your Android device, you can download the APK file from Freepik, a website that offers free vectors, photos, and PSD files for personal and commercial use. You can also create your own custom board and pieces by using Ludo vectors from Freepik. To play Ludo Vector online with your friends and family, you can either create a private room or join an existing one. To always win in Ludo Vector, you have to use your skills and tactics, and follow some tips and tricks that we have shared in this article.

    -

    Ludo Vector is a game of luck and strategy, where you have to race your tokens from start to finish according to the rolls of a single die. It is a simple yet exciting game, where you have to protect your tokens from being captured by your opponents, and capture their tokens whenever you get a chance. It is a game that will test your patience, perseverance, and intelligence.

    -

    Ludo Vector is a game that will bring back your childhood memories, and make new ones with your friends and family. It is a game that will make you laugh, cry, cheer, and scream. It is a game that will make you happy.

    -

    FAQs: Answer some common questions about Ludo Vector

    -

    Here are some common questions and answers about Ludo Vector:

    -
      -
    1. Q: Is Ludo Vector free?
    2. A: Yes, Ludo Vector is free to download and play on Android devices. However, it may contain some in-app purchases or ads that may enhance your gaming experience or support the developers.
    3. Q: Is Ludo Vector safe?
    4. A: Yes, Ludo Vector is safe to download and play on Android devices. However, you need to be careful when downloading the APK file from Freepik, as some of them may contain viruses or malware that can harm your device. You should always scan the file before installing it, and use a reliable antivirus app on your device.
    5. Q: Is Ludo Vector offline?
    6. A: No, Ludo Vector is not offline. You need an internet connection to play Ludo Vector online with other players. However, you can play Ludo Vector offline with the computer, or with your friends and family on the same device.
    7. Q: How many players can play Ludo Vector?
    8. A: Ludo Vector can be played by up to four players online, either with your friends and family or with random players from around the world. You can also play Ludo Vector by yourself against the computer, or with another player on the same device.
    9. Q: How can I contact the developers of Ludo Vector?
    10. A: You can contact the developers of Ludo Vector by sending them an email at ludovector@gmail.com. You can also follow them on their social media accounts, such as Facebook, Twitter, and Instagram, where they post updates and news about the game.
    19. -

    -
    -
    \ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/models/transformer_utils.py b/spaces/fclong/summary/fengshen/models/transformer_utils.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/felixz/open_llm_leaderboard/src/display_models/model_metadata_type.py b/spaces/felixz/open_llm_leaderboard/src/display_models/model_metadata_type.py deleted file mode 100644 index 61dec0a30f758c9350f59d028f07b0a782a3f317..0000000000000000000000000000000000000000 --- a/spaces/felixz/open_llm_leaderboard/src/display_models/model_metadata_type.py +++ /dev/null @@ -1,555 +0,0 @@ -from dataclasses import dataclass -from enum import Enum -from typing import Dict - - -@dataclass -class ModelInfo: - name: str - symbol: str # emoji - - -class ModelType(Enum): - PT = ModelInfo(name="pretrained", symbol="🟢") - FT = ModelInfo(name="fine-tuned", symbol="🔶") - IFT = ModelInfo(name="instruction-tuned", symbol="⭕") - RL = ModelInfo(name="RL-tuned", symbol="🟦") - Unknown = ModelInfo(name="Unknown", symbol="?") - - def to_str(self, separator=" "): - return f"{self.value.symbol}{separator}{self.value.name}" - - -MODEL_TYPE_METADATA: Dict[str, ModelType] = { - "tiiuae/falcon-180B": ModelType.PT, - "tiiuae/falcon-180B-chat": ModelType.RL, - "microsoft/phi-1_5": ModelType.PT, - "Qwen/Qwen-7B": ModelType.PT, - "Qwen/Qwen-7B-Chat": ModelType.RL, - "notstoic/PygmalionCoT-7b": ModelType.IFT, - "aisquared/dlite-v1-355m": ModelType.IFT, - "aisquared/dlite-v1-1_5b": ModelType.IFT, - "aisquared/dlite-v1-774m": ModelType.IFT, - "aisquared/dlite-v1-124m": ModelType.IFT, - "aisquared/chopt-2_7b": ModelType.IFT, - "aisquared/dlite-v2-124m": ModelType.IFT, - "aisquared/dlite-v2-774m": ModelType.IFT, - "aisquared/dlite-v2-1_5b": ModelType.IFT, - "aisquared/chopt-1_3b": ModelType.IFT, - "aisquared/dlite-v2-355m": ModelType.IFT, - "augtoma/qCammel-13": ModelType.IFT, - "Aspik101/Llama-2-7b-hf-instruct-pl-lora_unload": ModelType.IFT, - "Aspik101/vicuna-7b-v1.3-instruct-pl-lora_unload": ModelType.IFT, - "TheBloke/alpaca-lora-65B-HF": ModelType.FT, - "TheBloke/tulu-7B-fp16": ModelType.IFT, - "TheBloke/guanaco-7B-HF": ModelType.FT, - "TheBloke/koala-7B-HF": ModelType.FT, - "TheBloke/wizardLM-7B-HF": ModelType.IFT, - "TheBloke/airoboros-13B-HF": ModelType.IFT, - "TheBloke/koala-13B-HF": ModelType.FT, - "TheBloke/Wizard-Vicuna-7B-Uncensored-HF": ModelType.FT, - "TheBloke/dromedary-65b-lora-HF": ModelType.IFT, - "TheBloke/wizardLM-13B-1.0-fp16": ModelType.IFT, - "TheBloke/WizardLM-13B-V1-1-SuperHOT-8K-fp16": ModelType.FT, - "TheBloke/Wizard-Vicuna-30B-Uncensored-fp16": ModelType.FT, - "TheBloke/wizard-vicuna-13B-HF": ModelType.IFT, - "TheBloke/UltraLM-13B-fp16": ModelType.IFT, - "TheBloke/OpenAssistant-FT-7-Llama-30B-HF": ModelType.FT, - "TheBloke/vicuna-13B-1.1-HF": ModelType.IFT, - "TheBloke/guanaco-13B-HF": ModelType.FT, - "TheBloke/guanaco-65B-HF": ModelType.FT, - "TheBloke/airoboros-7b-gpt4-fp16": ModelType.IFT, - "TheBloke/llama-30b-supercot-SuperHOT-8K-fp16": ModelType.IFT, - "TheBloke/Llama-2-13B-fp16": ModelType.PT, - "TheBloke/llama-2-70b-Guanaco-QLoRA-fp16": ModelType.FT, - "TheBloke/landmark-attention-llama7b-fp16": ModelType.IFT, - "TheBloke/Planner-7B-fp16": ModelType.IFT, - "TheBloke/Wizard-Vicuna-13B-Uncensored-HF": ModelType.FT, - "TheBloke/gpt4-alpaca-lora-13B-HF": ModelType.IFT, - "TheBloke/gpt4-x-vicuna-13B-HF": ModelType.IFT, - "TheBloke/gpt4-alpaca-lora_mlp-65B-HF": ModelType.IFT, - "TheBloke/tulu-13B-fp16": 
ModelType.IFT, - "TheBloke/VicUnlocked-alpaca-65B-QLoRA-fp16": ModelType.IFT, - "TheBloke/Llama-2-70B-fp16": ModelType.IFT, - "TheBloke/WizardLM-30B-fp16": ModelType.IFT, - "TheBloke/robin-13B-v2-fp16": ModelType.FT, - "TheBloke/robin-33B-v2-fp16": ModelType.FT, - "TheBloke/Vicuna-13B-CoT-fp16": ModelType.IFT, - "TheBloke/Vicuna-33B-1-3-SuperHOT-8K-fp16": ModelType.IFT, - "TheBloke/Wizard-Vicuna-30B-Superhot-8K-fp16": ModelType.FT, - "TheBloke/Nous-Hermes-13B-SuperHOT-8K-fp16": ModelType.IFT, - "TheBloke/GPlatty-30B-SuperHOT-8K-fp16": ModelType.FT, - "TheBloke/CAMEL-33B-Combined-Data-SuperHOT-8K-fp16": ModelType.IFT, - "TheBloke/Chinese-Alpaca-33B-SuperHOT-8K-fp16": ModelType.IFT, - "jphme/orca_mini_v2_ger_7b": ModelType.IFT, - "Ejafa/vicuna_7B_vanilla_1.1": ModelType.FT, - "kevinpro/Vicuna-13B-CoT": ModelType.IFT, - "AlekseyKorshuk/pygmalion-6b-vicuna-chatml": ModelType.FT, - "AlekseyKorshuk/chatml-pyg-v1": ModelType.FT, - "concedo/Vicuzard-30B-Uncensored": ModelType.FT, - "concedo/OPT-19M-ChatSalad": ModelType.FT, - "concedo/Pythia-70M-ChatSalad": ModelType.FT, - "digitous/13B-HyperMantis": ModelType.IFT, - "digitous/Adventien-GPTJ": ModelType.FT, - "digitous/Alpacino13b": ModelType.IFT, - "digitous/GPT-R": ModelType.IFT, - "digitous/Javelin-R": ModelType.IFT, - "digitous/Javalion-GPTJ": ModelType.IFT, - "digitous/Javalion-R": ModelType.IFT, - "digitous/Skegma-GPTJ": ModelType.FT, - "digitous/Alpacino30b": ModelType.IFT, - "digitous/Janin-GPTJ": ModelType.FT, - "digitous/Janin-R": ModelType.FT, - "digitous/Javelin-GPTJ": ModelType.FT, - "SaylorTwift/gpt2_test": ModelType.PT, - "anton-l/gpt-j-tiny-random": ModelType.FT, - "Andron00e/YetAnother_Open-Llama-3B-LoRA-OpenOrca": ModelType.FT, - "Lazycuber/pyg-instruct-wizardlm": ModelType.FT, - "Lazycuber/Janemalion-6B": ModelType.FT, - "IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1": ModelType.FT, - "IDEA-CCNL/Ziya-LLaMA-13B-v1": ModelType.IFT, - "dsvv-cair/alpaca-cleaned-llama-30b-bf16": ModelType.FT, - "gpt2-medium": ModelType.PT, - "camel-ai/CAMEL-13B-Combined-Data": ModelType.IFT, - "camel-ai/CAMEL-13B-Role-Playing-Data": ModelType.FT, - "camel-ai/CAMEL-33B-Combined-Data": ModelType.IFT, - "PygmalionAI/pygmalion-6b": ModelType.FT, - "PygmalionAI/metharme-1.3b": ModelType.IFT, - "PygmalionAI/pygmalion-1.3b": ModelType.FT, - "PygmalionAI/pygmalion-350m": ModelType.FT, - "PygmalionAI/pygmalion-2.7b": ModelType.FT, - "medalpaca/medalpaca-7b": ModelType.FT, - "lilloukas/Platypus-30B": ModelType.IFT, - "lilloukas/GPlatty-30B": ModelType.FT, - "mncai/chatdoctor": ModelType.FT, - "chaoyi-wu/MedLLaMA_13B": ModelType.FT, - "LoupGarou/WizardCoder-Guanaco-15B-V1.0": ModelType.IFT, - "LoupGarou/WizardCoder-Guanaco-15B-V1.1": ModelType.FT, - "hakurei/instruct-12b": ModelType.IFT, - "hakurei/lotus-12B": ModelType.FT, - "shibing624/chinese-llama-plus-13b-hf": ModelType.IFT, - "shibing624/chinese-alpaca-plus-7b-hf": ModelType.IFT, - "shibing624/chinese-alpaca-plus-13b-hf": ModelType.IFT, - "mosaicml/mpt-7b-instruct": ModelType.IFT, - "mosaicml/mpt-30b-chat": ModelType.IFT, - "mosaicml/mpt-7b-storywriter": ModelType.FT, - "mosaicml/mpt-30b-instruct": ModelType.IFT, - "mosaicml/mpt-7b-chat": ModelType.IFT, - "mosaicml/mpt-30b": ModelType.PT, - "Corianas/111m": ModelType.IFT, - "Corianas/Quokka_1.3b": ModelType.IFT, - "Corianas/256_5epoch": ModelType.FT, - "Corianas/Quokka_256m": ModelType.IFT, - "Corianas/Quokka_590m": ModelType.IFT, - "Corianas/gpt-j-6B-Dolly": ModelType.FT, - "Corianas/Quokka_2.7b": ModelType.IFT, - "cyberagent/open-calm-7b": ModelType.FT, - 
"Aspik101/Nous-Hermes-13b-pl-lora_unload": ModelType.IFT, - "THUDM/chatglm2-6b": ModelType.IFT, - "MetaIX/GPT4-X-Alpasta-30b": ModelType.IFT, - "NYTK/PULI-GPTrio": ModelType.PT, - "EleutherAI/pythia-1.3b": ModelType.PT, - "EleutherAI/pythia-2.8b-deduped": ModelType.PT, - "EleutherAI/gpt-neo-125m": ModelType.PT, - "EleutherAI/pythia-160m": ModelType.PT, - "EleutherAI/gpt-neo-2.7B": ModelType.PT, - "EleutherAI/pythia-1b-deduped": ModelType.PT, - "EleutherAI/pythia-6.7b": ModelType.PT, - "EleutherAI/pythia-70m-deduped": ModelType.PT, - "EleutherAI/gpt-neox-20b": ModelType.PT, - "EleutherAI/pythia-1.4b-deduped": ModelType.PT, - "EleutherAI/pythia-2.7b": ModelType.PT, - "EleutherAI/pythia-6.9b-deduped": ModelType.PT, - "EleutherAI/pythia-70m": ModelType.PT, - "EleutherAI/gpt-j-6b": ModelType.PT, - "EleutherAI/pythia-12b-deduped": ModelType.PT, - "EleutherAI/gpt-neo-1.3B": ModelType.PT, - "EleutherAI/pythia-410m-deduped": ModelType.PT, - "EleutherAI/pythia-160m-deduped": ModelType.PT, - "EleutherAI/polyglot-ko-12.8b": ModelType.PT, - "EleutherAI/pythia-12b": ModelType.PT, - "roneneldan/TinyStories-33M": ModelType.PT, - "roneneldan/TinyStories-28M": ModelType.PT, - "roneneldan/TinyStories-1M": ModelType.PT, - "roneneldan/TinyStories-8M": ModelType.PT, - "roneneldan/TinyStories-3M": ModelType.PT, - "jerryjalapeno/nart-100k-7b": ModelType.FT, - "lmsys/vicuna-13b-v1.3": ModelType.IFT, - "lmsys/vicuna-7b-v1.3": ModelType.IFT, - "lmsys/vicuna-13b-v1.1": ModelType.IFT, - "lmsys/vicuna-13b-delta-v1.1": ModelType.IFT, - "lmsys/vicuna-7b-delta-v1.1": ModelType.IFT, - "abhiramtirumala/DialoGPT-sarcastic-medium": ModelType.FT, - "haonan-li/bactrian-x-llama-13b-merged": ModelType.IFT, - "Gryphe/MythoLogic-13b": ModelType.IFT, - "Gryphe/MythoBoros-13b": ModelType.IFT, - "pillowtalks-ai/delta13b": ModelType.FT, - "wannaphong/openthaigpt-0.1.0-beta-full-model_for_open_llm_leaderboard": ModelType.FT, - "bigscience/bloom-7b1": ModelType.PT, - "bigcode/tiny_starcoder_py": ModelType.PT, - "bigcode/starcoderplus": ModelType.FT, - "bigcode/gpt_bigcode-santacoder": ModelType.PT, - "bigcode/starcoder": ModelType.PT, - "Open-Orca/OpenOrca-Preview1-13B": ModelType.IFT, - "microsoft/DialoGPT-large": ModelType.FT, - "microsoft/DialoGPT-small": ModelType.FT, - "microsoft/DialoGPT-medium": ModelType.FT, - "microsoft/CodeGPT-small-py": ModelType.FT, - "Tincando/fiction_story_generator": ModelType.FT, - "Pirr/pythia-13b-deduped-green_devil": ModelType.FT, - "Aeala/GPT4-x-AlpacaDente2-30b": ModelType.FT, - "Aeala/GPT4-x-AlpacaDente-30b": ModelType.FT, - "Aeala/GPT4-x-Alpasta-13b": ModelType.FT, - "Aeala/VicUnlocked-alpaca-30b": ModelType.IFT, - "Tap-M/Luna-AI-Llama2-Uncensored": ModelType.FT, - "illuin/test-custom-llama": ModelType.FT, - "dvruette/oasst-llama-13b-2-epochs": ModelType.FT, - "dvruette/oasst-gpt-neox-20b-1000-steps": ModelType.FT, - "dvruette/llama-13b-pretrained-dropout": ModelType.PT, - "dvruette/llama-13b-pretrained": ModelType.PT, - "dvruette/llama-13b-pretrained-sft-epoch-1": ModelType.FT, - "dvruette/llama-13b-pretrained-sft-do2": ModelType.FT, - "dvruette/oasst-gpt-neox-20b-3000-steps": ModelType.FT, - "dvruette/oasst-pythia-12b-pretrained-sft": ModelType.FT, - "dvruette/oasst-pythia-6.9b-4000-steps": ModelType.FT, - "dvruette/gpt-neox-20b-full-precision": ModelType.FT, - "dvruette/oasst-llama-13b-1000-steps": ModelType.FT, - "openlm-research/open_llama_7b_700bt_preview": ModelType.PT, - "openlm-research/open_llama_7b": ModelType.PT, - "openlm-research/open_llama_7b_v2": ModelType.PT, - 
"openlm-research/open_llama_3b": ModelType.PT, - "openlm-research/open_llama_13b": ModelType.PT, - "openlm-research/open_llama_3b_v2": ModelType.PT, - "PocketDoc/Dans-PileOfSets-Mk1-llama-13b-merged": ModelType.IFT, - "GeorgiaTechResearchInstitute/galpaca-30b": ModelType.IFT, - "GeorgiaTechResearchInstitute/starcoder-gpteacher-code-instruct": ModelType.IFT, - "databricks/dolly-v2-7b": ModelType.IFT, - "databricks/dolly-v2-3b": ModelType.IFT, - "databricks/dolly-v2-12b": ModelType.IFT, - "Rachneet/gpt2-xl-alpaca": ModelType.FT, - "Locutusque/gpt2-conversational-or-qa": ModelType.FT, - "psyche/kogpt": ModelType.FT, - "NbAiLab/nb-gpt-j-6B-alpaca": ModelType.IFT, - "Mikael110/llama-2-7b-guanaco-fp16": ModelType.FT, - "Mikael110/llama-2-13b-guanaco-fp16": ModelType.FT, - "Fredithefish/CrimsonPajama": ModelType.IFT, - "Fredithefish/RedPajama-INCITE-Chat-3B-ShareGPT-11K": ModelType.FT, - "Fredithefish/ScarletPajama-3B-HF": ModelType.FT, - "Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4": ModelType.IFT, - "acrastt/RedPajama-INCITE-Chat-Instruct-3B-V1": ModelType.IFT, - "eachadea/vicuna-13b-1.1": ModelType.FT, - "eachadea/vicuna-7b-1.1": ModelType.FT, - "eachadea/vicuna-13b": ModelType.FT, - "openaccess-ai-collective/wizard-mega-13b": ModelType.IFT, - "openaccess-ai-collective/manticore-13b": ModelType.IFT, - "openaccess-ai-collective/manticore-30b-chat-pyg-alpha": ModelType.IFT, - "openaccess-ai-collective/minotaur-13b": ModelType.IFT, - "openaccess-ai-collective/minotaur-13b-fixed": ModelType.IFT, - "openaccess-ai-collective/hippogriff-30b-chat": ModelType.IFT, - "openaccess-ai-collective/manticore-13b-chat-pyg": ModelType.IFT, - "pythainlp/wangchanglm-7.5B-sft-enth": ModelType.IFT, - "pythainlp/wangchanglm-7.5B-sft-en-sharded": ModelType.IFT, - "euclaise/gpt-neox-122m-minipile-digits": ModelType.FT, - "stabilityai/StableBeluga1-Delta": ModelType.IFT, - "stabilityai/stablelm-tuned-alpha-7b": ModelType.IFT, - "stabilityai/StableBeluga2": ModelType.IFT, - "stabilityai/StableBeluga-13B": ModelType.IFT, - "stabilityai/StableBeluga-7B": ModelType.IFT, - "stabilityai/stablelm-base-alpha-7b": ModelType.PT, - "stabilityai/stablelm-base-alpha-3b": ModelType.PT, - "stabilityai/stablelm-tuned-alpha-3b": ModelType.IFT, - "alibidaran/medical_transcription_generator": ModelType.FT, - "CalderaAI/30B-Lazarus": ModelType.IFT, - "CalderaAI/13B-BlueMethod": ModelType.IFT, - "CalderaAI/13B-Ouroboros": ModelType.IFT, - "KoboldAI/OPT-13B-Erebus": ModelType.FT, - "KoboldAI/GPT-J-6B-Janeway": ModelType.FT, - "KoboldAI/GPT-J-6B-Shinen": ModelType.FT, - "KoboldAI/fairseq-dense-2.7B": ModelType.PT, - "KoboldAI/OPT-6B-nerys-v2": ModelType.FT, - "KoboldAI/GPT-NeoX-20B-Skein": ModelType.FT, - "KoboldAI/PPO_Pygway-6b-Mix": ModelType.FT, - "KoboldAI/fairseq-dense-6.7B": ModelType.PT, - "KoboldAI/fairseq-dense-125M": ModelType.PT, - "KoboldAI/OPT-13B-Nerybus-Mix": ModelType.FT, - "KoboldAI/OPT-2.7B-Erebus": ModelType.FT, - "KoboldAI/OPT-350M-Nerys-v2": ModelType.FT, - "KoboldAI/OPT-2.7B-Nerys-v2": ModelType.FT, - "KoboldAI/OPT-2.7B-Nerybus-Mix": ModelType.FT, - "KoboldAI/OPT-13B-Nerys-v2": ModelType.FT, - "KoboldAI/GPT-NeoX-20B-Erebus": ModelType.FT, - "KoboldAI/OPT-6.7B-Erebus": ModelType.FT, - "KoboldAI/fairseq-dense-355M": ModelType.PT, - "KoboldAI/OPT-6.7B-Nerybus-Mix": ModelType.FT, - "KoboldAI/GPT-J-6B-Adventure": ModelType.FT, - "KoboldAI/OPT-350M-Erebus": ModelType.FT, - "KoboldAI/GPT-J-6B-Skein": ModelType.FT, - "KoboldAI/OPT-30B-Erebus": ModelType.FT, - "klosax/pythia-160m-deduped-step92k-193bt": 
ModelType.PT, - "klosax/open_llama_3b_350bt_preview": ModelType.PT, - "klosax/openllama-3b-350bt": ModelType.PT, - "klosax/pythia-70m-deduped-step44k-92bt": ModelType.PT, - "klosax/open_llama_13b_600bt_preview": ModelType.PT, - "klosax/open_llama_7b_400bt_preview": ModelType.PT, - "kfkas/Llama-2-ko-7b-Chat": ModelType.IFT, - "WeOpenML/Alpaca-7B-v1": ModelType.IFT, - "WeOpenML/PandaLM-Alpaca-7B-v1": ModelType.IFT, - "TFLai/gpt2-turkish-uncased": ModelType.FT, - "ehartford/WizardLM-13B-Uncensored": ModelType.IFT, - "ehartford/dolphin-llama-13b": ModelType.IFT, - "ehartford/Wizard-Vicuna-30B-Uncensored": ModelType.FT, - "ehartford/WizardLM-30B-Uncensored": ModelType.IFT, - "ehartford/Wizard-Vicuna-13B-Uncensored": ModelType.FT, - "ehartford/WizardLM-7B-Uncensored": ModelType.IFT, - "ehartford/based-30b": ModelType.FT, - "ehartford/Wizard-Vicuna-7B-Uncensored": ModelType.FT, - "wahaha1987/llama_7b_sharegpt94k_fastchat": ModelType.FT, - "wahaha1987/llama_13b_sharegpt94k_fastchat": ModelType.FT, - "OpenAssistant/oasst-sft-1-pythia-12b": ModelType.FT, - "OpenAssistant/stablelm-7b-sft-v7-epoch-3": ModelType.IFT, - "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5": ModelType.FT, - "OpenAssistant/pythia-12b-sft-v8-2.5k-steps": ModelType.IFT, - "OpenAssistant/pythia-12b-sft-v8-7k-steps": ModelType.IFT, - "OpenAssistant/pythia-12b-pre-v8-12.5k-steps": ModelType.IFT, - "OpenAssistant/llama2-13b-orca-8k-3319": ModelType.IFT, - "junelee/wizard-vicuna-13b": ModelType.FT, - "BreadAi/gpt-YA-1-1_160M": ModelType.PT, - "BreadAi/MuseCan": ModelType.PT, - "BreadAi/MusePy-1-2": ModelType.PT, - "BreadAi/DiscordPy": ModelType.PT, - "BreadAi/PM_modelV2": ModelType.PT, - "BreadAi/gpt-Youtube": ModelType.PT, - "BreadAi/StoryPy": ModelType.FT, - "julianweng/Llama-2-7b-chat-orcah": ModelType.FT, - "AGI-inc/lora_moe_7b_baseline": ModelType.FT, - "AGI-inc/lora_moe_7b": ModelType.FT, - "togethercomputer/GPT-NeoXT-Chat-Base-20B": ModelType.IFT, - "togethercomputer/RedPajama-INCITE-Chat-7B-v0.1": ModelType.IFT, - "togethercomputer/RedPajama-INCITE-Instruct-7B-v0.1": ModelType.IFT, - "togethercomputer/RedPajama-INCITE-7B-Base": ModelType.PT, - "togethercomputer/RedPajama-INCITE-7B-Instruct": ModelType.IFT, - "togethercomputer/RedPajama-INCITE-Base-3B-v1": ModelType.PT, - "togethercomputer/Pythia-Chat-Base-7B": ModelType.IFT, - "togethercomputer/RedPajama-INCITE-Base-7B-v0.1": ModelType.PT, - "togethercomputer/GPT-JT-6B-v1": ModelType.IFT, - "togethercomputer/GPT-JT-6B-v0": ModelType.IFT, - "togethercomputer/RedPajama-INCITE-Chat-3B-v1": ModelType.IFT, - "togethercomputer/RedPajama-INCITE-7B-Chat": ModelType.IFT, - "togethercomputer/RedPajama-INCITE-Instruct-3B-v1": ModelType.IFT, - "Writer/camel-5b-hf": ModelType.IFT, - "Writer/palmyra-base": ModelType.PT, - "MBZUAI/LaMini-GPT-1.5B": ModelType.IFT, - "MBZUAI/lamini-cerebras-111m": ModelType.IFT, - "MBZUAI/lamini-neo-1.3b": ModelType.IFT, - "MBZUAI/lamini-cerebras-1.3b": ModelType.IFT, - "MBZUAI/lamini-cerebras-256m": ModelType.IFT, - "MBZUAI/LaMini-GPT-124M": ModelType.IFT, - "MBZUAI/lamini-neo-125m": ModelType.IFT, - "TehVenom/DiffMerge-DollyGPT-Pygmalion": ModelType.FT, - "TehVenom/PPO_Shygmalion-6b": ModelType.FT, - "TehVenom/Dolly_Shygmalion-6b-Dev_V8P2": ModelType.FT, - "TehVenom/Pygmalion_AlpacaLora-7b": ModelType.FT, - "TehVenom/PPO_Pygway-V8p4_Dev-6b": ModelType.FT, - "TehVenom/Dolly_Malion-6b": ModelType.FT, - "TehVenom/PPO_Shygmalion-V8p4_Dev-6b": ModelType.FT, - "TehVenom/ChanMalion": ModelType.FT, - "TehVenom/GPT-J-Pyg_PPO-6B": ModelType.IFT, - 
"TehVenom/Pygmalion-13b-Merged": ModelType.FT, - "TehVenom/Metharme-13b-Merged": ModelType.IFT, - "TehVenom/Dolly_Shygmalion-6b": ModelType.FT, - "TehVenom/GPT-J-Pyg_PPO-6B-Dev-V8p4": ModelType.IFT, - "georgesung/llama2_7b_chat_uncensored": ModelType.FT, - "vicgalle/gpt2-alpaca": ModelType.IFT, - "vicgalle/alpaca-7b": ModelType.FT, - "vicgalle/gpt2-alpaca-gpt4": ModelType.IFT, - "facebook/opt-350m": ModelType.PT, - "facebook/opt-125m": ModelType.PT, - "facebook/xglm-4.5B": ModelType.PT, - "facebook/opt-2.7b": ModelType.PT, - "facebook/opt-6.7b": ModelType.PT, - "facebook/galactica-30b": ModelType.PT, - "facebook/opt-13b": ModelType.PT, - "facebook/opt-66b": ModelType.PT, - "facebook/xglm-7.5B": ModelType.PT, - "facebook/xglm-564M": ModelType.PT, - "facebook/opt-30b": ModelType.PT, - "golaxy/gogpt-7b": ModelType.FT, - "golaxy/gogpt2-7b": ModelType.FT, - "golaxy/gogpt-7b-bloom": ModelType.FT, - "golaxy/gogpt-3b-bloom": ModelType.FT, - "psmathur/orca_mini_v2_7b": ModelType.IFT, - "psmathur/orca_mini_7b": ModelType.IFT, - "psmathur/orca_mini_3b": ModelType.IFT, - "psmathur/orca_mini_v2_13b": ModelType.IFT, - "gpt2-xl": ModelType.PT, - "lxe/Cerebras-GPT-2.7B-Alpaca-SP": ModelType.FT, - "Monero/Manticore-13b-Chat-Pyg-Guanaco": ModelType.FT, - "Monero/WizardLM-Uncensored-SuperCOT-StoryTelling-30b": ModelType.IFT, - "Monero/WizardLM-13b-OpenAssistant-Uncensored": ModelType.IFT, - "Monero/WizardLM-30B-Uncensored-Guanaco-SuperCOT-30b": ModelType.IFT, - "jzjiao/opt-1.3b-rlhf": ModelType.FT, - "HuggingFaceH4/starchat-beta": ModelType.IFT, - "KnutJaegersberg/gpt-2-xl-EvolInstruct": ModelType.IFT, - "KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct": ModelType.IFT, - "KnutJaegersberg/galactica-orca-wizardlm-1.3b": ModelType.IFT, - "openchat/openchat_8192": ModelType.IFT, - "openchat/openchat_v2": ModelType.IFT, - "openchat/openchat_v2_w": ModelType.IFT, - "ausboss/llama-13b-supercot": ModelType.IFT, - "ausboss/llama-30b-supercot": ModelType.IFT, - "Neko-Institute-of-Science/metharme-7b": ModelType.IFT, - "Neko-Institute-of-Science/pygmalion-7b": ModelType.FT, - "SebastianSchramm/Cerebras-GPT-111M-instruction": ModelType.IFT, - "victor123/WizardLM-13B-1.0": ModelType.IFT, - "OpenBuddy/openbuddy-openllama-13b-v7-fp16": ModelType.FT, - "OpenBuddy/openbuddy-llama2-13b-v8.1-fp16": ModelType.FT, - "OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16": ModelType.FT, - "baichuan-inc/Baichuan-7B": ModelType.PT, - "tiiuae/falcon-40b-instruct": ModelType.IFT, - "tiiuae/falcon-40b": ModelType.PT, - "tiiuae/falcon-7b": ModelType.PT, - "YeungNLP/firefly-llama-13b": ModelType.FT, - "YeungNLP/firefly-llama-13b-v1.2": ModelType.FT, - "YeungNLP/firefly-llama2-13b": ModelType.FT, - "YeungNLP/firefly-ziya-13b": ModelType.FT, - "shaohang/Sparse0.5_OPT-1.3": ModelType.FT, - "xzuyn/Alpacino-SuperCOT-13B": ModelType.IFT, - "xzuyn/MedicWizard-7B": ModelType.FT, - "xDAN-AI/xDAN_13b_l2_lora": ModelType.FT, - "beomi/KoAlpaca-Polyglot-5.8B": ModelType.FT, - "beomi/llama-2-ko-7b": ModelType.IFT, - "Salesforce/codegen-6B-multi": ModelType.PT, - "Salesforce/codegen-16B-nl": ModelType.PT, - "Salesforce/codegen-6B-nl": ModelType.PT, - "ai-forever/rugpt3large_based_on_gpt2": ModelType.FT, - "gpt2-large": ModelType.PT, - "frank098/orca_mini_3b_juniper": ModelType.FT, - "frank098/WizardLM_13B_juniper": ModelType.FT, - "FPHam/Free_Sydney_13b_HF": ModelType.FT, - "huggingface/llama-13b": ModelType.PT, - "huggingface/llama-7b": ModelType.PT, - "huggingface/llama-65b": ModelType.PT, - "huggingface/llama-30b": ModelType.PT, - "Henk717/chronoboros-33B": 
ModelType.IFT, - "jondurbin/airoboros-13b-gpt4-1.4": ModelType.IFT, - "jondurbin/airoboros-7b": ModelType.IFT, - "jondurbin/airoboros-7b-gpt4": ModelType.IFT, - "jondurbin/airoboros-7b-gpt4-1.1": ModelType.IFT, - "jondurbin/airoboros-7b-gpt4-1.2": ModelType.IFT, - "jondurbin/airoboros-7b-gpt4-1.3": ModelType.IFT, - "jondurbin/airoboros-7b-gpt4-1.4": ModelType.IFT, - "jondurbin/airoboros-l2-7b-gpt4-1.4.1": ModelType.IFT, - "jondurbin/airoboros-l2-13b-gpt4-1.4.1": ModelType.IFT, - "jondurbin/airoboros-l2-70b-gpt4-1.4.1": ModelType.IFT, - "jondurbin/airoboros-13b": ModelType.IFT, - "jondurbin/airoboros-33b-gpt4-1.4": ModelType.IFT, - "jondurbin/airoboros-33b-gpt4-1.2": ModelType.IFT, - "jondurbin/airoboros-65b-gpt4-1.2": ModelType.IFT, - "ariellee/SuperPlatty-30B": ModelType.IFT, - "danielhanchen/open_llama_3b_600bt_preview": ModelType.FT, - "cerebras/Cerebras-GPT-256M": ModelType.PT, - "cerebras/Cerebras-GPT-1.3B": ModelType.PT, - "cerebras/Cerebras-GPT-13B": ModelType.PT, - "cerebras/Cerebras-GPT-2.7B": ModelType.PT, - "cerebras/Cerebras-GPT-111M": ModelType.PT, - "cerebras/Cerebras-GPT-6.7B": ModelType.PT, - "Yhyu13/oasst-rlhf-2-llama-30b-7k-steps-hf": ModelType.RL, - "Yhyu13/llama-30B-hf-openassitant": ModelType.FT, - "NousResearch/Nous-Hermes-Llama2-13b": ModelType.IFT, - "NousResearch/Nous-Hermes-llama-2-7b": ModelType.IFT, - "NousResearch/Redmond-Puffin-13B": ModelType.IFT, - "NousResearch/Nous-Hermes-13b": ModelType.IFT, - "project-baize/baize-v2-7b": ModelType.IFT, - "project-baize/baize-v2-13b": ModelType.IFT, - "LLMs/WizardLM-13B-V1.0": ModelType.FT, - "LLMs/AlpacaGPT4-7B-elina": ModelType.FT, - "wenge-research/yayi-7b": ModelType.FT, - "wenge-research/yayi-7b-llama2": ModelType.FT, - "wenge-research/yayi-13b-llama2": ModelType.FT, - "yhyhy3/open_llama_7b_v2_med_instruct": ModelType.IFT, - "llama-anon/instruct-13b": ModelType.IFT, - "huggingtweets/jerma985": ModelType.FT, - "huggingtweets/gladosystem": ModelType.FT, - "huggingtweets/bladeecity-jerma985": ModelType.FT, - "huggyllama/llama-13b": ModelType.PT, - "huggyllama/llama-65b": ModelType.PT, - "FabbriSimo01/Facebook_opt_1.3b_Quantized": ModelType.PT, - "upstage/Llama-2-70b-instruct": ModelType.IFT, - "upstage/Llama-2-70b-instruct-1024": ModelType.IFT, - "upstage/llama-65b-instruct": ModelType.IFT, - "upstage/llama-30b-instruct-2048": ModelType.IFT, - "upstage/llama-30b-instruct": ModelType.IFT, - "WizardLM/WizardLM-13B-1.0": ModelType.IFT, - "WizardLM/WizardLM-13B-V1.1": ModelType.IFT, - "WizardLM/WizardLM-13B-V1.2": ModelType.IFT, - "WizardLM/WizardLM-30B-V1.0": ModelType.IFT, - "WizardLM/WizardCoder-15B-V1.0": ModelType.IFT, - "gpt2": ModelType.PT, - "keyfan/vicuna-chinese-replication-v1.1": ModelType.IFT, - "nthngdy/pythia-owt2-70m-100k": ModelType.FT, - "nthngdy/pythia-owt2-70m-50k": ModelType.FT, - "quantumaikr/KoreanLM-hf": ModelType.FT, - "quantumaikr/open_llama_7b_hf": ModelType.FT, - "quantumaikr/QuantumLM-70B-hf": ModelType.IFT, - "MayaPH/FinOPT-Lincoln": ModelType.FT, - "MayaPH/FinOPT-Franklin": ModelType.FT, - "MayaPH/GodziLLa-30B": ModelType.IFT, - "MayaPH/GodziLLa-30B-plus": ModelType.IFT, - "MayaPH/FinOPT-Washington": ModelType.FT, - "ogimgio/gpt-neo-125m-neurallinguisticpioneers": ModelType.FT, - "layoric/llama-2-13b-code-alpaca": ModelType.FT, - "CobraMamba/mamba-gpt-3b": ModelType.FT, - "CobraMamba/mamba-gpt-3b-v2": ModelType.FT, - "CobraMamba/mamba-gpt-3b-v3": ModelType.FT, - "timdettmers/guanaco-33b-merged": ModelType.FT, - "elinas/chronos-33b": ModelType.IFT, - "heegyu/RedTulu-Uncensored-3B-0719": 
ModelType.IFT, - "heegyu/WizardVicuna-Uncensored-3B-0719": ModelType.IFT, - "heegyu/WizardVicuna-3B-0719": ModelType.IFT, - "meta-llama/Llama-2-7b-chat-hf": ModelType.RL, - "meta-llama/Llama-2-7b-hf": ModelType.PT, - "meta-llama/Llama-2-13b-chat-hf": ModelType.RL, - "meta-llama/Llama-2-13b-hf": ModelType.PT, - "meta-llama/Llama-2-70b-chat-hf": ModelType.RL, - "meta-llama/Llama-2-70b-hf": ModelType.PT, - "xhyi/PT_GPTNEO350_ATG": ModelType.FT, - "h2oai/h2ogpt-gm-oasst1-en-1024-20b": ModelType.FT, - "h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt": ModelType.FT, - "h2oai/h2ogpt-oig-oasst1-512-6_9b": ModelType.IFT, - "h2oai/h2ogpt-oasst1-512-12b": ModelType.IFT, - "h2oai/h2ogpt-oig-oasst1-256-6_9b": ModelType.IFT, - "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt": ModelType.FT, - "h2oai/h2ogpt-oasst1-512-20b": ModelType.IFT, - "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2": ModelType.FT, - "h2oai/h2ogpt-gm-oasst1-en-1024-12b": ModelType.FT, - "h2oai/h2ogpt-gm-oasst1-multilang-1024-20b": ModelType.FT, - "bofenghuang/vigogne-13b-instruct": ModelType.IFT, - "bofenghuang/vigogne-13b-chat": ModelType.FT, - "bofenghuang/vigogne-2-7b-instruct": ModelType.IFT, - "bofenghuang/vigogne-7b-instruct": ModelType.IFT, - "bofenghuang/vigogne-7b-chat": ModelType.FT, - "Vmware/open-llama-7b-v2-open-instruct": ModelType.IFT, - "VMware/open-llama-0.7T-7B-open-instruct-v1.1": ModelType.IFT, - "ewof/koishi-instruct-3b": ModelType.IFT, - "gywy/llama2-13b-chinese-v1": ModelType.FT, - "GOAT-AI/GOAT-7B-Community": ModelType.FT, - "psyche/kollama2-7b": ModelType.FT, - "TheTravellingEngineer/llama2-7b-hf-guanaco": ModelType.FT, - "beaugogh/pythia-1.4b-deduped-sharegpt": ModelType.FT, - "augtoma/qCammel-70-x": ModelType.IFT, - "Lajonbot/Llama-2-7b-chat-hf-instruct-pl-lora_unload": ModelType.IFT, - "anhnv125/pygmalion-6b-roleplay": ModelType.FT, - "64bits/LexPodLM-13B": ModelType.FT, -} - - -def model_type_from_str(type): - if "fine-tuned" in type or "🔶" in type: - return ModelType.FT - if "pretrained" in type or "🟢" in type: - return ModelType.PT - if "RL-tuned" in type or "🟦" in type: - return ModelType.RL - if "instruction-tuned" in type or "⭕" in type: - return ModelType.IFT - return ModelType.Unknown diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Dr. Driving 2 MOD APK with Unlimited Gold and Ruby Features.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Dr. Driving 2 MOD APK with Unlimited Gold and Ruby Features.md deleted file mode 100644 index b3c83131f829d7698c00a53e933ea15f29ad545c..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Dr. Driving 2 MOD APK with Unlimited Gold and Ruby Features.md +++ /dev/null @@ -1,115 +0,0 @@ -
    -

    Dr Driving 2 Mod Apk: A Fun and Realistic Driving Simulation Game

    -

    Do you love driving games? Do you want to experience the thrill of driving on realistic roads and traffic? Do you want to unlock and customize your own cars and challenge other players online? If you answered yes to any of these questions, then you should try Dr Driving 2, one of the best driving simulation games for Android devices. And if you want to make the game even more fun and exciting, you should download Dr Driving 2 Mod Apk, which gives you unlimited money, rubies, cars, and more. In this article, we will tell you everything you need to know about Dr Driving 2 and its mod version. Read on to find out more.

    -

    dr driving 2 mod apk unlimited ruby


Download File: https://gohhs.com/2uPuYg



    -

    What is Dr Driving 2?

    -

    Dr Driving 2 is a driving simulation game developed by SUD Inc., the same company that created the popular Dr Parking series. Dr Driving 2 is the sequel to Dr Driving, which has over 100 million downloads on Google Play Store. Dr Driving 2 takes the driving game genre to a new level with its realistic graphics, physics, controls, and gameplay. You can drive various cars on different roads and environments, complete missions and challenges, compete with other players online, and customize your cars with upgrades and accessories. Dr Driving 2 is a free-to-play game that you can download from Google Play Store or App Store.

    -

    Features of Dr Driving 2

    -

    Realistic graphics and physics

    -

    Dr Driving 2 has stunning graphics that make you feel like you are driving in real life. The game has detailed car models, realistic road textures, dynamic weather effects, and smooth animations. The game also has realistic physics that simulate the car's behavior, such as acceleration, braking, steering, collision, damage, and more. You can adjust the camera angle to suit your preference, whether you want to see the car from behind, from the front, or from inside.

    -

    Various modes and missions

    -

    Dr Driving 2 has different modes and missions that test your driving skills and offer you fun and variety. You can play the career mode, where you have to complete various tasks and objectives, such as parking, racing, drifting, delivering, escaping, etc. You can also play the challenge mode, where you have to beat the time limit or score as high as possible. You can also play the endless mode, where you can drive freely without any restrictions or goals.

    -

    Online multiplayer and leaderboards

    -

    Dr Driving 2 has an online multiplayer mode that lets you compete with other players around the world. You can join or create a room and invite your friends or random players to join you. You can choose from different modes, such as team battle, survival, capture the flag, etc. You can also chat with other players using emojis or text messages. You can also check your ranking on the global or local leaderboards and see how you compare with other drivers.

    -

    Customizable cars and upgrades

    -

    Dr Driving 2 has a wide range of cars that you can unlock and drive. You can choose from different categories, such as compact, sedan, sports, SUV , etc. You can also customize your cars with different colors, stickers, wheels, spoilers, etc. You can also upgrade your cars with better engines, brakes, tires, suspension, etc. to improve their performance and handling.

    -

    What is Dr Driving 2 Mod Apk?

    -

    Dr Driving 2 Mod Apk is a modified version of Dr Driving 2 that gives you access to unlimited money, rubies, cars, and other features that are normally locked or require in-app purchases. With Dr Driving 2 Mod Apk, you can enjoy the game without any limitations or restrictions. You can buy and upgrade any car you want, play any mode or mission you want, and compete with other players online without any fear of losing.

    -

    Benefits of Dr Driving 2 Mod Apk

    -

    Unlimited money and rubies

    -

    Money and rubies are the main currencies in Dr Driving 2 that you can use to buy and upgrade cars, unlock modes and missions, and more. However, earning money and rubies in the game can be slow and tedious, especially if you want to get the best cars and upgrades. With Dr Driving 2 Mod Apk, you don't have to worry about that. You will get unlimited money and rubies as soon as you start the game. You can spend them as much as you want without running out.

    -


    -

    All cars unlocked and upgraded

    -

    Cars are the most important part of Dr Driving 2, as they determine your driving experience and performance. However, unlocking and upgrading cars in the game can be expensive and time-consuming, as you need to complete missions, earn money and rubies, and meet certain requirements. With Dr Driving 2 Mod Apk, you don't have to do that. You will get all the cars unlocked and upgraded to the maximum level as soon as you start the game. You can choose any car you want and drive it with ease.

    -

    No ads and no root required

    -

    Ads are annoying and distracting, especially when you are playing a game that requires concentration and focus. They can also interrupt your gameplay and ruin your mood. With Dr Driving 2 Mod Apk, you don't have to deal with ads anymore. The mod apk removes all the ads from the game, so you can enjoy a smooth and uninterrupted driving experience. Moreover, the mod apk does not require root access to work on your device, so you don't have to worry about voiding your warranty or risking your security.

    -

    How to download and install Dr Driving 2 Mod Apk?

    -

    If you are interested in downloading and installing Dr Driving 2 Mod Apk on your device, you can follow these simple steps:

    -

    Steps to download and install Dr Driving 2 Mod Apk

    -

    Download the mod apk file from a trusted source

    -

    The first step is to download the mod apk file from a reliable source that offers safe and virus-free downloads. You can use the link below to download the latest version of Dr Driving 2 Mod Apk for free.

    -

    Dr Driving 2 Mod Apk Download Link

    -

    Enable unknown sources in your device settings

    -

    The next step is to enable unknown sources in your device settings, so you can install apps from sources other than Google Play Store or App Store. To do this, go to your device settings > security > unknown sources > enable.

    -

    Install the mod apk file and launch the game

    -

    The final step is to install the mod apk file on your device by tapping on it and following the instructions on the screen. Once the installation is done, launch the game from your app drawer or home screen. You will see that you have unlimited money, rubies, cars, and other features in the game.
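As an alternative to tapping the file on the phone, an APK can also be sideloaded from a computer with Android's adb tool. This is not part of the article's own steps, just a generic sketch: it assumes adb (from the official platform-tools) is on your PATH, USB debugging is enabled on the device, and the placeholder file name matches the APK you actually downloaded.

```python
import subprocess

APK_PATH = "dr_driving_2_mod.apk"  # placeholder name, not an official file

def sideload(apk_path: str) -> None:
    """Install an APK on a USB-connected Android device via adb."""
    # "-r" keeps the app's data if an older version is already installed.
    result = subprocess.run(
        ["adb", "install", "-r", apk_path],
        capture_output=True,
        text=True,
    )
    print(result.stdout)
    if result.returncode != 0:
        print("Install failed:", result.stderr)

if __name__ == "__main__":
    sideload(APK_PATH)
```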

    -

    Enjoy the mod features and have fun driving

    -

    Now that you have installed Dr Driving 2 Mod Apk on your device, you can enjoy all the mod features and have fun driving on realistic roads and traffic. You can buy and upgrade any car you want, play any mode or mission you want, and compete with other players online without any fear of losing.

    -

    Conclusion

    -

    Dr Driving 2 is a fun and realistic driving simulation game that offers you a lot of features and variety. You can drive various cars on different roads and environments, complete missions and challenges, compete with other players online , and customize your cars with upgrades and accessories. Dr Driving 2 is a free-to-play game that you can download from Google Play Store or App Store. However, if you want to make the game even more fun and exciting, you should download Dr Driving 2 Mod Apk, which gives you unlimited money, rubies, cars, and other features that are normally locked or require in-app purchases. With Dr Driving 2 Mod Apk, you can enjoy the game without any limitations or restrictions. You can download and install Dr Driving 2 Mod Apk on your device by following the simple steps we have provided in this article. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy driving!

    -

    FAQs

    -

    Here are some frequently asked questions about Dr Driving 2 and its mod version:

    - - - - - - - - - - - - - - - - - - - - - - - - - -
| Question | Answer |
| --- | --- |
| Is Dr Driving 2 Mod Apk safe to use? | Yes, Dr Driving 2 Mod Apk is safe to use, as long as you download it from a trusted source that offers virus-free downloads. However, you should always be careful when installing apps from unknown sources and scan them with an antivirus before installing them. |
| Will Dr Driving 2 Mod Apk work on my device? | Dr Driving 2 Mod Apk should work on most Android devices that support the original game. However, some devices may not be compatible or may experience some issues due to different specifications or settings. If you encounter any problems, you can try to clear the cache, reinstall the game, or contact the mod developer for support. |
| Can I play Dr Driving 2 Mod Apk online with other players? | Yes, you can play Dr Driving 2 Mod Apk online with other players who are using the same mod version. However, you may not be able to play with players who are using the original game or a different mod version, as they may have different features or versions. |
| Can I update Dr Driving 2 Mod Apk to the latest version? | Yes, you can update Dr Driving 2 Mod Apk to the latest version by downloading and installing the new mod apk file from the same source that you downloaded it from. However, you may lose your progress or mod features if you update the game from Google Play Store or App Store. |
| Can I use Dr Driving 2 Mod Apk with other mods or cheats? | No, we do not recommend using Dr Driving 2 Mod Apk with other mods or cheats, as they may cause conflicts or errors in the game. Dr Driving 2 Mod Apk already has all the features and benefits that you need to enjoy the game. |

    -
    -
    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download WhatsApp Business APK Terbaru 2023 for Android.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download WhatsApp Business APK Terbaru 2023 for Android.md deleted file mode 100644 index 02875b7840c1d92c39c7ea227c692eb26164c881..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download WhatsApp Business APK Terbaru 2023 for Android.md +++ /dev/null @@ -1,149 +0,0 @@ - -

    WhatsApp Business APK Terbaru 2023: What You Need to Know

    -

    If you are a business owner who wants to connect with your customers in a fast, convenient, and personal way, you might want to consider using WhatsApp Business. This is a special version of the popular messaging app that is designed specifically for small and medium businesses. It allows you to create a professional profile, showcase your products and services, automate your communication, and more.

    -

    whatsapp business apk terbaru 2023


Download Zip: https://gohhs.com/2uPm2n



    -

    But what if we told you that there is a new version of WhatsApp Business coming out in 2023 that will make your business communication even better? That's right, WhatsApp Business APK Terbaru 2023 is the latest update of the app that will bring you new features and benefits that will help you grow your business and satisfy your customers. In this article, we will tell you everything you need to know about WhatsApp Business APK Terbaru 2023, including what it is, what are its features, what are its benefits, how to download and install it, and how to use it effectively. So, let's get started!

    -

    What is WhatsApp Business?

    -

    Before we dive into the details of WhatsApp Business APK Terbaru 2023, let's first understand what WhatsApp Business is and how it differs from WhatsApp Messenger. WhatsApp Business is a separate app from WhatsApp Messenger that is meant for businesses who want to communicate with their customers in a personal and convenient way. There are two versions of WhatsApp Business: WhatsApp Business App and WhatsApp Business Platform.

    

    WhatsApp Business App is for small businesses who personally manage conversations with customers. It allows you to create a business profile with your logo, description, website URL, address, and other information. It also allows you to showcase your products and services in a catalog that customers can browse and order from. You can also use various messaging tools such as automated messages, quick replies, labels, and chat filters to manage your customer conversations efficiently and professionally. You can also use different communication channels such as voice messages, calls, video calls, group calls, and media sharing to interact with your customers in different ways.

    

    WhatsApp Business Platform is for medium to large businesses who communicate with customers at scale through programmatic access. It allows you to integrate WhatsApp with your existing systems such as CRM, ERP, or e-commerce platforms. You can also use the WhatsApp Business API to send notifications, confirmations, reminders, updates, and other messages to your customers. You can also use chatbots or live agents to handle customer queries and requests.
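
        To make the Platform side more concrete, here is a minimal sketch of how a business-initiated text message could be sent programmatically, assuming Meta's Cloud API flavour of the WhatsApp Business Platform. The Graph API version, phone number ID, access token, and recipient number below are placeholders, not real values, and free-form text like this is generally only deliverable inside the customer-service window; scheduled notifications normally use pre-approved message templates.

    import requests

    # All values below are placeholders (hypothetical IDs/tokens), not real credentials.
    GRAPH_API_URL = "https://graph.facebook.com/v17.0"
    PHONE_NUMBER_ID = "123456789012345"   # your WhatsApp Business phone number ID
    ACCESS_TOKEN = "YOUR_ACCESS_TOKEN"    # a token with WhatsApp Business messaging permission

    def send_text(to: str, body: str) -> dict:
        """Send a plain text message to a customer via the WhatsApp Cloud API."""
        payload = {
            "messaging_product": "whatsapp",
            "to": to,                     # customer's number in international format, e.g. "6281234567890"
            "type": "text",
            "text": {"body": body},
        }
        response = requests.post(
            f"{GRAPH_API_URL}/{PHONE_NUMBER_ID}/messages",
            json=payload,
            headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
            timeout=10,
        )
        response.raise_for_status()
        return response.json()

    if __name__ == "__main__":
        print(send_text("6281234567890", "Your order #1234 has been shipped."))
    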

    

    What are the features of WhatsApp Business?

    

    Business profile

    

    One of the main features of WhatsApp Business is the business profile. This is where you can create a branded profile for your business that includes your logo, description, website URL, address, and other information. You can also add multimedia elements such as photos, videos, or documents to showcase your business. Your business profile will help you increase your visibility and credibility among your customers and potential customers.

    

    Catalog

    

    Another feature of WhatsApp Business is the catalog. This is where you can showcase your products and services in a catalog that customers can browse and order from. You can add images, prices, descriptions, and links to each item in your catalog. You can also update your catalog anytime with new products or offers. Your catalog will help you attract more customers and increase your sales.

    

    

    

    Messaging tools

    

    A third feature of WhatsApp Business is the messaging tools. These are tools that help you manage your customer conversations efficiently and professionally. Some of the messaging tools are:

        • Automated messages: These are messages that you can set up to send automatically when customers contact you or when certain conditions are met. For example, you can set up a welcome message when a customer first messages you, or an away message when you are offline.
        • Quick replies: These are messages that you can save and reuse for frequently asked questions or common responses. For example, you can save a quick reply for your delivery time or payment methods.
        • Labels: These are tags that you can use to organize your chats and contacts into categories. For example, you can use labels for new customers, orders, payments, etc.
        • Chat filters: These are filters that you can use to sort your chats by unread messages, groups, broadcast lists, or labels.
    

    Communication channels

    

    A fourth feature of WhatsApp Business is the communication channels. These are channels that allow you to communicate with your customers in different ways. Some of the communication channels are:

        • Voice messages: These are messages that allow you to record and send audio messages to your customers.
        • Calls: These are calls that allow you to make voice calls to your customers.
        • Video calls: These are calls that allow you to make video calls to your customers.
        • Group calls: These are calls that allow you to make voice or video calls with up to eight participants.
        • Media sharing: These are messages that allow you to send images, videos, documents, contacts, locations, and other media files to your customers.
    

    What are the benefits of WhatsApp Business?

    

    Now that you know what WhatsApp Business is and what are its features, you might be wondering what are the benefits of using it for your business. Well, there are many benefits of using WhatsApp Business, such as:

        • Improve visibility: By creating a business profile and a catalog, you can increase your visibility and credibility among your customers and potential customers. You can also use WhatsApp Status to share updates and offers with your contacts.
        • Automate communication: By using automated messages and quick replies, you can save time and resources and provide faster and better service to your customers. You can also use the WhatsApp Business API to integrate WhatsApp with your existing systems and automate your communication at scale (see the webhook sketch right after this list).
        • Keep workflow organized: By using labels and chat filters, you can keep your workflow organized and manage your chats and contacts easily. You can also use the WhatsApp Web or Desktop app to access your chats from your computer.
        • Enhance customer experience: By using different communication channels and media sharing, you can enhance your customer experience and build trust and loyalty. You can also use interactive messages to engage your customers and get feedback.
        • Increase profitability: By using WhatsApp Business, you can increase your profitability by attracting more customers, increasing your sales, reducing your costs, and improving your retention rate.
    
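
        To illustrate the "automate communication" point above, here is a rough sketch of a small webhook service that receives incoming messages from the WhatsApp Business Platform and sends an automatic acknowledgement. The payload shape assumed here follows Meta's Cloud API webhook format, but the field names, IDs, and token are illustrative assumptions and should be checked against the API version you actually use.

    import requests
    from flask import Flask, jsonify, request

    app = Flask(__name__)

    # Hypothetical placeholders; replace with your own values.
    GRAPH_API_URL = "https://graph.facebook.com/v17.0"
    PHONE_NUMBER_ID = "123456789012345"
    ACCESS_TOKEN = "YOUR_ACCESS_TOKEN"

    def send_text(to: str, body: str) -> None:
        """Reply to a customer with a plain text message."""
        requests.post(
            f"{GRAPH_API_URL}/{PHONE_NUMBER_ID}/messages",
            json={"messaging_product": "whatsapp", "to": to, "type": "text", "text": {"body": body}},
            headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
            timeout=10,
        )

    @app.route("/webhook", methods=["POST"])
    def incoming():
        data = request.get_json(silent=True) or {}
        # Walk the nested webhook payload and acknowledge every incoming text message.
        for entry in data.get("entry", []):
            for change in entry.get("changes", []):
                for message in change.get("value", {}).get("messages", []):
                    if message.get("type") == "text":
                        send_text(message["from"], "Thanks for reaching out! A team member will reply shortly.")
        return jsonify(status="received"), 200

    if __name__ == "__main__":
        app.run(port=5000)
    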

    According to a study by Nielsen, 67% of mobile messaging app users said they expect to use chat more for communicating with businesses in the next two years. Moreover, 53% of respondents said they are more likely to shop with a business they can message directly. Some examples of successful brands that use WhatsApp Business are BookMyShow, KLM Royal Dutch Airlines, and MakeMyTrip.

    

    What is new in WhatsApp Business APK Terbaru 2023?

    

        As you can see, WhatsApp Business is already a great app for businesses that want to communicate with their customers in a fast, convenient, and personal way. WhatsApp Business APK Terbaru 2023 builds on this with new features that will make your business communication even better and help you grow your business and satisfy your customers. Some of the new features are:
    

    

    Multi-device support

    

    One of the new features of WhatsApp Business APK Terbaru 2023 is the multi-device support. This feature will allow you to connect up to four devices to a single account and work synchronously on your phone and computer. This means that you can access your chats from any device without losing any data or functionality. You can also switch between devices seamlessly without logging out or verifying each time. This feature will help you work more efficiently and conveniently on multiple devices.

    

    Payments integration

    

    Another new feature of WhatsApp Business APK Terbaru 2023 is the payments integration. This feature will allow you to accept payments from customers directly on WhatsApp using local payment methods in India and Brazil. This means that you can offer a seamless payment experience to your customers without leaving the app or redirecting them to another platform. You can also track your transactions and manage your refunds easily on WhatsApp. This feature will help you increase your sales and reduce your payment friction.

    

    Interactive messages

    

    A third new feature of WhatsApp Business APK Terbaru 2023 is the interactive messages. This feature will allow you to send messages that include buttons or menus that customers can tap to perform actions or reply quickly. For example, you can send a message that asks for feedback with a button that says "Rate us" or "Leave a comment". You can also send a message that offers options with a menu that says "Choose one" or "Select all". This feature will help you engage your customers and get more responses.
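
        To give a feel for what such a message looks like under the hood, here is a sketch of a reply-button payload in the style of the Cloud API's interactive message type; the recipient number, button IDs, and titles are made-up examples.

    # Hypothetical reply-button payload in the style of the WhatsApp Cloud API.
    # It would be POSTed to the same /{PHONE_NUMBER_ID}/messages endpoint as a text message.
    interactive_message = {
        "messaging_product": "whatsapp",
        "to": "6281234567890",  # customer's number (placeholder)
        "type": "interactive",
        "interactive": {
            "type": "button",
            "body": {"text": "How would you rate our service today?"},
            "action": {
                "buttons": [
                    {"type": "reply", "reply": {"id": "rate_good", "title": "Great"}},
                    {"type": "reply", "reply": {"id": "rate_bad", "title": "Needs work"}},
                ]
            },
        },
    }
    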

    

    File sharing

    

    A fourth new feature of WhatsApp Business APK Terbaru 2023 is the file sharing. This feature will allow you to send files such as PDFs, spreadsheets, slideshows, etc. to customers easily and securely. This means that you can share important documents such as invoices, receipts, contracts, etc. with your customers without worrying about file size or format limitations. You can also preview the files before sending them and download them anytime from the chat history. This feature will help you share information and documents with your customers more conveniently.
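
        On the API side, sharing a document such as an invoice could look roughly like the sketch below, assuming the Cloud API's document message type; the URL, filename, and caption are placeholders for illustration.

    # Hypothetical document-message payload in the style of the WhatsApp Cloud API.
    document_message = {
        "messaging_product": "whatsapp",
        "to": "6281234567890",  # customer's number (placeholder)
        "type": "document",
        "document": {
            "link": "https://example.com/invoices/invoice-1234.pdf",  # must be a reachable URL
            "filename": "invoice-1234.pdf",
            "caption": "Your invoice for order #1234",
        },
    }
    # POST it to /{PHONE_NUMBER_ID}/messages with the same Bearer token as the earlier examples.
    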

    

    How to download and install WhatsApp Business APK Terbaru 2023?

    

    If you are interested in trying out the new features of WhatsApp Business APK Terbaru 2023, you might be wondering how to download and install it on your device. Well, it's very easy and simple. Just follow these steps:

        1. Go to a reliable source such as APKCombo and search for WhatsApp Business APK Terbaru 2023.
        2. Select the version that matches your device and click on the download button.
        3. Once the download is complete, open the file and tap on install.
        4. Allow the app to access your device's permissions and follow the instructions on the screen.
        5. Verify your phone number and set up your business profile and catalog.
        6. Start using WhatsApp Business APK Terbaru 2023 for your business communication.
    

        Note: You need Android 4.4 or higher to run WhatsApp Business APK Terbaru 2023. You also need a separate phone number for your business account, distinct from your personal number.
    

    

    How to use WhatsApp Business effectively?

    

    Now that you have downloaded and installed WhatsApp Business APK Terbaru 2023, you might be wondering how to use it effectively for your business needs. Well, there are many ways to use WhatsApp Business for different purposes such as customer service, marketing campaigns, human resources, pipeline growth, etc. Here are some tips and tricks on how to use WhatsApp Business effectively:

        • Create a professional and attractive business profile and catalog that showcases your brand identity, products, and services.
        • Use automated messages and quick replies to provide faster and better service to your customers. You can also use interactive messages to engage your customers and get feedback.
        • Use labels and chat filters to organize your chats and contacts into categories. You can also use the WhatsApp Web or Desktop app to access your chats from your computer.
        • Use different communication channels and media sharing to interact with your customers in different ways. You can also use group calls to communicate with your team members or partners.
        • Use the WhatsApp Business API to integrate WhatsApp with your existing systems and automate your communication at scale. You can also use chatbots or live agents to handle customer queries and requests.
        • Use payments integration to accept payments from customers directly on WhatsApp using local payment methods in India and Brazil. You can also track your transactions and manage your refunds easily on WhatsApp.
        • Use file sharing to share important documents such as invoices, receipts, contracts, etc. with your customers easily and securely. You can also preview the files before sending them and download them anytime from the chat history.
        • Use multi-device support to connect up to four devices to a single account and work synchronously on your phone and computer. You can also switch between devices seamlessly without logging out or verifying each time.
    

    Some examples of best practices and successful brands that use WhatsApp Business are:

        • BookMyShow: This is an online ticketing platform that uses WhatsApp Business App to send ticket confirmations, reminders, offers, and recommendations to its customers. It also uses WhatsApp Business Platform to send notifications, updates, and surveys to its customers at scale.
        • KLM Royal Dutch Airlines: This is an airline company that uses WhatsApp Business Platform to provide customer service, flight information, boarding passes, check-in confirmations, travel tips, and more to its customers. It also uses interactive messages to offer options and menus to its customers.
        • MakeMyTrip: This is an online travel agency that uses WhatsApp Business App to provide booking confirmations, travel updates, itinerary changes, cancellations, refunds, etc. to its customers. It also uses payments integration to accept payments from customers directly on WhatsApp using local payment methods in India.
    

    Conclusion

    

    In conclusion, WhatsApp Business APK Terbaru 2023 is a great app for businesses who want to communicate with their customers in a fast, convenient, and personal way. It offers many features and benefits that can help businesses improve visibility, automate communication, keep workflow organized, enhance customer experience, and increase profitability. It also brings new features such as multi-device support, payments integration, interactive messages, and file sharing that will make business communication even better. If you are a business owner who wants to grow your business and satisfy your customers, you should definitely download WhatsApp Business APK Terbaru 2023 and start using it for your business needs.

    

    FAQs

    

    Here are some answers to five common questions that readers might have about WhatsApp Business APK Terbaru 2023:

        1. What are the differences between WhatsApp Business App and WhatsApp Messenger?
           WhatsApp Business App is a separate app from WhatsApp Messenger that is meant for businesses who want to communicate with their customers in a personal and convenient way. It allows them to create a business profile, showcase their products and services, automate their communication, and more. WhatsApp Messenger is the regular app that is meant for personal communication with friends and family. It allows them to send messages, calls, media, and more.
        2. How much does it cost to use WhatsApp Business?
           WhatsApp Business is free to download and use for both WhatsApp Business App and WhatsApp Business Platform. However, there may be some charges associated with using certain features or services such as data usage, payments integration, WhatsApp Business API, etc. You should check with your service provider or WhatsApp for more details.
        3. Is WhatsApp Business secure and compliant with data privacy laws?
           WhatsApp Business is secure and compliant with data privacy laws. It uses end-to-end encryption to protect your messages and calls from being accessed by anyone else. It also follows the General Data Protection Regulation (GDPR) and other local data privacy laws to ensure that your data is handled responsibly and transparently. You can also control your privacy settings and manage your data on WhatsApp.
        4. How can I get support for WhatsApp Business?
           You can get support for WhatsApp Business by visiting the WhatsApp Help Center or contacting the WhatsApp Support Team. You can also find answers to common questions and issues on the WhatsApp FAQ page or the WhatsApp Business Blog.
        5. How can I switch from WhatsApp Messenger to WhatsApp Business App?
           You can switch from WhatsApp Messenger to WhatsApp Business App by following these steps:
           1. Back up your chat history on WhatsApp Messenger.
           2. Download WhatsApp Business App from the Google Play Store or the Apple App Store.
           3. Verify your business phone number and restore your chat history on WhatsApp Business App.
           4. Set up your business profile and catalog on WhatsApp Business App.
           5. Start using WhatsApp Business App for your business communication.
    

      Note: You can use both WhatsApp Messenger and WhatsApp Business App on the same device as long as they have different phone numbers. However, you cannot use the same phone number for both apps.

    

      I hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. And if you are ready to take your business communication to the next level, download WhatsApp Business APK Terbaru 2023 today and start using it for your business needs. Thank you for reading!

    
      \ No newline at end of file diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/media-typer/HISTORY.md b/spaces/fffiloni/controlnet-animation-doodle/node_modules/media-typer/HISTORY.md deleted file mode 100644 index 62c2003168f588b4d470470278a2319c5950edc2..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/media-typer/HISTORY.md +++ /dev/null @@ -1,22 +0,0 @@ -0.3.0 / 2014-09-07 -================== - - * Support Node.js 0.6 - * Throw error when parameter format invalid on parse - -0.2.0 / 2014-06-18 -================== - - * Add `typer.format()` to format media types - -0.1.0 / 2014-06-17 -================== - - * Accept `req` as argument to `parse` - * Accept `res` as argument to `parse` - * Parse media type with extra LWS between type and first parameter - -0.0.0 / 2014-06-13 -================== - - * Initial implementation diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io/dist/index.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io/dist/index.js deleted file mode 100644 index aa6151c551f9fe26fb933fcd32df915bb1bbe826..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io/dist/index.js +++ /dev/null @@ -1,802 +0,0 @@ -"use strict"; -var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - var desc = Object.getOwnPropertyDescriptor(m, k); - if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { - desc = { enumerable: true, get: function() { return m[k]; } }; - } - Object.defineProperty(o, k2, desc); -}) : (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - o[k2] = m[k]; -})); -var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { - Object.defineProperty(o, "default", { enumerable: true, value: v }); -}) : function(o, v) { - o["default"] = v; -}); -var __importStar = (this && this.__importStar) || function (mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); - __setModuleDefault(result, mod); - return result; -}; -var __importDefault = (this && this.__importDefault) || function (mod) { - return (mod && mod.__esModule) ? 
mod : { "default": mod }; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.Namespace = exports.Socket = exports.Server = void 0; -const http = require("http"); -const fs_1 = require("fs"); -const zlib_1 = require("zlib"); -const accepts = require("accepts"); -const stream_1 = require("stream"); -const path = require("path"); -const engine_io_1 = require("engine.io"); -const client_1 = require("./client"); -const events_1 = require("events"); -const namespace_1 = require("./namespace"); -Object.defineProperty(exports, "Namespace", { enumerable: true, get: function () { return namespace_1.Namespace; } }); -const parent_namespace_1 = require("./parent-namespace"); -const socket_io_adapter_1 = require("socket.io-adapter"); -const parser = __importStar(require("socket.io-parser")); -const debug_1 = __importDefault(require("debug")); -const socket_1 = require("./socket"); -Object.defineProperty(exports, "Socket", { enumerable: true, get: function () { return socket_1.Socket; } }); -const typed_events_1 = require("./typed-events"); -const uws_1 = require("./uws"); -const debug = (0, debug_1.default)("socket.io:server"); -const clientVersion = require("../package.json").version; -const dotMapRegex = /\.map/; -/** - * Represents a Socket.IO server. - * - * @example - * import { Server } from "socket.io"; - * - * const io = new Server(); - * - * io.on("connection", (socket) => { - * console.log(`socket ${socket.id} connected`); - * - * // send an event to the client - * socket.emit("foo", "bar"); - * - * socket.on("foobar", () => { - * // an event was received from the client - * }); - * - * // upon disconnection - * socket.on("disconnect", (reason) => { - * console.log(`socket ${socket.id} disconnected due to ${reason}`); - * }); - * }); - * - * io.listen(3000); - */ -class Server extends typed_events_1.StrictEventEmitter { - constructor(srv, opts = {}) { - super(); - /** - * @private - */ - this._nsps = new Map(); - this.parentNsps = new Map(); - /** - * A subset of the {@link parentNsps} map, only containing {@link ParentNamespace} which are based on a regular - * expression. - * - * @private - */ - this.parentNamespacesFromRegExp = new Map(); - if ("object" === typeof srv && - srv instanceof Object && - !srv.listen) { - opts = srv; - srv = undefined; - } - this.path(opts.path || "/socket.io"); - this.connectTimeout(opts.connectTimeout || 45000); - this.serveClient(false !== opts.serveClient); - this._parser = opts.parser || parser; - this.encoder = new this._parser.Encoder(); - this.opts = opts; - if (opts.connectionStateRecovery) { - opts.connectionStateRecovery = Object.assign({ - maxDisconnectionDuration: 2 * 60 * 1000, - skipMiddlewares: true, - }, opts.connectionStateRecovery); - this.adapter(opts.adapter || socket_io_adapter_1.SessionAwareAdapter); - } - else { - this.adapter(opts.adapter || socket_io_adapter_1.Adapter); - } - opts.cleanupEmptyChildNamespaces = !!opts.cleanupEmptyChildNamespaces; - this.sockets = this.of("/"); - if (srv || typeof srv == "number") - this.attach(srv); - } - get _opts() { - return this.opts; - } - serveClient(v) { - if (!arguments.length) - return this._serveClient; - this._serveClient = v; - return this; - } - /** - * Executes the middleware for an incoming namespace not already created on the server. 
- * - * @param name - name of incoming namespace - * @param auth - the auth parameters - * @param fn - callback - * - * @private - */ - _checkNamespace(name, auth, fn) { - if (this.parentNsps.size === 0) - return fn(false); - const keysIterator = this.parentNsps.keys(); - const run = () => { - const nextFn = keysIterator.next(); - if (nextFn.done) { - return fn(false); - } - nextFn.value(name, auth, (err, allow) => { - if (err || !allow) { - return run(); - } - if (this._nsps.has(name)) { - // the namespace was created in the meantime - debug("dynamic namespace %s already exists", name); - return fn(this._nsps.get(name)); - } - const namespace = this.parentNsps.get(nextFn.value).createChild(name); - debug("dynamic namespace %s was created", name); - fn(namespace); - }); - }; - run(); - } - path(v) { - if (!arguments.length) - return this._path; - this._path = v.replace(/\/$/, ""); - const escapedPath = this._path.replace(/[-\/\\^$*+?.()|[\]{}]/g, "\\$&"); - this.clientPathRegex = new RegExp("^" + - escapedPath + - "/socket\\.io(\\.msgpack|\\.esm)?(\\.min)?\\.js(\\.map)?(?:\\?|$)"); - return this; - } - connectTimeout(v) { - if (v === undefined) - return this._connectTimeout; - this._connectTimeout = v; - return this; - } - adapter(v) { - if (!arguments.length) - return this._adapter; - this._adapter = v; - for (const nsp of this._nsps.values()) { - nsp._initAdapter(); - } - return this; - } - /** - * Attaches socket.io to a server or port. - * - * @param srv - server or port - * @param opts - options passed to engine.io - * @return self - */ - listen(srv, opts = {}) { - return this.attach(srv, opts); - } - /** - * Attaches socket.io to a server or port. - * - * @param srv - server or port - * @param opts - options passed to engine.io - * @return self - */ - attach(srv, opts = {}) { - if ("function" == typeof srv) { - const msg = "You are trying to attach socket.io to an express " + - "request handler function. Please pass a http.Server instance."; - throw new Error(msg); - } - // handle a port as a string - if (Number(srv) == srv) { - srv = Number(srv); - } - if ("number" == typeof srv) { - debug("creating http server and binding to %d", srv); - const port = srv; - srv = http.createServer((req, res) => { - res.writeHead(404); - res.end(); - }); - srv.listen(port); - } - // merge the options passed to the Socket.IO server - Object.assign(opts, this.opts); - // set engine.io path to `/socket.io` - opts.path = opts.path || this._path; - this.initEngine(srv, opts); - return this; - } - attachApp(app /*: TemplatedApp */, opts = {}) { - // merge the options passed to the Socket.IO server - Object.assign(opts, this.opts); - // set engine.io path to `/socket.io` - opts.path = opts.path || this._path; - // initialize engine - debug("creating uWebSockets.js-based engine with opts %j", opts); - const engine = new engine_io_1.uServer(opts); - engine.attach(app, opts); - // bind to engine events - this.bind(engine); - if (this._serveClient) { - // attach static file serving - app.get(`${this._path}/*`, (res, req) => { - if (!this.clientPathRegex.test(req.getUrl())) { - req.setYield(true); - return; - } - const filename = req - .getUrl() - .replace(this._path, "") - .replace(/\?.*$/, "") - .replace(/^\//, ""); - const isMap = dotMapRegex.test(filename); - const type = isMap ? 
"map" : "source"; - // Per the standard, ETags must be quoted: - // https://tools.ietf.org/html/rfc7232#section-2.3 - const expectedEtag = '"' + clientVersion + '"'; - const weakEtag = "W/" + expectedEtag; - const etag = req.getHeader("if-none-match"); - if (etag) { - if (expectedEtag === etag || weakEtag === etag) { - debug("serve client %s 304", type); - res.writeStatus("304 Not Modified"); - res.end(); - return; - } - } - debug("serve client %s", type); - res.writeHeader("cache-control", "public, max-age=0"); - res.writeHeader("content-type", "application/" + (isMap ? "json" : "javascript") + "; charset=utf-8"); - res.writeHeader("etag", expectedEtag); - const filepath = path.join(__dirname, "../client-dist/", filename); - (0, uws_1.serveFile)(res, filepath); - }); - } - (0, uws_1.patchAdapter)(app); - } - /** - * Initialize engine - * - * @param srv - the server to attach to - * @param opts - options passed to engine.io - * @private - */ - initEngine(srv, opts) { - // initialize engine - debug("creating engine.io instance with opts %j", opts); - this.eio = (0, engine_io_1.attach)(srv, opts); - // attach static file serving - if (this._serveClient) - this.attachServe(srv); - // Export http server - this.httpServer = srv; - // bind to engine events - this.bind(this.eio); - } - /** - * Attaches the static file serving. - * - * @param srv http server - * @private - */ - attachServe(srv) { - debug("attaching client serving req handler"); - const evs = srv.listeners("request").slice(0); - srv.removeAllListeners("request"); - srv.on("request", (req, res) => { - if (this.clientPathRegex.test(req.url)) { - this.serve(req, res); - } - else { - for (let i = 0; i < evs.length; i++) { - evs[i].call(srv, req, res); - } - } - }); - } - /** - * Handles a request serving of client source and map - * - * @param req - * @param res - * @private - */ - serve(req, res) { - const filename = req.url.replace(this._path, "").replace(/\?.*$/, ""); - const isMap = dotMapRegex.test(filename); - const type = isMap ? "map" : "source"; - // Per the standard, ETags must be quoted: - // https://tools.ietf.org/html/rfc7232#section-2.3 - const expectedEtag = '"' + clientVersion + '"'; - const weakEtag = "W/" + expectedEtag; - const etag = req.headers["if-none-match"]; - if (etag) { - if (expectedEtag === etag || weakEtag === etag) { - debug("serve client %s 304", type); - res.writeHead(304); - res.end(); - return; - } - } - debug("serve client %s", type); - res.setHeader("Cache-Control", "public, max-age=0"); - res.setHeader("Content-Type", "application/" + (isMap ? 
"json" : "javascript") + "; charset=utf-8"); - res.setHeader("ETag", expectedEtag); - Server.sendFile(filename, req, res); - } - /** - * @param filename - * @param req - * @param res - * @private - */ - static sendFile(filename, req, res) { - const readStream = (0, fs_1.createReadStream)(path.join(__dirname, "../client-dist/", filename)); - const encoding = accepts(req).encodings(["br", "gzip", "deflate"]); - const onError = (err) => { - if (err) { - res.end(); - } - }; - switch (encoding) { - case "br": - res.writeHead(200, { "content-encoding": "br" }); - readStream.pipe((0, zlib_1.createBrotliCompress)()).pipe(res); - (0, stream_1.pipeline)(readStream, (0, zlib_1.createBrotliCompress)(), res, onError); - break; - case "gzip": - res.writeHead(200, { "content-encoding": "gzip" }); - (0, stream_1.pipeline)(readStream, (0, zlib_1.createGzip)(), res, onError); - break; - case "deflate": - res.writeHead(200, { "content-encoding": "deflate" }); - (0, stream_1.pipeline)(readStream, (0, zlib_1.createDeflate)(), res, onError); - break; - default: - res.writeHead(200); - (0, stream_1.pipeline)(readStream, res, onError); - } - } - /** - * Binds socket.io to an engine.io instance. - * - * @param engine engine.io (or compatible) server - * @return self - */ - bind(engine) { - this.engine = engine; - this.engine.on("connection", this.onconnection.bind(this)); - return this; - } - /** - * Called with each incoming transport connection. - * - * @param {engine.Socket} conn - * @return self - * @private - */ - onconnection(conn) { - debug("incoming connection with id %s", conn.id); - const client = new client_1.Client(this, conn); - if (conn.protocol === 3) { - // @ts-ignore - client.connect("/"); - } - return this; - } - /** - * Looks up a namespace. - * - * @example - * // with a simple string - * const myNamespace = io.of("/my-namespace"); - * - * // with a regex - * const dynamicNsp = io.of(/^\/dynamic-\d+$/).on("connection", (socket) => { - * const namespace = socket.nsp; // newNamespace.name === "/dynamic-101" - * - * // broadcast to all clients in the given sub-namespace - * namespace.emit("hello"); - * }); - * - * @param name - nsp name - * @param fn optional, nsp `connection` ev handler - */ - of(name, fn) { - if (typeof name === "function" || name instanceof RegExp) { - const parentNsp = new parent_namespace_1.ParentNamespace(this); - debug("initializing parent namespace %s", parentNsp.name); - if (typeof name === "function") { - this.parentNsps.set(name, parentNsp); - } - else { - this.parentNsps.set((nsp, conn, next) => next(null, name.test(nsp)), parentNsp); - this.parentNamespacesFromRegExp.set(name, parentNsp); - } - if (fn) { - // @ts-ignore - parentNsp.on("connect", fn); - } - return parentNsp; - } - if (String(name)[0] !== "/") - name = "/" + name; - let nsp = this._nsps.get(name); - if (!nsp) { - for (const [regex, parentNamespace] of this.parentNamespacesFromRegExp) { - if (regex.test(name)) { - debug("attaching namespace %s to parent namespace %s", name, regex); - return parentNamespace.createChild(name); - } - } - debug("initializing namespace %s", name); - nsp = new namespace_1.Namespace(this, name); - this._nsps.set(name, nsp); - if (name !== "/") { - // @ts-ignore - this.sockets.emitReserved("new_namespace", nsp); - } - } - if (fn) - nsp.on("connect", fn); - return nsp; - } - /** - * Closes server connection - * - * @param [fn] optional, called as `fn([err])` on error OR all conns closed - */ - close(fn) { - for (const socket of this.sockets.sockets.values()) { - 
socket._onclose("server shutting down"); - } - this.engine.close(); - // restore the Adapter prototype - (0, uws_1.restoreAdapter)(); - if (this.httpServer) { - this.httpServer.close(fn); - } - else { - fn && fn(); - } - } - /** - * Registers a middleware, which is a function that gets executed for every incoming {@link Socket}. - * - * @example - * io.use((socket, next) => { - * // ... - * next(); - * }); - * - * @param fn - the middleware function - */ - use(fn) { - this.sockets.use(fn); - return this; - } - /** - * Targets a room when emitting. - * - * @example - * // the “foo” event will be broadcast to all connected clients in the “room-101” room - * io.to("room-101").emit("foo", "bar"); - * - * // with an array of rooms (a client will be notified at most once) - * io.to(["room-101", "room-102"]).emit("foo", "bar"); - * - * // with multiple chained calls - * io.to("room-101").to("room-102").emit("foo", "bar"); - * - * @param room - a room, or an array of rooms - * @return a new {@link BroadcastOperator} instance for chaining - */ - to(room) { - return this.sockets.to(room); - } - /** - * Targets a room when emitting. Similar to `to()`, but might feel clearer in some cases: - * - * @example - * // disconnect all clients in the "room-101" room - * io.in("room-101").disconnectSockets(); - * - * @param room - a room, or an array of rooms - * @return a new {@link BroadcastOperator} instance for chaining - */ - in(room) { - return this.sockets.in(room); - } - /** - * Excludes a room when emitting. - * - * @example - * // the "foo" event will be broadcast to all connected clients, except the ones that are in the "room-101" room - * io.except("room-101").emit("foo", "bar"); - * - * // with an array of rooms - * io.except(["room-101", "room-102"]).emit("foo", "bar"); - * - * // with multiple chained calls - * io.except("room-101").except("room-102").emit("foo", "bar"); - * - * @param room - a room, or an array of rooms - * @return a new {@link BroadcastOperator} instance for chaining - */ - except(room) { - return this.sockets.except(room); - } - /** - * Emits an event and waits for an acknowledgement from all clients. - * - * @example - * try { - * const responses = await io.timeout(1000).emitWithAck("some-event"); - * console.log(responses); // one response per client - * } catch (e) { - * // some clients did not acknowledge the event in the given delay - * } - * - * @return a Promise that will be fulfilled when all clients have acknowledged the event - */ - emitWithAck(ev, ...args) { - return this.sockets.emitWithAck(ev, ...args); - } - /** - * Sends a `message` event to all clients. - * - * This method mimics the WebSocket.send() method. - * - * @see https://developer.mozilla.org/en-US/docs/Web/API/WebSocket/send - * - * @example - * io.send("hello"); - * - * // this is equivalent to - * io.emit("message", "hello"); - * - * @return self - */ - send(...args) { - this.sockets.emit("message", ...args); - return this; - } - /** - * Sends a `message` event to all clients. Alias of {@link send}. - * - * @return self - */ - write(...args) { - this.sockets.emit("message", ...args); - return this; - } - /** - * Sends a message to the other Socket.IO servers of the cluster. 
- * - * @example - * io.serverSideEmit("hello", "world"); - * - * io.on("hello", (arg1) => { - * console.log(arg1); // prints "world" - * }); - * - * // acknowledgements (without binary content) are supported too: - * io.serverSideEmit("ping", (err, responses) => { - * if (err) { - * // some servers did not acknowledge the event in the given delay - * } else { - * console.log(responses); // one response per server (except the current one) - * } - * }); - * - * io.on("ping", (cb) => { - * cb("pong"); - * }); - * - * @param ev - the event name - * @param args - an array of arguments, which may include an acknowledgement callback at the end - */ - serverSideEmit(ev, ...args) { - return this.sockets.serverSideEmit(ev, ...args); - } - /** - * Sends a message and expect an acknowledgement from the other Socket.IO servers of the cluster. - * - * @example - * try { - * const responses = await io.serverSideEmitWithAck("ping"); - * console.log(responses); // one response per server (except the current one) - * } catch (e) { - * // some servers did not acknowledge the event in the given delay - * } - * - * @param ev - the event name - * @param args - an array of arguments - * - * @return a Promise that will be fulfilled when all servers have acknowledged the event - */ - serverSideEmitWithAck(ev, ...args) { - return this.sockets.serverSideEmitWithAck(ev, ...args); - } - /** - * Gets a list of socket ids. - * - * @deprecated this method will be removed in the next major release, please use {@link Server#serverSideEmit} or - * {@link Server#fetchSockets} instead. - */ - allSockets() { - return this.sockets.allSockets(); - } - /** - * Sets the compress flag. - * - * @example - * io.compress(false).emit("hello"); - * - * @param compress - if `true`, compresses the sending data - * @return a new {@link BroadcastOperator} instance for chaining - */ - compress(compress) { - return this.sockets.compress(compress); - } - /** - * Sets a modifier for a subsequent event emission that the event data may be lost if the client is not ready to - * receive messages (because of network slowness or other issues, or because they’re connected through long polling - * and is in the middle of a request-response cycle). - * - * @example - * io.volatile.emit("hello"); // the clients may or may not receive it - * - * @return a new {@link BroadcastOperator} instance for chaining - */ - get volatile() { - return this.sockets.volatile; - } - /** - * Sets a modifier for a subsequent event emission that the event data will only be broadcast to the current node. - * - * @example - * // the “foo” event will be broadcast to all connected clients on this node - * io.local.emit("foo", "bar"); - * - * @return a new {@link BroadcastOperator} instance for chaining - */ - get local() { - return this.sockets.local; - } - /** - * Adds a timeout in milliseconds for the next operation. - * - * @example - * io.timeout(1000).emit("some-event", (err, responses) => { - * if (err) { - * // some clients did not acknowledge the event in the given delay - * } else { - * console.log(responses); // one response per client - * } - * }); - * - * @param timeout - */ - timeout(timeout) { - return this.sockets.timeout(timeout); - } - /** - * Returns the matching socket instances. - * - * Note: this method also works within a cluster of multiple Socket.IO servers, with a compatible {@link Adapter}. 
- * - * @example - * // return all Socket instances - * const sockets = await io.fetchSockets(); - * - * // return all Socket instances in the "room1" room - * const sockets = await io.in("room1").fetchSockets(); - * - * for (const socket of sockets) { - * console.log(socket.id); - * console.log(socket.handshake); - * console.log(socket.rooms); - * console.log(socket.data); - * - * socket.emit("hello"); - * socket.join("room1"); - * socket.leave("room2"); - * socket.disconnect(); - * } - */ - fetchSockets() { - return this.sockets.fetchSockets(); - } - /** - * Makes the matching socket instances join the specified rooms. - * - * Note: this method also works within a cluster of multiple Socket.IO servers, with a compatible {@link Adapter}. - * - * @example - * - * // make all socket instances join the "room1" room - * io.socketsJoin("room1"); - * - * // make all socket instances in the "room1" room join the "room2" and "room3" rooms - * io.in("room1").socketsJoin(["room2", "room3"]); - * - * @param room - a room, or an array of rooms - */ - socketsJoin(room) { - return this.sockets.socketsJoin(room); - } - /** - * Makes the matching socket instances leave the specified rooms. - * - * Note: this method also works within a cluster of multiple Socket.IO servers, with a compatible {@link Adapter}. - * - * @example - * // make all socket instances leave the "room1" room - * io.socketsLeave("room1"); - * - * // make all socket instances in the "room1" room leave the "room2" and "room3" rooms - * io.in("room1").socketsLeave(["room2", "room3"]); - * - * @param room - a room, or an array of rooms - */ - socketsLeave(room) { - return this.sockets.socketsLeave(room); - } - /** - * Makes the matching socket instances disconnect. - * - * Note: this method also works within a cluster of multiple Socket.IO servers, with a compatible {@link Adapter}. - * - * @example - * // make all socket instances disconnect (the connections might be kept alive for other namespaces) - * io.disconnectSockets(); - * - * // make all socket instances in the "room1" room disconnect and close the underlying connections - * io.in("room1").disconnectSockets(true); - * - * @param close - whether to close the underlying connection - */ - disconnectSockets(close = false) { - return this.sockets.disconnectSockets(close); - } -} -exports.Server = Server; -/** - * Expose main namespace (/). 
- */ -const emitterMethods = Object.keys(events_1.EventEmitter.prototype).filter(function (key) { - return typeof events_1.EventEmitter.prototype[key] === "function"; -}); -emitterMethods.forEach(function (fn) { - Server.prototype[fn] = function () { - return this.sockets[fn].apply(this.sockets, arguments); - }; -}); -module.exports = (srv, opts) => new Server(srv, opts); -module.exports.Server = Server; -module.exports.Namespace = namespace_1.Namespace; -module.exports.Socket = socket_1.Socket; -var socket_2 = require("./socket"); diff --git a/spaces/firsk/ai_otto/text/cleaner.py b/spaces/firsk/ai_otto/text/cleaner.py deleted file mode 100644 index 3ba3739816aabbe16663b68c74fcda0588c14bab..0000000000000000000000000000000000000000 --- a/spaces/firsk/ai_otto/text/cleaner.py +++ /dev/null @@ -1,28 +0,0 @@ -from text import chinese, japanese, cleaned_text_to_sequence - - -language_module_map = {"ZH": chinese, "JP": japanese} - - -def clean_text(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - return norm_text, phones, tones, word2ph - - -def clean_text_bert(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - bert = language_module.get_bert_feature(norm_text, word2ph) - return phones, tones, bert - - -def text_to_sequence(text, language): - norm_text, phones, tones, word2ph = clean_text(text, language) - return cleaned_text_to_sequence(phones, tones, language) - - -if __name__ == "__main__": - pass diff --git a/spaces/fkhuggingme/gpt-academic/main.py b/spaces/fkhuggingme/gpt-academic/main.py deleted file mode 100644 index 5932b83ed9b3bffcfbcf613ec3175d253c674bd6..0000000000000000000000000000000000000000 --- a/spaces/fkhuggingme/gpt-academic/main.py +++ /dev/null @@ -1,209 +0,0 @@ -import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 - -def main(): - import gradio as gr - from request_llm.bridge_all import predict - from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith - # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 - proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS') - - # 如果WEB_PORT是-1, 则随机选取WEB端口 - PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT - if not AUTHENTICATION: AUTHENTICATION = None - - from check_proxy import get_current_version - initial_prompt = "Serve me as a writing and programming assistant." - title_html = f"

      ChatGPT 学术优化 {get_current_version()}

      " - description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)""" - - # 问询记录, python 版本建议3.9+(越新越好) - import logging - os.makedirs("gpt_log", exist_ok=True) - try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8") - except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO) - print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!") - - # 一些普通功能模块 - from core_functional import get_core_functions - functional = get_core_functions() - - # 高级函数插件 - from crazy_functional import get_crazy_functions - crazy_fns = get_crazy_functions() - - # 处理markdown文本格式的转变 - gr.Chatbot.postprocess = format_io - - # 做一些外观色彩上的调整 - from theme import adjust_theme, advanced_css - set_theme = adjust_theme() - - # 代理与自动更新 - from check_proxy import check_proxy, auto_update, warm_up_modules - proxy_info = check_proxy(proxies) - - gr_L1 = lambda: gr.Row().style() - gr_L2 = lambda scale: gr.Column(scale=scale) - if LAYOUT == "TOP-DOWN": - gr_L1 = lambda: DummyWith() - gr_L2 = lambda scale: gr.Row() - CHATBOT_HEIGHT /= 2 - - cancel_handles = [] - with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo: - gr.HTML(title_html) - gr.HTML('''
      Duplicate Space请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!使用时,先在输入框填入API-KEY然后回车。
      切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!
      支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。
      ''') - cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL}) - with gr_L1(): - with gr_L2(scale=2): - chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}") - chatbot.style(height=CHATBOT_HEIGHT) - history = gr.State([]) - with gr_L2(scale=1): - with gr.Accordion("输入区", open=True) as area_input_primary: - with gr.Row(): - txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥,输入多个密钥时,用英文逗号间隔。支持OpenAI密钥和API2D密钥共存。").style(container=False) - with gr.Row(): - submitBtn = gr.Button("提交", variant="primary") - with gr.Row(): - resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm") - stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm") - clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm") - with gr.Row(): - status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}") - with gr.Accordion("基础功能区", open=True) as area_basic_fn: - with gr.Row(): - for k in functional: - variant = functional[k]["Color"] if "Color" in functional[k] else "secondary" - functional[k]["Button"] = gr.Button(k, variant=variant) - with gr.Accordion("函数插件区", open=True) as area_crazy_fn: - with gr.Row(): - gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.") - with gr.Row(): - for k in crazy_fns: - if not crazy_fns[k].get("AsButton", True): continue - variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary" - crazy_fns[k]["Button"] = gr.Button(k, variant=variant) - crazy_fns[k]["Button"].style(size="sm") - with gr.Row(): - with gr.Accordion("更多函数插件", open=True): - dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)] - with gr.Row(): - dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False) - with gr.Row(): - plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False, - placeholder="这里是特殊函数插件的高级参数输入区").style(container=False) - with gr.Row(): - switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary") - with gr.Row(): - with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up: - file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple") - with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN")): - system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt) - top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",) - max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="Local LLM MaxLength",) - checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区") - md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False) - - gr.Markdown(description) - with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary: - with gr.Row(): - txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False) - with gr.Row(): - submitBtn2 = gr.Button("提交", variant="primary") - with gr.Row(): - resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm") - stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm") - clearBtn2 = gr.Button("清除", variant="secondary", visible=False); 
clearBtn2.style(size="sm") - # 功能区显示开关与功能区的互动 - def fn_area_visibility(a): - ret = {} - ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))}) - ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))}) - ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))}) - ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))}) - ret.update({clearBtn: gr.update(visible=("输入清除键" in a))}) - ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))}) - ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))}) - if "底部输入区" in a: ret.update({txt: gr.update(value="")}) - return ret - checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] ) - # 整理反复出现的控件句柄组合 - input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg] - output_combo = [cookies, chatbot, history, status] - predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo) - # 提交按钮、重置按钮 - cancel_handles.append(txt.submit(**predict_args)) - cancel_handles.append(txt2.submit(**predict_args)) - cancel_handles.append(submitBtn.click(**predict_args)) - cancel_handles.append(submitBtn2.click(**predict_args)) - resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) - resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) - clearBtn.click(lambda: ("",""), None, [txt, txt2]) - clearBtn2.click(lambda: ("",""), None, [txt, txt2]) - # 基础功能区的回调函数注册 - for k in functional: - click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo) - cancel_handles.append(click_handle) - # 文件上传区,接收文件后与chatbot的互动 - file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2]) - # 函数插件-固定按钮区 - for k in crazy_fns: - if not crazy_fns[k].get("AsButton", True): continue - click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo) - click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot]) - cancel_handles.append(click_handle) - # 函数插件-下拉菜单与随变按钮的互动 - def on_dropdown_changed(k): - variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary" - ret = {switchy_bt: gr.update(value=k, variant=variant)} - if crazy_fns[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区 - ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + crazy_fns[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))}) - else: - ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")}) - return ret - dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] ) - def on_md_dropdown_changed(k): - return {chatbot: gr.update(label="当前模型:"+k)} - md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] ) - # 随变按钮的回调函数注册 - def route(k, *args, **kwargs): - if k in [r"打开插件列表", r"请先从插件列表中选择"]: return - yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs) - click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo) - click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot]) - cancel_handles.append(click_handle) - # 终止按钮的回调函数注册 - stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) - 
stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) - - # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 - def auto_opentab_delay(): - import threading, webbrowser, time - print(f"如果浏览器没有自动打开,请复制并转到以下URL:") - print(f"\t(亮色主题): http://localhost:{PORT}") - print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true") - def open(): - time.sleep(2) # 打开浏览器 - DARK_MODE, = get_conf('DARK_MODE') - if DARK_MODE: webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true") - else: webbrowser.open_new_tab(f"http://localhost:{PORT}") - threading.Thread(target=open, name="open-browser", daemon=True).start() - threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start() - threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start() - - auto_opentab_delay() - demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png") - - # 如果需要在二级路径下运行 - # CUSTOM_PATH, = get_conf('CUSTOM_PATH') - # if CUSTOM_PATH != "/": - # from toolbox import run_gradio_in_subpath - # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH) - # else: - # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png") - -if __name__ == "__main__": - main() diff --git a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/image_mod_default_image/run.py b/spaces/freddyaboulton/3.1.4.9-all-demos/demos/image_mod_default_image/run.py deleted file mode 100644 index c2ad1f8be43b53d179254cb9a0cadcb4c11378b3..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/image_mod_default_image/run.py +++ /dev/null @@ -1,18 +0,0 @@ -import gradio as gr -import os - - -def image_mod(image): - return image.rotate(45) - - -cheetah = os.path.join(os.path.dirname(__file__), "images/cheetah1.jpg") - -demo = gr.Interface(image_mod, gr.Image(type="pil", value=cheetah), "image", - flagging_options=["blurry", "incorrect", "other"], examples=[ - os.path.join(os.path.dirname(__file__), "images/lion.jpg"), - os.path.join(os.path.dirname(__file__), "images/logo.png") - ]) - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/fun-research/FC-CLIP/fcclip/modeling/pixel_decoder/ops/setup.py b/spaces/fun-research/FC-CLIP/fcclip/modeling/pixel_decoder/ops/setup.py deleted file mode 100644 index 3b57ad313ac8f9b6586892142da8ba943e516cec..0000000000000000000000000000000000000000 --- a/spaces/fun-research/FC-CLIP/fcclip/modeling/pixel_decoder/ops/setup.py +++ /dev/null @@ -1,78 +0,0 @@ -# ------------------------------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------------------ -# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -# ------------------------------------------------------------------------------------------------ - -# Copyright (c) Facebook, Inc. and its affiliates. 
-# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR - -import os -import glob - -import torch - -from torch.utils.cpp_extension import CUDA_HOME -from torch.utils.cpp_extension import CppExtension -from torch.utils.cpp_extension import CUDAExtension - -from setuptools import find_packages -from setuptools import setup - -requirements = ["torch", "torchvision"] - -def get_extensions(): - this_dir = os.path.dirname(os.path.abspath(__file__)) - extensions_dir = os.path.join(this_dir, "src") - - main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) - source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp")) - source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu")) - - sources = main_file + source_cpu - extension = CppExtension - extra_compile_args = {"cxx": []} - define_macros = [] - - # Force cuda since torch ask for a device, not if cuda is in fact available. - if (os.environ.get('FORCE_CUDA') or torch.cuda.is_available()) and CUDA_HOME is not None: - extension = CUDAExtension - sources += source_cuda - define_macros += [("WITH_CUDA", None)] - extra_compile_args["nvcc"] = [ - "-DCUDA_HAS_FP16=1", - "-D__CUDA_NO_HALF_OPERATORS__", - "-D__CUDA_NO_HALF_CONVERSIONS__", - "-D__CUDA_NO_HALF2_OPERATORS__", - ] - else: - if CUDA_HOME is None: - raise NotImplementedError('CUDA_HOME is None. Please set environment variable CUDA_HOME.') - else: - raise NotImplementedError('No CUDA runtime is found. Please set FORCE_CUDA=1 or test it by running torch.cuda.is_available().') - - sources = [os.path.join(extensions_dir, s) for s in sources] - include_dirs = [extensions_dir] - ext_modules = [ - extension( - "MultiScaleDeformableAttention", - sources, - include_dirs=include_dirs, - define_macros=define_macros, - extra_compile_args=extra_compile_args, - ) - ] - return ext_modules - -setup( - name="MultiScaleDeformableAttention", - version="1.0", - author="Weijie Su", - url="https://github.com/fundamentalvision/Deformable-DETR", - description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention", - packages=find_packages(exclude=("configs", "tests",)), - ext_modules=get_extensions(), - cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, -) diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/decode_heads/apc_head.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/decode_heads/apc_head.py deleted file mode 100644 index c7038bdbe0edf2a1f184b6899486d2d190dda076..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/decode_heads/apc_head.py +++ /dev/null @@ -1,158 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -class ACM(nn.Module): - """Adaptive Context Module used in APCNet. - - Args: - pool_scale (int): Pooling scale used in Adaptive Context - Module to extract region features. - fusion (bool): Add one conv to fuse residual feature. - in_channels (int): Input channels. - channels (int): Channels after modules, before conv_seg. - conv_cfg (dict | None): Config of conv layers. - norm_cfg (dict | None): Config of norm layers. - act_cfg (dict): Config of activation layers. 
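    Note:
        In ``forward`` the input is average-pooled down to
        ``pool_scale x pool_scale`` regions, a sigmoid-activated pixel-to-region
        affinity map (``pool_scale ** 2`` channels) is predicted from the input
        plus its global context, and the pooled region features are gathered
        back to every pixel by a matrix multiplication with that affinity map,
        followed by a residual 1x1 conv.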
- """ - - def __init__(self, pool_scale, fusion, in_channels, channels, conv_cfg, - norm_cfg, act_cfg): - super(ACM, self).__init__() - self.pool_scale = pool_scale - self.fusion = fusion - self.in_channels = in_channels - self.channels = channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.pooled_redu_conv = ConvModule( - self.in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.input_redu_conv = ConvModule( - self.in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.global_info = ConvModule( - self.channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.gla = nn.Conv2d(self.channels, self.pool_scale**2, 1, 1, 0) - - self.residual_conv = ConvModule( - self.channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - if self.fusion: - self.fusion_conv = ConvModule( - self.channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, x): - """Forward function.""" - pooled_x = F.adaptive_avg_pool2d(x, self.pool_scale) - # [batch_size, channels, h, w] - x = self.input_redu_conv(x) - # [batch_size, channels, pool_scale, pool_scale] - pooled_x = self.pooled_redu_conv(pooled_x) - batch_size = x.size(0) - # [batch_size, pool_scale * pool_scale, channels] - pooled_x = pooled_x.view(batch_size, self.channels, - -1).permute(0, 2, 1).contiguous() - # [batch_size, h * w, pool_scale * pool_scale] - affinity_matrix = self.gla(x + resize( - self.global_info(F.adaptive_avg_pool2d(x, 1)), size=x.shape[2:]) - ).permute(0, 2, 3, 1).reshape( - batch_size, -1, self.pool_scale**2) - affinity_matrix = F.sigmoid(affinity_matrix) - # [batch_size, h * w, channels] - z_out = torch.matmul(affinity_matrix, pooled_x) - # [batch_size, channels, h * w] - z_out = z_out.permute(0, 2, 1).contiguous() - # [batch_size, channels, h, w] - z_out = z_out.view(batch_size, self.channels, x.size(2), x.size(3)) - z_out = self.residual_conv(z_out) - z_out = F.relu(z_out + x) - if self.fusion: - z_out = self.fusion_conv(z_out) - - return z_out - - -@HEADS.register_module() -class APCHead(BaseDecodeHead): - """Adaptive Pyramid Context Network for Semantic Segmentation. - - This head is the implementation of - `APCNet `_. - - Args: - pool_scales (tuple[int]): Pooling scales used in Adaptive Context - Module. Default: (1, 2, 3, 6). - fusion (bool): Add one conv to fuse residual feature. 
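    Note:
        One ACM is built for every entry in ``pool_scales``; their outputs are
        concatenated with the input feature map and fused by a 3x3 bottleneck
        conv before the final classification layer.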
- """ - - def __init__(self, pool_scales=(1, 2, 3, 6), fusion=True, **kwargs): - super(APCHead, self).__init__(**kwargs) - assert isinstance(pool_scales, (list, tuple)) - self.pool_scales = pool_scales - self.fusion = fusion - acm_modules = [] - for pool_scale in self.pool_scales: - acm_modules.append( - ACM(pool_scale, - self.fusion, - self.in_channels, - self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - self.acm_modules = nn.ModuleList(acm_modules) - self.bottleneck = ConvModule( - self.in_channels + len(pool_scales) * self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - acm_outs = [x] - for acm_module in self.acm_modules: - acm_outs.append(acm_module(x)) - acm_outs = torch.cat(acm_outs, dim=1) - output = self.bottleneck(acm_outs) - output = self.cls_seg(output) - return output diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/decode_heads/nl_head.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/decode_heads/nl_head.py deleted file mode 100644 index 3eee424199e6aa363b564e2a3340a070db04db86..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/decode_heads/nl_head.py +++ /dev/null @@ -1,49 +0,0 @@ -import torch -from annotator.uniformer.mmcv.cnn import NonLocal2d - -from ..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class NLHead(FCNHead): - """Non-local Neural Networks. - - This head is the implementation of `NLNet - `_. - - Args: - reduction (int): Reduction factor of projection transform. Default: 2. - use_scale (bool): Whether to scale pairwise_weight by - sqrt(1/inter_channels). Default: True. - mode (str): The nonlocal mode. Options are 'embedded_gaussian', - 'dot_product'. Default: 'embedded_gaussian.'. - """ - - def __init__(self, - reduction=2, - use_scale=True, - mode='embedded_gaussian', - **kwargs): - super(NLHead, self).__init__(num_convs=2, **kwargs) - self.reduction = reduction - self.use_scale = use_scale - self.mode = mode - self.nl_block = NonLocal2d( - in_channels=self.channels, - reduction=self.reduction, - use_scale=self.use_scale, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - mode=self.mode) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - output = self.nl_block(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/ghlee94/MEDIAR/segmentation_models_pytorch/__init__.py b/spaces/ghlee94/MEDIAR/segmentation_models_pytorch/__init__.py deleted file mode 100644 index f34d0ad932b66e14a92026f8631f68a78283478a..0000000000000000000000000000000000000000 --- a/spaces/ghlee94/MEDIAR/segmentation_models_pytorch/__init__.py +++ /dev/null @@ -1,61 +0,0 @@ -from . import datasets -from . import encoders -from . import decoders -from . import losses -from . 
import metrics - -from .decoders.unet import Unet -from .decoders.unetplusplus import UnetPlusPlus -from .decoders.manet import MAnet -from .decoders.linknet import Linknet -from .decoders.fpn import FPN -from .decoders.pspnet import PSPNet -from .decoders.deeplabv3 import DeepLabV3, DeepLabV3Plus -from .decoders.pan import PAN - -from .__version__ import __version__ - -# some private imports for create_model function -from typing import Optional as _Optional -import torch as _torch - - -def create_model( - arch: str, - encoder_name: str = "resnet34", - encoder_weights: _Optional[str] = "imagenet", - in_channels: int = 3, - classes: int = 1, - **kwargs, -) -> _torch.nn.Module: - """Models entrypoint, allows to create any model architecture just with - parameters, without using its class - """ - - archs = [ - Unet, - UnetPlusPlus, - MAnet, - Linknet, - FPN, - PSPNet, - DeepLabV3, - DeepLabV3Plus, - PAN, - ] - archs_dict = {a.__name__.lower(): a for a in archs} - try: - model_class = archs_dict[arch.lower()] - except KeyError: - raise KeyError( - "Wrong architecture type `{}`. Available options are: {}".format( - arch, list(archs_dict.keys()), - ) - ) - return model_class( - encoder_name=encoder_name, - encoder_weights=encoder_weights, - in_channels=in_channels, - classes=classes, - **kwargs, - ) diff --git a/spaces/godot-demo/godot-3d-trucks/index.html b/spaces/godot-demo/godot-3d-trucks/index.html deleted file mode 100644 index b6228c62f8cc50fb177a4c928e6316230453cd46..0000000000000000000000000000000000000000 --- a/spaces/godot-demo/godot-3d-trucks/index.html +++ /dev/null @@ -1,247 +0,0 @@ - - - - - - Truck Town - - - - - - - - HTML5 canvas appears to be unsupported in the current browser.
      - Please try updating or use a different browser. -
      -
      - - - -
      - - - - - - diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Ez3d2009 Serial Code How to Create Stunning 3D Models in Minutes.md b/spaces/gotiQspiryo/whisper-ui/examples/Ez3d2009 Serial Code How to Create Stunning 3D Models in Minutes.md deleted file mode 100644 index b2939e9e44d36049231f4785ee5d384d5b410ff5..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Ez3d2009 Serial Code How to Create Stunning 3D Models in Minutes.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Ez3d2009 Serial Codel


      DOWNLOADhttps://urlgoal.com/2uyMcG



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/gotiQspiryo/whisper-ui/examples/HACK Adobe Universal Patcher V1.1 [CC 2014].md b/spaces/gotiQspiryo/whisper-ui/examples/HACK Adobe Universal Patcher V1.1 [CC 2014].md deleted file mode 100644 index 05c73ba9b22d350579c531a397be1cf3b5c17b66..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/HACK Adobe Universal Patcher V1.1 [CC 2014].md +++ /dev/null @@ -1,9 +0,0 @@ -
      -

      adobe zii is a trojan, or a nasty virus that attempts to steal your personal information. if you have this virus on your computer, it is important that you delete it immediately. if you believe that you have this virus, download the best anti-virus software to find out if it is present on your computer. in case the virus is found, it is important that you remove the virus immediately.

      -

      HACK Adobe Universal Patcher v1.1 [CC 2014]


      Download File ✒ ✒ ✒ https://urlgoal.com/2uyLWK



      -

      flash player used to make the adobe zii working fine and most of the time the adobe zii crack for mac os x and windows 7,8,8.1,10 & 10.10,6,5,4,3,2 & 1 & 10 is the same. but we will use another adobe zii crack because of the reason that you may be not able to use the adobe zii without adobe flash player.

      -

      i havent focused on adobes new cracking resources for a long time, so some of those tools shared at appnee may be failed now (i.e.: outdated for the latest versions of adobe apps). fortunately, i received an email from medtaha4everthe other day, recommending adobe cc 2015, 2016 genp. after actual tests, i decided to include and share it at appnee. at this point, theres a new addition to our adobe all products universal patchers collection, thanks to mohamed t.b.

      -

      this awesome apps is bolstered by recent adaptation of mac os catalina or upper and lower any macos. in addition, it underpins every single very recent rendition of cc 2015, cc 2016, cc 2017, cc 2018, cc 2019 cc 2020 and cc 2021. this application automatically distinguishes the adaptation of your application and after that legislate it in a bloom. below adobezii.com share adobe zii full version or adobe zii cracked or adobe zii latest version details.

      -

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/I Caught My Brother Having Gay Sex NEW.md b/spaces/gotiQspiryo/whisper-ui/examples/I Caught My Brother Having Gay Sex NEW.md deleted file mode 100644 index 64bda1030273a76c5b35cfce441c46e7ea87ba15..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/I Caught My Brother Having Gay Sex NEW.md +++ /dev/null @@ -1,14 +0,0 @@ - -

      Humor Sex Story: Emily arrived home from school and found her brother Jordy wanking, more specifically he was wanking over pictures of her boyfriend Tyler. So there was only one thing to do, a double date, Emily & Tyler, Jordy & Tyler's sister Jess. Let's see what comes out the other side... (note there's an m/m tag, but it's only in the background, I've included it for people who are offended by gay sex)

      -

      When I was very young my brother played with my cock whilst I led on my bed. He started to suck it and of course it felt really good. This ended with me having my first ever orgasam in my brothers mouth. I loved it. This was a real sexual awakening for me. From this moment I would mastebate when ever I could. The sex with my brother continued....... My Dad would leave the house at about 9pm to collect my mother from work on a couple of evenings every week and he would leave me and my brother alone in the house (he was gone for about an hour). As soon as he was out the house, both me & my brother would jump out of our beds and strip naked. We would kiss & have oral sex, which was fantastic. We tried anal sex but I was only young and found it difficult to take him inside me but I always tried as I really wanted him to cum inside me. This carried on for several years. When I was 16 I went on holiday with him to Portugal (our first holiday without Mum & Dad). Of course whilst we were away we enjoyed having sex on a daily basis. We would start by sucking each other and probing our tounges into each others anus. I could take his full length and still remember him 'twitching' inside me as he sprayed his load. After this holiday we did not engage in sex again. For the life of me I can't remember why we stopped. I'm now 43 years old and he is 46. I still see him regularly. We are both married now, although I have had quite a few gay sex excounters behind my wifes back. I know my brother looks at gay porn on the web (I have seen evidence on his PC's history) but I am not sure if he still gets involved. I have never spoken to him since the Portugal trip about our sexual past. It's a shame that it fizzled out as sex with my brother was without dought the best sex I have ever had. It did feel wrong at the time but I think that was what made it that bit more exciting. If he ever offers to have sex with me again, I would jump at the chance. Sometimes I think I should mention it to him, but I am afraid of rejection as he may want to keep our sexual relationship 'historic'.

      -

      I Caught My Brother Having Gay Sex


      Download Filehttps://urlgoal.com/2uyLHv



      -

      This happened during the summer when I was a teenager. It was the first summer I was jerking off as often as I could - I was always looking for an opportunity! I was too shy to hint at my obsession to friends so I really did not know if other guys were doing it as much as I was. I did not have a summer job so I had LOTS of free time and guess what, my first choice was to find an excuse to jerk off. Now this may sound like it would be easy but I had my parents, three brothers and a sister and finding the time to do it in private was never easy - but I got real creative and always managed to find some way to do it at least once a day and sometimes a few times in the same day!

      -

      OK, during the summer I shared a room with my youngest brother and to get to the door to our room required going through my older brother's room (the layout of the rooms is weird). That summer we were in a rented summer house (more like a bungalow) that was not insulated. The walls were just a piece of wood thick and any sound in one room could be easily heard in adjacent rooms (my parents were a thin floor above me (I could see light between the floor boards at night). I could hear my older brother snore in the room next to me so I am sure he could have heard me breathing heavily at night. Needless to say, the house was almost always busy and it was not easy to find time to jerk off in privacy - but I always managed to find plenty of opportunities since my dick always seems to be hard and I was real clever in doing it without anyone knowing. Sometimes I took some risks but I was too horny to not jerk off... I always managed to do it without being caught and after a while I was pretty confident I was able to do it safely without ever being caught... I was still real careful and always had a plan to cover up in a hurry if I heard anyone approaching (or so I thought...).

      -

      -

      I usually did it at night when my roommate brother was asleep. We had a window fan in our room and if the fan was on it's fastest setting it made enough noise to mask the bed squeaks, my rapid breathing and eventual groans. Afternoons were sometime good opportunities to have jerking off marathons if the house was empty (plus it was better than doing it than in the dark at night because during the day I could see my dick as I jerked off - it added to the excitement!). One REALLY hot summer afternoon a great opportunity presented it self and my dick was as hard as a rock with anticipation. My parents were not expected back before dinner time. My sister was at a friend's house for the day and my younger brothers were at day camp. My older brother had a summer job and did not get back until dinner time so I had the house to my self. (little did I know that he had taken the day off... well, you'll see what happened, read on...)

      -

      I checked all the rooms in the house (hey, I was being careful!), I closed every door in the house figuring I would hear anyone if they came into the house. I closed the room to my brother's room and continued on to my room and closed that door too. It was incredibly hot so I cranked the fan up to high - I had long since taken my T shirt off and all I was just wearing was some cut offs and my underwear. I unbuttoned my shorts and they dropped to the floor. I was sweaty from the heat so I wiped myself off with a towel and stood in front of the fan so the air would dry me off. I put my hand on my underwear as I stood there and could feel my disk which felt like it was hard as steel - it was so hard it had popped out of the top of my jockey shorts. I was horny as shit by now I was teasing myself knowing that I would be stroking my dick soon but had lots of time to play and get as aroused as possible.

      -

      Just as I was pounding furiously, arching my back and could feel the early signs of the much anticipated spasms, I heard a sound behind me (the fan must have masked the other sounds)! OH shit!!! My heart stopped and I froze in mid stroke! I looked over my shoulder and saw to my HORROR that my 17 year old brother was standing with the door wide open and watching me. I thought I was going to fucking die!!! Here I was with my almost 6" throbbing dick in my hands and just seconds from having the most intense orgasm of my life. I was stark naked, legs spread apart and my butt off the bed just seconds from shooting a major load.

      -

      I remember trying to think of what to do - I was so close to an intense orgasm (which is what my dick voted for) but I was WAY too embarrassed to keep going with my older brother watching! My brother had never seen my dick hard since I had reached puberty and I had never let on to him that I was so into jerking off (or basically always horny). Since I had started puberty I had never discussed sex and certainly not jerking off with my older brother - though I was sure he jerked off and assumed he was always anxious to know if I did too. But, this was NOT the time to discuss masturbation - I was in a some what "awkward" position - (naked, breathing heavily and holding an aching hardon) while my brother stood there silently smiling... I could not cover my self up (what was the point - I could not have been any more exposed!) so I just laid there. I did not know how long he had been watching but it was obvious that he had seen enough to know what I was doing and he probably knew I was close to shooting my load... After what seemed like an eternity, without saying a word my brother (finally) turned and closed the door (I think he finally realized I was not going to keep going while he was standing there).

      -

      I stayed motionless on my bed holding my still hard dick in my hand. The walls were so thin I could hear that he was still in his adjacent room and must have been stalling to do something so he could listen to see if I was going to continue. I waited hoping he would leave (I still had a major hardon and had decided there was no use wasting a good orgasm!). After a while I decided I would just do it quietly (I had practiced jerking off quietly to avoid being heard by my younger brother as a roommate many times). After waiting what seemed like enough time I started slowly stroking my dick again and though it took a while to get to the same point I was before being caught, it was not too long before I was ready to shoot. I do not know if it was because of the intensity of the first build up, the surprise of getting caught or having delayed it for such a long time, but I was so tense that even though I was trying to do it SILENTLY, the more I tried to stifle my movements and breathing, the more intense the spasms got. My head felt light as I held my breath and I felt the spasms uncontrollably pulse hot cum out of my now aching dick. The first shot spurt out and hit me in the chin with such force it scared me! I quickly forgot it as the spasms continued and I shot the most number of spurts I had ever experienced. I could feel each hot spurt as it left my groin and traveled the length of my pulsing dick. Each spurt landed on my chest and stomach and I realized I had a huge mess - more cum than I had ever shot at one time! I fought to avoid gasping as I needed to catch my breath after holding it for so long. I was totally worn out from all of the tension and suspense and fell back on the bed in total exhaustion. I laid there to catch my breath and wanted to be sure not to doze off least I be discovered naked again! I eventually wiped up the mess and put my shorts back on. But how was I going to get out of my room?

      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/gradio/longformer/tests/test_sliding_chunks.py b/spaces/gradio/longformer/tests/test_sliding_chunks.py deleted file mode 100644 index d411135a7a7bbca4da32d951f46b4c234c497dcd..0000000000000000000000000000000000000000 --- a/spaces/gradio/longformer/tests/test_sliding_chunks.py +++ /dev/null @@ -1,85 +0,0 @@ -import time -import unittest -import torch -import numpy as np -import random -from longformer.diagonaled_mm_tvm import diagonaled_mm as diagonaled_mm_tvm, mask_invalid_locations -from longformer.sliding_chunks import sliding_chunks_matmul_pv, sliding_chunks_matmul_qk - - -def same_storage(x, y): - '''Tests if two tensors share the same underlying storage (for memory optimizations)''' - return x.storage().data_ptr() == y.storage().data_ptr() - - -class TestSlidingChunksMM(unittest.TestCase): - def test_tvm_equal_sliding_chunks(self): - np.random.seed(3) - random.seed(3) - torch.manual_seed(3) - torch.cuda.manual_seed(3) - torch.cuda.manual_seed_all(3) - - torch.set_printoptions(sci_mode=False) - N = 4096 # * 16 - M = 64 # hidden size - W = 256 # one sided. Actual window size = 2w+1 - B = 3 - D = 1 # no dilation - H = 12 # number of heads - autoregressive = False # not autoregressive - device = 'cuda' - dtype = torch.float32 - - failed_tests = 0 - time1 = time2 = 0 - for i in range(50): - if i < 5: - time1 = time2 = 0 # don't include the first few iterations because of high variance - - query = torch.randn(B * N * H * M, requires_grad=True, device=device, dtype=dtype).view(B, N, H, M) - key = torch.randn(B * N * H * M, requires_grad=True, device=device, dtype=dtype).flip(dims=(0,)).view(B, N, H, M) - value = torch.randn(B * N * H * M, requires_grad=True, device=device, dtype=dtype).view(B, N, H, M) - - # TVM MM - torch.cuda.synchronize() - start = time.time() - attention1 = diagonaled_mm_tvm(query, key, W, D, False, 0, autoregressive) - mask_invalid_locations(attention1, W, D, autoregressive) - attention_probs1 = torch.nn.functional.softmax(attention1, dim=-1) - context1 = diagonaled_mm_tvm(attention_probs1, value, W, D, True, 0, autoregressive) - context1.sum().backward() - torch.cuda.synchronize() - time1 += time.time() - start - torch.cuda.empty_cache() - - # query = query.half() # uncomment to profile the fp16 performance - # key = key.half() - # value = value.half() - assert D == 1 - assert not autoregressive - torch.cuda.synchronize() - start = time.time() - attention2 = sliding_chunks_matmul_qk(query, key, W, float('-inf')) - attention_probs2 = torch.nn.functional.softmax(attention2, dim=-1) - context2 = sliding_chunks_matmul_pv(attention_probs2, value, W) - context2.sum().backward() - torch.cuda.synchronize() - time2 += time.time() - start - torch.cuda.empty_cache() - - try: - assert torch.allclose(attention1, attention2.float(), atol=1e-4, rtol=1e-5) - assert torch.allclose(context1, context2.float(), atol=1e-4, rtol=1e-5) - except AssertionError: - failed_tests += 1 - - print('Time tvm: {0:.5f} s'.format(time1)) - print('Time pytorch sliding chunks: {0:.5f} s'.format(time2)) - print('Sliding chunks vs. 
TVM speedup: {0:.5f}x'.format(time1/time2)) - print(f'Failed tests: {failed_tests}/{i+1}') - assert failed_tests == 0 - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/gwang-kim/DATID-3D/eg3d/metrics/__init__.py b/spaces/gwang-kim/DATID-3D/eg3d/metrics/__init__.py deleted file mode 100644 index dfebd04f47e6f6b1b44984c14c23b57d56f72240..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/eg3d/metrics/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. - -# empty diff --git a/spaces/gwang-kim/DATID-3D/eg3d/torch_utils/ops/conv2d_resample.py b/spaces/gwang-kim/DATID-3D/eg3d/torch_utils/ops/conv2d_resample.py deleted file mode 100644 index d46f4ddd85606b9032d08efe3556ecad4676cee5..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/eg3d/torch_utils/ops/conv2d_resample.py +++ /dev/null @@ -1,145 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. - -"""2D convolution with optional up/downsampling.""" - -import torch - -from .. import misc -from . import conv2d_gradfix -from . import upfirdn2d -from .upfirdn2d import _parse_padding -from .upfirdn2d import _get_filter_size - -#---------------------------------------------------------------------------- - -def _get_weight_shape(w): - with misc.suppress_tracer_warnings(): # this value will be treated as a constant - shape = [int(sz) for sz in w.shape] - misc.assert_shape(w, shape) - return shape - -#---------------------------------------------------------------------------- - -def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True): - """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations. - """ - _out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w) - - # Flip weight if requested. - # Note: conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False). - if not flip_weight and (kw > 1 or kh > 1): - w = w.flip([2, 3]) - - # Execute using conv2d_gradfix. - op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d - return op(x, w, stride=stride, padding=padding, groups=groups) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False): - r"""2D convolution with optional up/downsampling. 
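
    Functionally this matches running ``upfirdn2d`` upsampling, a plain 2D
    convolution and ``upfirdn2d`` downsampling in sequence (see the generic
    fallback at the end of the function), with the common cases fused into
    faster paths.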
- - Padding is performed only once at the beginning, not between the operations. - - Args: - x: Input tensor of shape - `[batch_size, in_channels, in_height, in_width]`. - w: Weight tensor of shape - `[out_channels, in_channels//groups, kernel_height, kernel_width]`. - f: Low-pass filter for up/downsampling. Must be prepared beforehand by - calling upfirdn2d.setup_filter(). None = identity (default). - up: Integer upsampling factor (default: 1). - down: Integer downsampling factor (default: 1). - padding: Padding with respect to the upsampled image. Can be a single number - or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - groups: Split input channels into N groups (default: 1). - flip_weight: False = convolution, True = correlation (default: True). - flip_filter: False = convolution, True = correlation (default: False). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - # Validate arguments. - assert isinstance(x, torch.Tensor) and (x.ndim == 4) - assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype) - assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32) - assert isinstance(up, int) and (up >= 1) - assert isinstance(down, int) and (down >= 1) - assert isinstance(groups, int) and (groups >= 1) - out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w) - fw, fh = _get_filter_size(f) - px0, px1, py0, py1 = _parse_padding(padding) - - # Adjust padding to account for up/downsampling. - if up > 1: - px0 += (fw + up - 1) // 2 - px1 += (fw - up) // 2 - py0 += (fh + up - 1) // 2 - py1 += (fh - up) // 2 - if down > 1: - px0 += (fw - down + 1) // 2 - px1 += (fw - down) // 2 - py0 += (fh - down + 1) // 2 - py1 += (fh - down) // 2 - - # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve. - if kw == 1 and kh == 1 and (down > 1 and up == 1): - x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter) - x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) - return x - - # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample. - if kw == 1 and kh == 1 and (up > 1 and down == 1): - x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) - x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) - return x - - # Fast path: downsampling only => use strided convolution. - if down > 1 and up == 1: - x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter) - x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight) - return x - - # Fast path: upsampling with optional downsampling => use transpose strided convolution. 
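    # conv_transpose2d expects its weight laid out as
    # [in_channels, out_channels // groups, kh, kw], so the weight is transposed
    # (per group) below. A transposed convolution with stride=up already inserts
    # the up-1 zeros between input samples; whatever part of the requested padding
    # it cannot express directly (negative or asymmetric amounts) is applied
    # afterwards by upfirdn2d together with the low-pass filter f and the up**2 gain.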
- if up > 1: - if groups == 1: - w = w.transpose(0, 1) - else: - w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw) - w = w.transpose(1, 2) - w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw) - px0 -= kw - 1 - px1 -= kw - up - py0 -= kh - 1 - py1 -= kh - up - pxt = max(min(-px0, -px1), 0) - pyt = max(min(-py0, -py1), 0) - x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight)) - x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter) - if down > 1: - x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) - return x - - # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d. - if up == 1 and down == 1: - if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0: - return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight) - - # Fallback: Generic reference implementation. - x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) - x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) - if down > 1: - x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) - return x - -#---------------------------------------------------------------------------- diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/torch_utils/__init__.py b/spaces/gyugnsu/DragGan-Inversion/PTI/torch_utils/__init__.py deleted file mode 100644 index ece0ea08fe2e939cc260a1dafc0ab5b391b773d9..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/PTI/torch_utils/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -# empty diff --git a/spaces/housexu123/bingo-2.0/src/components/tone-selector.tsx b/spaces/housexu123/bingo-2.0/src/components/tone-selector.tsx deleted file mode 100644 index 5c6e464c91f564b895acd121f0a4a79ed9c5c356..0000000000000000000000000000000000000000 --- a/spaces/housexu123/bingo-2.0/src/components/tone-selector.tsx +++ /dev/null @@ -1,43 +0,0 @@ -import React from 'react' -import { BingConversationStyle } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' - -type ToneItem = { - type: BingConversationStyle, - name: string -} - -const ToneList: ToneItem[] = [ - { name: '有创造力', type: BingConversationStyle.Creative }, - { name: '更平衡', type: BingConversationStyle.Balanced }, - { name: '更精确', type: BingConversationStyle.Precise } -] - -interface ToneSelectorProps { - type: BingConversationStyle | '' - onChange?: (type: BingConversationStyle) => void -} - -export function ToneSelector({ type, onChange }: ToneSelectorProps) { - return ( -
      -
      - 选择对话样式 -
      -
      -
        - { - ToneList.map(tone => ( -
      • onChange?.(tone.type)}> - -
      • - )) - } -
      -
      -
      - ) -} diff --git a/spaces/huggingface-projects/huggingbots/app.py b/spaces/huggingface-projects/huggingbots/app.py deleted file mode 100644 index 5fc32d5422755c2da37dbe119d2983fbf2372d2c..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/huggingbots/app.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import random -import threading - -import discord -import gradio as gr -from discord import app_commands -from discord.ext import commands - - -# HF GUILD SETTINGS -MY_GUILD_ID = 1077674588122648679 if os.getenv("TEST_ENV", False) else 879548962464493619 -MY_GUILD = discord.Object(id=MY_GUILD_ID) -DISCORD_TOKEN = os.environ.get("DISCORD_TOKEN", None) - - -class Bot(commands.Bot): - """This structure allows slash commands to work instantly.""" - - def __init__(self): - super().__init__(command_prefix="/", intents=discord.Intents.all()) - - async def setup_hook(self): - await self.tree.sync(guild=discord.Object(MY_GUILD_ID)) - print(f"Synced slash commands for {self.user}.") - - -client = Bot() - - -@client.event -async def on_ready(): - print(f"Logged in as {client.user} (ID: {client.user.id})") - print("------") - - -def run_bot(): - client.run(DISCORD_TOKEN) - - -threading.Thread(target=run_bot).start() -"""This allows us to run the Discord bot in a Python thread""" -with gr.Blocks() as demo: - gr.Markdown(""" - # Huggingbots Server - This space hosts the huggingbots discord bot. - Currently supported models are Falcon and DeepfloydIF - """) -demo.queue(concurrency_count=100) -demo.queue(max_size=100) -demo.launch() diff --git a/spaces/huggingface-projects/wordalle/ALT-README.md b/spaces/huggingface-projects/wordalle/ALT-README.md deleted file mode 100644 index 135aadd9e04be18f3e6136092b8cc932e100e416..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/wordalle/ALT-README.md +++ /dev/null @@ -1 +0,0 @@ -# Wordalle diff --git a/spaces/hungchiayu/CaptionFLAN-T5/app.py b/spaces/hungchiayu/CaptionFLAN-T5/app.py deleted file mode 100644 index 0ecc2d1d0fc6ffef49fdf815972ebe43a7b57f3d..0000000000000000000000000000000000000000 --- a/spaces/hungchiayu/CaptionFLAN-T5/app.py +++ /dev/null @@ -1,49 +0,0 @@ - -import streamlit as st -from PIL import Image -import urllib.request -import torch -from torch import nn -import numpy as np -from VT5 import VT5 -from transformers import ( - AutoModelForSeq2SeqLM, - AutoTokenizer, - Trainer, - TrainingArguments, - T5Tokenizer, - T5ForConditionalGeneration, - CLIPVisionModelWithProjection, - AutoProcessor -) - - -clip = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32") -processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") - -tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small") -t5 = T5ForConditionalGeneration.from_pretrained("google/flan-t5-small") - -vt5 = VT5(t5,tokenizer,clip) -vt5.load_state_dict(torch.load('weights.bin',map_location=torch.device('cpu'))) - -# Assuming you have this function that generates captions -def generate_caption(image): - # Your model code here - caption = "This is a placeholder caption" - - caption = vt5.generate_caption(image) - return caption - -st.title("Image Captioning App") -#st.image(image.numpy().reshape(224,224,3), caption='Uploaded Image.', clamp=True,use_column_width=True) -uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png"]) - -if uploaded_file is not None: - image = Image.open(uploaded_file) - st.image(image, caption='Uploaded Image.', clamp=True,use_column_width=True) - image = 
processor(images=image,return_tensors='pt').pixel_values - st.write("") - st.write("Generating caption...") - caption = generate_caption(image) - st.write("Caption: ", caption) diff --git a/spaces/hysts/StyleGAN-Human-Interpolation/README.md b/spaces/hysts/StyleGAN-Human-Interpolation/README.md deleted file mode 100644 index 8c879921d64bec2f84c78acc0ef5cf9b1d71ff93..0000000000000000000000000000000000000000 --- a/spaces/hysts/StyleGAN-Human-Interpolation/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: StyleGAN-Human Interpolation -emoji: 🏢 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -suggested_hardware: t4-small ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference - -https://arxiv.org/abs/2204.11823 diff --git a/spaces/hysts/daily-papers/papers.py b/spaces/hysts/daily-papers/papers.py deleted file mode 100644 index 9f625a298c4652992a92c63efed4e323a651d0b0..0000000000000000000000000000000000000000 --- a/spaces/hysts/daily-papers/papers.py +++ /dev/null @@ -1,99 +0,0 @@ -import dataclasses -import datetime -import operator -import pathlib - -import pandas as pd -import requests -import tqdm.auto - - -@dataclasses.dataclass(frozen=True) -class PaperInfo: - date: str - arxiv_id: str - github: str - title: str - paper_page: str - upvotes: int - published_at: str - - def __post_init__(self): - object.__setattr__(self, "published_at", PaperInfo.convert_timestamp(self.published_at)) - - @staticmethod - def convert_timestamp(timestamp: str) -> str: - try: - return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y/%m/%d %H:%M:%S") - except ValueError: - return timestamp - - -def get_df(path: pathlib.Path | str) -> pd.DataFrame: - df = pd.read_csv(path, dtype=str).fillna("") - paper_info = [] - for _, row in tqdm.auto.tqdm(df.iterrows(), total=len(df)): - res = requests.get(f"https://huggingface.co/api/papers/{row.arxiv_id}").json() - info = PaperInfo( - **row, - title=res["title"], - paper_page=f"https://huggingface.co/papers/{row.arxiv_id}", - upvotes=res["upvotes"], - published_at=res["publishedAt"], - ) - paper_info.append(info) - return pd.DataFrame([dataclasses.asdict(info) for info in paper_info]) - - -class Prettifier: - @staticmethod - def get_github_link(link: str) -> str: - if not link: - return "" - return Prettifier.create_link("github", link) - - @staticmethod - def create_link(text: str, url: str) -> str: - return f'{text}' - - @staticmethod - def to_div(text: str | None, category_name: str) -> str: - if text is None: - text = "" - class_name = f"{category_name}-{text.lower()}" - return f'
      {text}
      ' - - def __call__(self, df: pd.DataFrame) -> pd.DataFrame: - df = df.sort_values("arxiv_id", ascending=False).reset_index(drop=True) - new_rows = [] - for _, row in df.iterrows(): - new_row = dict(row) | { - "date": Prettifier.create_link(row.date, f"https://huggingface.co/papers?date={row.date}"), - "paper_page": Prettifier.create_link(row.arxiv_id, row.paper_page), - "github": self.get_github_link(row.github), - } - new_rows.append(new_row) - return pd.DataFrame(new_rows, columns=df.columns) - - -class PaperList: - COLUMN_INFO = [ - ["date", "markdown"], - ["paper_page", "markdown"], - ["title", "str"], - ["github", "markdown"], - ["upvotes", "number"], - ] - - def __init__(self, df: pd.DataFrame): - self.df_raw = df - self._prettifier = Prettifier() - self.df_prettified = self._prettifier(df).loc[:, self.column_names] - - @property - def column_names(self): - return list(map(operator.itemgetter(0), self.COLUMN_INFO)) - - @property - def column_datatype(self): - return list(map(operator.itemgetter(1), self.COLUMN_INFO)) diff --git a/spaces/hysts/mmdetection/style.css b/spaces/hysts/mmdetection/style.css deleted file mode 100644 index 00e4b5bfb2a68feba80f955b2735a288e233c776..0000000000000000000000000000000000000000 --- a/spaces/hysts/mmdetection/style.css +++ /dev/null @@ -1,13 +0,0 @@ -h1 { - text-align: center; -} -img#overview { - display: block; - margin: auto; - max-width: 1000px; - max-height: 600px; -} -img#visitor-badge { - display: block; - margin: auto; -} diff --git a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/dataset.py b/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/dataset.py deleted file mode 100644 index 595eda79c56400a3243b2bd0d13a0dce9b8afd1d..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/dataset.py +++ /dev/null @@ -1,268 +0,0 @@ -import numbers -import os -import queue as Queue -import threading -from functools import partial -from typing import Iterable - -import mxnet as mx -import numpy as np -import torch -from torch import distributed -from torch.utils.data import DataLoader -from torch.utils.data import Dataset -from torchvision import transforms -from torchvision.datasets import ImageFolder -from utils.utils_distributed_sampler import DistributedSampler -from utils.utils_distributed_sampler import get_dist_info -from utils.utils_distributed_sampler import worker_init_fn - - -def get_dataloader( - root_dir, - local_rank, - batch_size, - dali=False, - seed=2048, - num_workers=2, -) -> Iterable: - - rec = os.path.join(root_dir, "train.rec") - idx = os.path.join(root_dir, "train.idx") - train_set = None - - # Synthetic - if root_dir == "synthetic": - train_set = SyntheticDataset() - dali = False - - # Mxnet RecordIO - elif os.path.exists(rec) and os.path.exists(idx): - train_set = MXFaceDataset(root_dir=root_dir, local_rank=local_rank) - - # Image Folder - else: - transform = transforms.Compose( - [ - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), - ] - ) - train_set = ImageFolder(root_dir, transform) - - # DALI - if dali: - return dali_data_iter(batch_size=batch_size, rec_file=rec, idx_file=idx, num_threads=2, local_rank=local_rank) - - rank, world_size = get_dist_info() - train_sampler = DistributedSampler(train_set, num_replicas=world_size, rank=rank, shuffle=True, seed=seed) - - if seed is None: - init_fn = None - else: 
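        # worker_init_fn (imported from utils.utils_distributed_sampler) derives a
        # per-worker seed from (seed, rank, worker id), so augmentation randomness
        # is reproducible yet not duplicated across workers or ranks.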
- init_fn = partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) - - train_loader = DataLoaderX( - local_rank=local_rank, - dataset=train_set, - batch_size=batch_size, - sampler=train_sampler, - num_workers=num_workers, - pin_memory=True, - drop_last=True, - worker_init_fn=init_fn, - ) - - return train_loader - - -class BackgroundGenerator(threading.Thread): - def __init__(self, generator, local_rank, max_prefetch=6): - super(BackgroundGenerator, self).__init__() - self.queue = Queue.Queue(max_prefetch) - self.generator = generator - self.local_rank = local_rank - self.daemon = True - self.start() - - def run(self): - torch.cuda.set_device(self.local_rank) - for item in self.generator: - self.queue.put(item) - self.queue.put(None) - - def next(self): - next_item = self.queue.get() - if next_item is None: - raise StopIteration - return next_item - - def __next__(self): - return self.next() - - def __iter__(self): - return self - - -class DataLoaderX(DataLoader): - def __init__(self, local_rank, **kwargs): - super(DataLoaderX, self).__init__(**kwargs) - self.stream = torch.cuda.Stream(local_rank) - self.local_rank = local_rank - - def __iter__(self): - self.iter = super(DataLoaderX, self).__iter__() - self.iter = BackgroundGenerator(self.iter, self.local_rank) - self.preload() - return self - - def preload(self): - self.batch = next(self.iter, None) - if self.batch is None: - return None - with torch.cuda.stream(self.stream): - for k in range(len(self.batch)): - self.batch[k] = self.batch[k].to(device=self.local_rank, non_blocking=True) - - def __next__(self): - torch.cuda.current_stream().wait_stream(self.stream) - batch = self.batch - if batch is None: - raise StopIteration - self.preload() - return batch - - -class MXFaceDataset(Dataset): - def __init__(self, root_dir, local_rank): - super(MXFaceDataset, self).__init__() - self.transform = transforms.Compose( - [ - transforms.ToPILImage(), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), - ] - ) - self.root_dir = root_dir - self.local_rank = local_rank - path_imgrec = os.path.join(root_dir, "train.rec") - path_imgidx = os.path.join(root_dir, "train.idx") - self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, "r") - s = self.imgrec.read_idx(0) - header, _ = mx.recordio.unpack(s) - if header.flag > 0: - self.header0 = (int(header.label[0]), int(header.label[1])) - self.imgidx = np.array(range(1, int(header.label[0]))) - else: - self.imgidx = np.array(list(self.imgrec.keys)) - - def __getitem__(self, index): - idx = self.imgidx[index] - s = self.imgrec.read_idx(idx) - header, img = mx.recordio.unpack(s) - label = header.label - if not isinstance(label, numbers.Number): - label = label[0] - label = torch.tensor(label, dtype=torch.long) - sample = mx.image.imdecode(img).asnumpy() - if self.transform is not None: - sample = self.transform(sample) - return sample, label - - def __len__(self): - return len(self.imgidx) - - -class SyntheticDataset(Dataset): - def __init__(self): - super(SyntheticDataset, self).__init__() - img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32) - img = np.transpose(img, (2, 0, 1)) - img = torch.from_numpy(img).squeeze(0).float() - img = ((img / 255) - 0.5) / 0.5 - self.img = img - self.label = 1 - - def __getitem__(self, index): - return self.img, self.label - - def __len__(self): - return 1000000 - - -def dali_data_iter( - batch_size: int, - rec_file: str, - idx_file: str, - num_threads: 
int, - initial_fill=32768, - random_shuffle=True, - prefetch_queue_depth=1, - local_rank=0, - name="reader", - mean=(127.5, 127.5, 127.5), - std=(127.5, 127.5, 127.5), -): - """ - Parameters: - ---------- - initial_fill: int - Size of the buffer that is used for shuffling. If random_shuffle is False, this parameter is ignored. - - """ - rank: int = distributed.get_rank() - world_size: int = distributed.get_world_size() - import nvidia.dali.fn as fn - import nvidia.dali.types as types - from nvidia.dali.pipeline import Pipeline - from nvidia.dali.plugin.pytorch import DALIClassificationIterator - - pipe = Pipeline( - batch_size=batch_size, - num_threads=num_threads, - device_id=local_rank, - prefetch_queue_depth=prefetch_queue_depth, - ) - condition_flip = fn.random.coin_flip(probability=0.5) - with pipe: - jpegs, labels = fn.readers.mxnet( - path=rec_file, - index_path=idx_file, - initial_fill=initial_fill, - num_shards=world_size, - shard_id=rank, - random_shuffle=random_shuffle, - pad_last_batch=False, - name=name, - ) - images = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB) - images = fn.crop_mirror_normalize(images, dtype=types.FLOAT, mean=mean, std=std, mirror=condition_flip) - pipe.set_outputs(images, labels) - pipe.build() - return DALIWarper( - DALIClassificationIterator( - pipelines=[pipe], - reader_name=name, - ) - ) - - -@torch.no_grad() -class DALIWarper(object): - def __init__(self, dali_iter): - self.iter = dali_iter - - def __next__(self): - data_dict = self.iter.__next__()[0] - tensor_data = data_dict["data"].cuda() - tensor_label: torch.Tensor = data_dict["label"].cuda().long() - tensor_label.squeeze_() - return tensor_data, tensor_label - - def __iter__(self): - return self - - def reset(self): - self.iter.reset() diff --git a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf4m_mbf.py b/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf4m_mbf.py deleted file mode 100644 index 5ee67b62acb4432b9d4916400ec79433f7dd10ea..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf4m_mbf.py +++ /dev/null @@ -1,27 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.margin_list = (1.0, 0.0, 0.4) -config.network = "mbf" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 1e-4 -config.batch_size = 128 -config.lr = 0.1 -config.verbose = 2000 -config.dali = False - -config.rec = "/train_tmp/WebFace4M" -config.num_classes = 205990 -config.num_image = 4235242 -config.num_epoch = 20 -config.warmup_epoch = 0 -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/eval_ijbc.py b/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/eval_ijbc.py deleted file mode 100644 index 06c3506a8db432049e16b9235d85efe58109b5a8..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/eval_ijbc.py +++ /dev/null @@ -1,450 +0,0 @@ -# coding: utf-8 -import os -import pickle - -import matplotlib -import pandas as pd - -matplotlib.use("Agg") -import matplotlib.pyplot as plt -import timeit -import sklearn -import argparse -import cv2 -import numpy as np -import torch -from skimage import transform as trans -from backbones import get_model -from sklearn.metrics import roc_curve, auc - -from 
menpo.visualize.viewmatplotlib import sample_colours_from_colourmap -from prettytable import PrettyTable -from pathlib import Path - -import sys -import warnings - -sys.path.insert(0, "../") -warnings.filterwarnings("ignore") - -parser = argparse.ArgumentParser(description="do ijb test") -# general -parser.add_argument("--model-prefix", default="", help="path to load model.") -parser.add_argument("--image-path", default="", type=str, help="") -parser.add_argument("--result-dir", default=".", type=str, help="") -parser.add_argument("--batch-size", default=128, type=int, help="") -parser.add_argument("--network", default="iresnet50", type=str, help="") -parser.add_argument("--job", default="insightface", type=str, help="job name") -parser.add_argument("--target", default="IJBC", type=str, help="target, set to IJBC or IJBB") -args = parser.parse_args() - -target = args.target -model_path = args.model_prefix -image_path = args.image_path -result_dir = args.result_dir -gpu_id = None -use_norm_score = True # if Ture, TestMode(N1) -use_detector_score = True # if Ture, TestMode(D1) -use_flip_test = True # if Ture, TestMode(F1) -job = args.job -batch_size = args.batch_size - - -class Embedding(object): - def __init__(self, prefix, data_shape, batch_size=1): - image_size = (112, 112) - self.image_size = image_size - weight = torch.load(prefix) - resnet = get_model(args.network, dropout=0, fp16=False).cuda() - resnet.load_state_dict(weight) - model = torch.nn.DataParallel(resnet) - self.model = model - self.model.eval() - src = np.array( - [[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366], [33.5493, 92.3655], [62.7299, 92.2041]], - dtype=np.float32, - ) - src[:, 0] += 8.0 - self.src = src - self.batch_size = batch_size - self.data_shape = data_shape - - def get(self, rimg, landmark): - - assert landmark.shape[0] == 68 or landmark.shape[0] == 5 - assert landmark.shape[1] == 2 - if landmark.shape[0] == 68: - landmark5 = np.zeros((5, 2), dtype=np.float32) - landmark5[0] = (landmark[36] + landmark[39]) / 2 - landmark5[1] = (landmark[42] + landmark[45]) / 2 - landmark5[2] = landmark[30] - landmark5[3] = landmark[48] - landmark5[4] = landmark[54] - else: - landmark5 = landmark - tform = trans.SimilarityTransform() - tform.estimate(landmark5, self.src) - M = tform.params[0:2, :] - img = cv2.warpAffine(rimg, M, (self.image_size[1], self.image_size[0]), borderValue=0.0) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img_flip = np.fliplr(img) - img = np.transpose(img, (2, 0, 1)) # 3*112*112, RGB - img_flip = np.transpose(img_flip, (2, 0, 1)) - input_blob = np.zeros((2, 3, self.image_size[1], self.image_size[0]), dtype=np.uint8) - input_blob[0] = img - input_blob[1] = img_flip - return input_blob - - @torch.no_grad() - def forward_db(self, batch_data): - imgs = torch.Tensor(batch_data).cuda() - imgs.div_(255).sub_(0.5).div_(0.5) - feat = self.model(imgs) - feat = feat.reshape([self.batch_size, 2 * feat.shape[1]]) - return feat.cpu().numpy() - - -# 将一个list尽量均分成n份,限制len(list)==n,份数大于原list内元素个数则分配空list[] -def divideIntoNstrand(listTemp, n): - twoList = [[] for i in range(n)] - for i, e in enumerate(listTemp): - twoList[i % n].append(e) - return twoList - - -def read_template_media_list(path): - # ijb_meta = np.loadtxt(path, dtype=str) - ijb_meta = pd.read_csv(path, sep=" ", header=None).values - templates = ijb_meta[:, 1].astype(np.int) - medias = ijb_meta[:, 2].astype(np.int) - return templates, medias - - -# In[ ]: - - -def read_template_pair_list(path): - # pairs = np.loadtxt(path, dtype=str) - pairs 
= pd.read_csv(path, sep=" ", header=None).values - # print(pairs.shape) - # print(pairs[:, 0].astype(np.int)) - t1 = pairs[:, 0].astype(np.int) - t2 = pairs[:, 1].astype(np.int) - label = pairs[:, 2].astype(np.int) - return t1, t2, label - - -# In[ ]: - - -def read_image_feature(path): - with open(path, "rb") as fid: - img_feats = pickle.load(fid) - return img_feats - - -# In[ ]: - - -def get_image_feature(img_path, files_list, model_path, epoch, gpu_id): - batch_size = args.batch_size - data_shape = (3, 112, 112) - - files = files_list - print("files:", len(files)) - rare_size = len(files) % batch_size - faceness_scores = [] - batch = 0 - img_feats = np.empty((len(files), 1024), dtype=np.float32) - - batch_data = np.empty((2 * batch_size, 3, 112, 112)) - embedding = Embedding(model_path, data_shape, batch_size) - for img_index, each_line in enumerate(files[: len(files) - rare_size]): - name_lmk_score = each_line.strip().split(" ") - img_name = os.path.join(img_path, name_lmk_score[0]) - img = cv2.imread(img_name) - lmk = np.array([float(x) for x in name_lmk_score[1:-1]], dtype=np.float32) - lmk = lmk.reshape((5, 2)) - input_blob = embedding.get(img, lmk) - - batch_data[2 * (img_index - batch * batch_size)][:] = input_blob[0] - batch_data[2 * (img_index - batch * batch_size) + 1][:] = input_blob[1] - if (img_index + 1) % batch_size == 0: - print("batch", batch) - img_feats[batch * batch_size : batch * batch_size + batch_size][:] = embedding.forward_db(batch_data) - batch += 1 - faceness_scores.append(name_lmk_score[-1]) - - batch_data = np.empty((2 * rare_size, 3, 112, 112)) - embedding = Embedding(model_path, data_shape, rare_size) - for img_index, each_line in enumerate(files[len(files) - rare_size :]): - name_lmk_score = each_line.strip().split(" ") - img_name = os.path.join(img_path, name_lmk_score[0]) - img = cv2.imread(img_name) - lmk = np.array([float(x) for x in name_lmk_score[1:-1]], dtype=np.float32) - lmk = lmk.reshape((5, 2)) - input_blob = embedding.get(img, lmk) - batch_data[2 * img_index][:] = input_blob[0] - batch_data[2 * img_index + 1][:] = input_blob[1] - if (img_index + 1) % rare_size == 0: - print("batch", batch) - img_feats[len(files) - rare_size :][:] = embedding.forward_db(batch_data) - batch += 1 - faceness_scores.append(name_lmk_score[-1]) - faceness_scores = np.array(faceness_scores).astype(np.float32) - # img_feats = np.ones( (len(files), 1024), dtype=np.float32) * 0.01 - # faceness_scores = np.ones( (len(files), ), dtype=np.float32 ) - return img_feats, faceness_scores - - -# In[ ]: - - -def image2template_feature(img_feats=None, templates=None, medias=None): - # ========================================================== - # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim] - # 2. compute media feature. - # 3. compute template feature. 
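    # --- Editor's illustrative aside (not part of the original script) ---
    # A toy walk-through of the aggregation implemented below, assuming a
    # hypothetical template with three face images drawn from two medias:
    #   img_feats[ind_t] -> shape (3, D), medias[ind_t] -> [7, 7, 9]
    # The two media-7 rows are averaged into a single (1, D) vector, the lone
    # media-9 row is kept as is, the per-media vectors are summed into one
    # (1, D) template vector, and after the loop every template row is
    # L2-normalized, so each template ends up as a single unit-length D-dim
    # vector no matter how many images or medias contributed to it.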
- # ========================================================== - unique_templates = np.unique(templates) - template_feats = np.zeros((len(unique_templates), img_feats.shape[1])) - - for count_template, uqt in enumerate(unique_templates): - - (ind_t,) = np.where(templates == uqt) - face_norm_feats = img_feats[ind_t] - face_medias = medias[ind_t] - unique_medias, unique_media_counts = np.unique(face_medias, return_counts=True) - media_norm_feats = [] - for u, ct in zip(unique_medias, unique_media_counts): - (ind_m,) = np.where(face_medias == u) - if ct == 1: - media_norm_feats += [face_norm_feats[ind_m]] - else: # image features from the same video will be aggregated into one feature - media_norm_feats += [np.mean(face_norm_feats[ind_m], axis=0, keepdims=True)] - media_norm_feats = np.array(media_norm_feats) - # media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True)) - template_feats[count_template] = np.sum(media_norm_feats, axis=0) - if count_template % 2000 == 0: - print("Finish Calculating {} template features.".format(count_template)) - # template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True)) - template_norm_feats = sklearn.preprocessing.normalize(template_feats) - # print(template_norm_feats.shape) - return template_norm_feats, unique_templates - - -# In[ ]: - - -def verification(template_norm_feats=None, unique_templates=None, p1=None, p2=None): - # ========================================================== - # Compute set-to-set Similarity Score. - # ========================================================== - template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) - for count_template, uqt in enumerate(unique_templates): - template2id[uqt] = count_template - - score = np.zeros((len(p1),)) # save cosine distance between pairs - - total_pairs = np.array(range(len(p1))) - batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation - sublists = [total_pairs[i : i + batchsize] for i in range(0, len(p1), batchsize)] - total_sublists = len(sublists) - for c, s in enumerate(sublists): - feat1 = template_norm_feats[template2id[p1[s]]] - feat2 = template_norm_feats[template2id[p2[s]]] - similarity_score = np.sum(feat1 * feat2, -1) - score[s] = similarity_score.flatten() - if c % 10 == 0: - print("Finish {}/{} pairs.".format(c, total_sublists)) - return score - - -# In[ ]: -def verification2(template_norm_feats=None, unique_templates=None, p1=None, p2=None): - template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) - for count_template, uqt in enumerate(unique_templates): - template2id[uqt] = count_template - score = np.zeros((len(p1),)) # save cosine distance between pairs - total_pairs = np.array(range(len(p1))) - batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation - sublists = [total_pairs[i : i + batchsize] for i in range(0, len(p1), batchsize)] - total_sublists = len(sublists) - for c, s in enumerate(sublists): - feat1 = template_norm_feats[template2id[p1[s]]] - feat2 = template_norm_feats[template2id[p2[s]]] - similarity_score = np.sum(feat1 * feat2, -1) - score[s] = similarity_score.flatten() - if c % 10 == 0: - print("Finish {}/{} pairs.".format(c, total_sublists)) - return score - - -def read_score(path): - with open(path, "rb") as fid: - img_feats = pickle.load(fid) - return img_feats - - -# # Step1: Load Meta Data - -# In[ ]: - -assert target == "IJBC" or target == "IJBB" - -# 
============================================================= -# load image and template relationships for template feature embedding -# tid --> template id, mid --> media id -# format: -# image_name tid mid -# ============================================================= -start = timeit.default_timer() -templates, medias = read_template_media_list( - os.path.join("%s/meta" % image_path, "%s_face_tid_mid.txt" % target.lower()) -) -stop = timeit.default_timer() -print("Time: %.2f s. " % (stop - start)) - -# In[ ]: - -# ============================================================= -# load template pairs for template-to-template verification -# tid : template id, label : 1/0 -# format: -# tid_1 tid_2 label -# ============================================================= -start = timeit.default_timer() -p1, p2, label = read_template_pair_list( - os.path.join("%s/meta" % image_path, "%s_template_pair_label.txt" % target.lower()) -) -stop = timeit.default_timer() -print("Time: %.2f s. " % (stop - start)) - -# # Step 2: Get Image Features - -# In[ ]: - -# ============================================================= -# load image features -# format: -# img_feats: [image_num x feats_dim] (227630, 512) -# ============================================================= -start = timeit.default_timer() -img_path = "%s/loose_crop" % image_path -img_list_path = "%s/meta/%s_name_5pts_score.txt" % (image_path, target.lower()) -img_list = open(img_list_path) -files = img_list.readlines() -# files_list = divideIntoNstrand(files, rank_size) -files_list = files - -# img_feats -# for i in range(rank_size): -img_feats, faceness_scores = get_image_feature(img_path, files_list, model_path, 0, gpu_id) -stop = timeit.default_timer() -print("Time: %.2f s. " % (stop - start)) -print("Feature Shape: ({} , {}) .".format(img_feats.shape[0], img_feats.shape[1])) - -# # Step3: Get Template Features - -# In[ ]: - -# ============================================================= -# compute template features from image features. -# ============================================================= -start = timeit.default_timer() -# ========================================================== -# Norm feature before aggregation into template feature? -# Feature norm from embedding network and faceness score are able to decrease weights for noise samples (not face). -# ========================================================== -# 1. FaceScore (Feature Norm) -# 2. FaceScore (Detector) - -if use_flip_test: - # concat --- F1 - # img_input_feats = img_feats - # add --- F2 - img_input_feats = img_feats[:, 0 : img_feats.shape[1] // 2] + img_feats[:, img_feats.shape[1] // 2 :] -else: - img_input_feats = img_feats[:, 0 : img_feats.shape[1] // 2] - -if use_norm_score: - img_input_feats = img_input_feats -else: - # normalise features to remove norm information - img_input_feats = img_input_feats / np.sqrt(np.sum(img_input_feats**2, -1, keepdims=True)) - -if use_detector_score: - print(img_input_feats.shape, faceness_scores.shape) - img_input_feats = img_input_feats * faceness_scores[:, np.newaxis] -else: - img_input_feats = img_input_feats - -template_norm_feats, unique_templates = image2template_feature(img_input_feats, templates, medias) -stop = timeit.default_timer() -print("Time: %.2f s. " % (stop - start)) - -# # Step 4: Get Template Similarity Scores - -# In[ ]: - -# ============================================================= -# compute verification scores between template pairs. 
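# --- Editor's illustrative aside (not part of the original script) ---
# Since each template feature is L2-normalized, the pair score computed in
# verification() below is simply the cosine similarity, evaluated chunk-wise:
#   feat1 = template_norm_feats[template2id[p1[s]]]   # (B, 1, D)
#   feat2 = template_norm_feats[template2id[p2[s]]]   # (B, 1, D)
#   score[s] = np.sum(feat1 * feat2, -1).flatten()    # (B,) cosine scores
# The 100000-pair batching only bounds memory; it does not change the scores.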
-# ============================================================= -start = timeit.default_timer() -score = verification(template_norm_feats, unique_templates, p1, p2) -stop = timeit.default_timer() -print("Time: %.2f s. " % (stop - start)) - -# In[ ]: -save_path = os.path.join(result_dir, args.job) -# save_path = result_dir + '/%s_result' % target - -if not os.path.exists(save_path): - os.makedirs(save_path) - -score_save_file = os.path.join(save_path, "%s.npy" % target.lower()) -np.save(score_save_file, score) - -# # Step 5: Get ROC Curves and TPR@FPR Table - -# In[ ]: - -files = [score_save_file] -methods = [] -scores = [] -for file in files: - methods.append(Path(file).stem) - scores.append(np.load(file)) - -methods = np.array(methods) -scores = dict(zip(methods, scores)) -colours = dict(zip(methods, sample_colours_from_colourmap(methods.shape[0], "Set2"))) -x_labels = [10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1] -tpr_fpr_table = PrettyTable(["Methods"] + [str(x) for x in x_labels]) -fig = plt.figure() -for method in methods: - fpr, tpr, _ = roc_curve(label, scores[method]) - roc_auc = auc(fpr, tpr) - fpr = np.flipud(fpr) - tpr = np.flipud(tpr) # select largest tpr at same fpr - plt.plot( - fpr, tpr, color=colours[method], lw=1, label=("[%s (AUC = %0.4f %%)]" % (method.split("-")[-1], roc_auc * 100)) - ) - tpr_fpr_row = [] - tpr_fpr_row.append("%s-%s" % (method, target)) - for fpr_iter in np.arange(len(x_labels)): - _, min_index = min(list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr))))) - tpr_fpr_row.append("%.2f" % (tpr[min_index] * 100)) - tpr_fpr_table.add_row(tpr_fpr_row) -plt.xlim([10**-6, 0.1]) -plt.ylim([0.3, 1.0]) -plt.grid(linestyle="--", linewidth=1) -plt.xticks(x_labels) -plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True)) -plt.xscale("log") -plt.xlabel("False Positive Rate") -plt.ylabel("True Positive Rate") -plt.title("ROC on IJB") -plt.legend(loc="lower right") -fig.savefig(os.path.join(save_path, "%s.pdf" % target.lower())) -print(tpr_fpr_table) diff --git a/spaces/iamironman4279/SadTalker/src/utils/paste_pic.py b/spaces/iamironman4279/SadTalker/src/utils/paste_pic.py deleted file mode 100644 index f9989e21e48e64f620f9b148e65fdfe806c53b14..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/utils/paste_pic.py +++ /dev/null @@ -1,69 +0,0 @@ -import cv2, os -import numpy as np -from tqdm import tqdm -import uuid - -from src.utils.videoio import save_video_with_watermark - -def paste_pic(video_path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop=False): - - if not os.path.isfile(pic_path): - raise ValueError('pic_path must be a valid path to video/image file') - elif pic_path.split('.')[-1] in ['jpg', 'png', 'jpeg']: - # loader for first frame - full_img = cv2.imread(pic_path) - else: - # loader for videos - video_stream = cv2.VideoCapture(pic_path) - fps = video_stream.get(cv2.CAP_PROP_FPS) - full_frames = [] - while 1: - still_reading, frame = video_stream.read() - if not still_reading: - video_stream.release() - break - break - full_img = frame - frame_h = full_img.shape[0] - frame_w = full_img.shape[1] - - video_stream = cv2.VideoCapture(video_path) - fps = video_stream.get(cv2.CAP_PROP_FPS) - crop_frames = [] - while 1: - still_reading, frame = video_stream.read() - if not still_reading: - video_stream.release() - break - crop_frames.append(frame) - - if len(crop_info) != 3: - print("you didn't crop the image") - return - else: - r_w, r_h = crop_info[0] - clx, cly, crx, cry = crop_info[1] - lx, ly, rx, ry = 
crop_info[2] - lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry) - # oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - # oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - - if extended_crop: - oy1, oy2, ox1, ox2 = cly, cry, clx, crx - else: - oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - - tmp_path = str(uuid.uuid4())+'.mp4' - out_tmp = cv2.VideoWriter(tmp_path, cv2.VideoWriter_fourcc(*'MP4V'), fps, (frame_w, frame_h)) - for crop_frame in tqdm(crop_frames, 'seamlessClone:'): - p = cv2.resize(crop_frame.astype(np.uint8), (ox2-ox1, oy2 - oy1)) - - mask = 255*np.ones(p.shape, p.dtype) - location = ((ox1+ox2) // 2, (oy1+oy2) // 2) - gen_img = cv2.seamlessClone(p, full_img, mask, location, cv2.NORMAL_CLONE) - out_tmp.write(gen_img) - - out_tmp.release() - - save_video_with_watermark(tmp_path, new_audio_path, full_video_path, watermark=False) - os.remove(tmp_path) diff --git a/spaces/iamstolas/STOLAS/src/app/page.tsx b/spaces/iamstolas/STOLAS/src/app/page.tsx deleted file mode 100644 index 0dff3431b098ce4fe282cc83fc87a93a28a43090..0000000000000000000000000000000000000000 --- a/spaces/iamstolas/STOLAS/src/app/page.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import dynamic from 'next/dynamic' - -const DynamicComponentWithNoSSR = dynamic( - () => import('../components/chat'), - { ssr: false } -) - -export default function IndexPage() { - return ( - <> -
      - - - ) -} diff --git a/spaces/imseldrith/Article_Rewrite-Paraphrasing_Tool/README.md b/spaces/imseldrith/Article_Rewrite-Paraphrasing_Tool/README.md deleted file mode 100644 index dfb963f6181532c5f5c818d5403ea9368da506d0..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/Article_Rewrite-Paraphrasing_Tool/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Article Rewrite-Paraphrasing Tool -emoji: 🏃 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/inamXcontru/PoeticTTS/Cubase 7 64bit Activation Code 2.md b/spaces/inamXcontru/PoeticTTS/Cubase 7 64bit Activation Code 2.md deleted file mode 100644 index 02116f00ab2a537c3f716d519fa2c4ba79ae3a80..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Cubase 7 64bit Activation Code 2.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Cubase 7 64bit Activation code 2


      Download ❤❤❤ https://gohhs.com/2uz3II



      -
      -Steinberg Cubase 7 Activation Code Txt ->>> ... cubase,7,how,to,activate,with ... Windows 7 Windows 8.1 (64-bit) Windows 10. ... 2; You will be. 1fdad05405
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Blackberry Codes Calculator V1.8.4 Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Blackberry Codes Calculator V1.8.4 Download.md deleted file mode 100644 index 9ba702076f2c78586f21fd70d0a437bd831d00e0..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Blackberry Codes Calculator V1.8.4 Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

      blackberry codes calculator v1.8.4 download


      DOWNLOAD === https://urlin.us/2uEyyV



      - -Learn More. (You can try uninstalling [6]) (You can try uninstalling [5]) (You can try uninstalling [4]) (You can try uninstalling [3]) (You can try uninstalling [2]) (You can try uninstalling [1]) (You can try uninstalling [0]) (You can try uninstalling [9]) (You can try uninstalling [8]) (You can try uninstalling [7]) 4fefd39f24
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/Change Folder Icons 8.7 Portable.md b/spaces/inreVtussa/clothingai/Examples/Change Folder Icons 8.7 Portable.md deleted file mode 100644 index 18257864efde71d57b4525c9d1e4b1572be62059..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Change Folder Icons 8.7 Portable.md +++ /dev/null @@ -1,6 +0,0 @@ -

      change folder icons 8.7 portable


      Download Ziphttps://tiurll.com/2uCitH



      -
      -Change Folder Icons 8.7 Portable http://shoxet.com/1avaip f40dba8b6f Add logo.png (230x90) to ThrottleStop folder for custom logo. ... in Stop ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/ismot/1702t1/dataset/communal/data_augmentation.py b/spaces/ismot/1702t1/dataset/communal/data_augmentation.py deleted file mode 100644 index d4656acf518be6972276e7cb4e42dcf402a79c98..0000000000000000000000000000000000000000 --- a/spaces/ismot/1702t1/dataset/communal/data_augmentation.py +++ /dev/null @@ -1,279 +0,0 @@ -""" -@Date: 2021/07/27 -@description: -""" -import numpy as np -import cv2 -import functools - -from utils.conversion import pixel2lonlat, lonlat2pixel, uv2lonlat, lonlat2uv, pixel2uv - - -@functools.lru_cache() -def prepare_stretch(w, h): - lon = pixel2lonlat(np.array(range(w)), w=w, axis=0) - lat = pixel2lonlat(np.array(range(h)), h=h, axis=1) - sin_lon = np.sin(lon) - cos_lon = np.cos(lon) - tan_lat = np.tan(lat) - return sin_lon, cos_lon, tan_lat - - -def pano_stretch_image(pano_img, kx, ky, kz): - """ - Note that this is the inverse mapping, which refers to Equation 3 in HorizonNet paper (the coordinate system in - the paper is different from here, xz needs to be swapped) - :param pano_img: a panorama image, shape must be [h,w,c] - :param kx: stretching along left-right direction - :param ky: stretching along up-down direction - :param kz: stretching along front-back direction - :return: - """ - w = pano_img.shape[1] - h = pano_img.shape[0] - - sin_lon, cos_lon, tan_lat = prepare_stretch(w, h) - - n_lon = np.arctan2(sin_lon * kz / kx, cos_lon) - n_lat = np.arctan(tan_lat[..., None] * np.sin(n_lon) / sin_lon * kx / ky) - n_pu = lonlat2pixel(n_lon, w=w, axis=0, need_round=False) - n_pv = lonlat2pixel(n_lat, h=h, axis=1, need_round=False) - - pixel_map = np.empty((h, w, 2), dtype=np.float32) - pixel_map[..., 0] = n_pu - pixel_map[..., 1] = n_pv - map1 = pixel_map[..., 0] - map2 = pixel_map[..., 1] - # using wrap mode because it is continues at left or right of panorama - new_img = cv2.remap(pano_img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_WRAP) - return new_img - - -def pano_stretch_conner(corners, kx, ky, kz): - """ - :param corners: - :param kx: stretching along left-right direction - :param ky: stretching along up-down direction - :param kz: stretching along front-back direction - :return: - """ - - lonlat = uv2lonlat(corners) - sin_lon = np.sin(lonlat[..., 0:1]) - cos_lon = np.cos(lonlat[..., 0:1]) - tan_lat = np.tan(lonlat[..., 1:2]) - - n_lon = np.arctan2(sin_lon * kx / kz, cos_lon) - - a = np.bitwise_or(corners[..., 0] == 0.5, corners[..., 0] == 1) - b = np.bitwise_not(a) - w = np.zeros_like(n_lon) - w[b] = np.sin(n_lon[b]) / sin_lon[b] - w[a] = kx / kz - - n_lat = np.arctan(tan_lat * w / kx * ky) - - lst = [n_lon, n_lat] - lonlat = np.concatenate(lst, axis=-1) - new_corners = lonlat2uv(lonlat) - return new_corners - - -def pano_stretch(pano_img, corners, kx, ky, kz): - """ - :param pano_img: a panorama image, shape must be [h,w,c] - :param corners: - :param kx: stretching along left-right direction - :param ky: stretching along up-down direction - :param kz: stretching along front-back direction - :return: - """ - new_img = pano_stretch_image(pano_img, kx, ky, kz) - new_corners = pano_stretch_conner(corners, kx, ky, kz) - return new_img, new_corners - - -class PanoDataAugmentation: - def __init__(self, aug): - self.aug = aug - self.parameters = {} - - def need_aug(self, name): - return name in self.aug and self.aug[name] - - def execute_space_aug(self, corners, image): - if image is None: - return image - - if self.aug is None: - return corners, image - w = image.shape[1] - h = image.shape[0] - - if 
self.need_aug('STRETCH'): - kx = np.random.uniform(1, 2) - kx = 1 / kx if np.random.randint(2) == 0 else kx - # we found that the ky transform may cause IoU to drop (HorizonNet also only x and z transform) - # ky = np.random.uniform(1, 2) - # ky = 1 / ky if np.random.randint(2) == 0 else ky - ky = 1 - kz = np.random.uniform(1, 2) - kz = 1 / kz if np.random.randint(2) == 0 else kz - image, corners = pano_stretch(image, corners, kx, ky, kz) - self.parameters['STRETCH'] = {'kx': kx, 'ky': ky, 'kz': kz} - else: - self.parameters['STRETCH'] = None - - if self.need_aug('ROTATE'): - d_pu = np.random.randint(w) - image = np.roll(image, d_pu, axis=1) - corners[..., 0] = (corners[..., 0] + pixel2uv(np.array([d_pu]), w, h)) % pixel2uv(np.array([w]), w, h) - self.parameters['ROTATE'] = d_pu - else: - self.parameters['ROTATE'] = None - - if self.need_aug('FLIP') and np.random.randint(2) == 0: - image = np.flip(image, axis=1).copy() - corners[..., 0] = pixel2uv(np.array([w]), w, h) - corners[..., 0] - corners = corners[::-1] - self.parameters['FLIP'] = True - else: - self.parameters['FLIP'] = None - - return corners, image - - def execute_visual_aug(self, image): - if self.need_aug('GAMMA'): - p = np.random.uniform(1, 2) - if np.random.randint(2) == 0: - p = 1 / p - image = image ** p - self.parameters['GAMMA'] = p - else: - self.parameters['GAMMA'] = None - - # The following visual augmentation methods are only implemented but not tested - if self.need_aug('HUE') or self.need_aug('SATURATION'): - image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) - - if self.need_aug('HUE') and np.random.randint(2) == 0: - p = np.random.uniform(-0.1, 0.1) - image[..., 0] = np.mod(image[..., 0] + p * 180, 180) - self.parameters['HUE'] = p - else: - self.parameters['HUE'] = None - - if self.need_aug('SATURATION') and np.random.randint(2) == 0: - p = np.random.uniform(0.5, 1.5) - image[..., 1] = np.clip(image[..., 1] * p, 0, 1) - self.parameters['SATURATION'] = p - else: - self.parameters['SATURATION'] = None - - image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB) - - if self.need_aug('CONTRAST') and np.random.randint(2) == 0: - p = np.random.uniform(0.9, 1.1) - mean = image.mean(axis=0).mean(axis=0) - image = (image - mean) * p + mean - image = np.clip(image, 0, 1) - self.parameters['CONTRAST'] = p - else: - self.parameters['CONTRAST'] = None - - return image - - def execute_aug(self, corners, image): - corners, image = self.execute_space_aug(corners, image) - if image is not None: - image = self.execute_visual_aug(image) - return corners, image - - -if __name__ == '__main__1': - from tqdm import trange - from visualization.floorplan import draw_floorplan - from dataset.communal.read import read_image, read_label - from utils.time_watch import TimeWatch - from utils.conversion import uv2xyz - from utils.boundary import corners2boundary - - np.random.seed(123) - pano_img_path = "../../src/dataset/mp3d/image/TbHJrupSAjP_f320ae084f3a447da3e8ab11dd5f9320.png" - label_path = "../../src/dataset/mp3d/label/TbHJrupSAjP_f320ae084f3a447da3e8ab11dd5f9320.json" - pano_img = read_image(pano_img_path) - label = read_label(label_path) - - corners = label['corners'] - ratio = label['ratio'] - - pano_aug = PanoDataAugmentation(aug={ - 'STRETCH': True, - 'ROTATE': True, - 'FLIP': True, - 'GAMMA': True, - # 'HUE': True, - # 'SATURATION': True, - # 'CONTRAST': True - }) - - # draw_floorplan(corners, show=True, marker_color=0.5, center_color=0.8, plan_y=1.6, show_radius=8) - # draw_boundaries(pano_img, corners_list=[corners], show=True, length=1024, 
ratio=ratio) - - w = TimeWatch("test") - for i in trange(50000): - new_corners, new_pano_img = pano_aug.execute_aug(corners.copy(), pano_img.copy()) - # draw_floorplan(uv2xyz(new_corners, plan_y=1.6)[..., ::2], show=True, marker_color=0.5, center_color=0.8, - # show_radius=8) - # draw_boundaries(new_pano_img, corners_list=[new_corners], show=True, length=1024, ratio=ratio) - - -if __name__ == '__main__': - from utils.boundary import corners2boundary - from visualization.floorplan import draw_floorplan - from utils.boundary import visibility_corners - - corners = np.array([[0.7664539, 0.7416811], - [0.06641078, 0.6521386], - [0.30997428, 0.57855356], - [0.383300784, 0.58726823], - [0.383300775, 0.8005296], - [0.5062902, 0.74822706]]) - corners = visibility_corners(corners) - print(corners) - # draw_floorplan(uv2xyz(corners, plan_y=1.6)[..., ::2], show=True, marker_color=0.5, center_color=0.8, - # show_radius=8) - visible_floor_boundary = corners2boundary(corners, length=256, visible=True) - # visible_depth = xyz2depth(uv2xyz(visible_floor_boundary, 1), 1) - print(len(visible_floor_boundary)) - - -if __name__ == '__main__0': - from visualization.floorplan import draw_floorplan - - from dataset.communal.read import read_image, read_label - from utils.time_watch import TimeWatch - from utils.conversion import uv2xyz - - # np.random.seed(1234) - pano_img_path = "../../src/dataset/mp3d/image/VVfe2KiqLaN_35b41dcbfcf84f96878f6ca28c70e5af.png" - label_path = "../../src/dataset/mp3d/label/VVfe2KiqLaN_35b41dcbfcf84f96878f6ca28c70e5af.json" - pano_img = read_image(pano_img_path) - label = read_label(label_path) - - corners = label['corners'] - ratio = label['ratio'] - - # draw_floorplan(corners, show=True, marker_color=0.5, center_color=0.8, plan_y=1.6, show_radius=8) - - w = TimeWatch() - for i in range(5): - kx = np.random.uniform(1, 2) - kx = 1 / kx if np.random.randint(2) == 0 else kx - ky = np.random.uniform(1, 2) - ky = 1 / ky if np.random.randint(2) == 0 else ky - kz = np.random.uniform(1, 2) - kz = 1 / kz if np.random.randint(2) == 0 else kz - new_corners = pano_stretch_conner(corners.copy(), kx, ky, kz) - draw_floorplan(uv2xyz(new_corners, plan_y=1.6)[..., ::2], show=True, marker_color=0.5, center_color=0.8, - show_radius=8) diff --git a/spaces/itskiller/aiimage/README.md b/spaces/itskiller/aiimage/README.md deleted file mode 100644 index a0ef36901855c481b683764f5ac31ecb320015b0..0000000000000000000000000000000000000000 --- a/spaces/itskiller/aiimage/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Aiimage -emoji: 🏃 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -license: gpl ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jbetker/tortoise/tortoise/utils/typical_sampling.py b/spaces/jbetker/tortoise/tortoise/utils/typical_sampling.py deleted file mode 100644 index ff6bf487947e88a55fa45f2ffec1b9540df1d4fd..0000000000000000000000000000000000000000 --- a/spaces/jbetker/tortoise/tortoise/utils/typical_sampling.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch -from transformers import LogitsWarper - - -class TypicalLogitsWarper(LogitsWarper): - def __init__(self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): - self.filter_value = filter_value - self.mass = mass - self.min_tokens_to_keep = min_tokens_to_keep - - def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: - # calculate 
entropy - normalized = torch.nn.functional.log_softmax(scores, dim=-1) - p = torch.exp(normalized) - ent = -(normalized * p).nansum(-1, keepdim=True) - - # shift and sort - shifted_scores = torch.abs((-normalized) - ent) - sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False) - sorted_logits = scores.gather(-1, sorted_indices) - cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) - - # Remove tokens with cumulative mass above the threshold - last_ind = (cumulative_probs < self.mass).sum(dim=1) - last_ind[last_ind < 0] = 0 - sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1)) - if self.min_tokens_to_keep > 1: - # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) - sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0 - indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) - - scores = scores.masked_fill(indices_to_remove, self.filter_value) - return scores \ No newline at end of file diff --git a/spaces/jbilcke-hf/Panoremix/src/components/ui/switch.tsx b/spaces/jbilcke-hf/Panoremix/src/components/ui/switch.tsx deleted file mode 100644 index 9d1e79dffe05b79b4208570f487e506513430355..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/Panoremix/src/components/ui/switch.tsx +++ /dev/null @@ -1,29 +0,0 @@ -"use client" - -import * as React from "react" -import * as SwitchPrimitives from "@radix-ui/react-switch" - -import { cn } from "@/lib/utils" - -const Switch = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - -)) -Switch.displayName = SwitchPrimitives.Root.displayName - -export { Switch } diff --git a/spaces/jcjurado/DaVinci/README.md b/spaces/jcjurado/DaVinci/README.md deleted file mode 100644 index dc536541255a6c2c16f7c5a125a084565c992407..0000000000000000000000000000000000000000 --- a/spaces/jcjurado/DaVinci/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: DaVinci -emoji: 🏃 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/jhlfrfufyfn/old-bel-tts/app.py b/spaces/jhlfrfufyfn/old-bel-tts/app.py deleted file mode 100644 index 685fcced1b5250588cb5614c8f124da2cadea7c1..0000000000000000000000000000000000000000 --- a/spaces/jhlfrfufyfn/old-bel-tts/app.py +++ /dev/null @@ -1,73 +0,0 @@ -from TTS.utils.synthesizer import Synthesizer -from huggingface_hub import hf_hub_download -import gradio as gr -import tempfile -import os - -REPO_ID = "jhlfrfufyfn/old-bel-tts" - -my_title = "Беларускі тэкст-у-маўленне" -my_description = "Беларускамоўная мадэль для агучвання тэксту. " - -be_text = "Гепарды жывуць у адкрытых і прасторных месцах, дзе ёсць шмат здабычы." 
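# (Editor's note: the Belarusian sample sentence above roughly translates to
#  "Cheetahs live in open, spacious places where there is plenty of prey.")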
- -my_inputs = [ - gr.inputs.Textbox(lines=5, label="Input Text", default=be_text), -] - -my_outputs = gr.outputs.Audio(type="file", label="Output Audio") - -def belarusify_russian_text(text: str): - text = text.replace("и", "і") - text = text.replace("іу", "іў") - text = text.replace("оу", "оў") - text = text.replace("ау", "аў") - text = text.replace("ыу", "ыў") - text = text.replace("уу", "уў") - text = text.replace("юу", "юў") - text = text.replace("еу", "еў") - text = text.replace("ёу", "ёў") - text = text.replace("щ", "шч") - return text - - -def tts(text: str): - text = belarusify_russian_text(text) - best_model_path = hf_hub_download(repo_id=REPO_ID, filename="model.pth") - config_path = hf_hub_download(repo_id=REPO_ID, filename="config.json") - vocoder_path = hf_hub_download(repo_id=REPO_ID, filename="vocoder.pth") - scale_stats_path = hf_hub_download(repo_id=REPO_ID, filename="scale_stats.npy") - vocoder_config_path = hf_hub_download(repo_id=REPO_ID, filename="vocoder_config.json") - - # init synthesizer - synthesizer = Synthesizer( - best_model_path, - config_path, - None, - None, - vocoder_path, - vocoder_config_path, - None, - None, - False - ) - - # create audio file - wavs = synthesizer.tts(text) - with tempfile.NamedTemporaryFile(suffix = ".wav", delete = False) as fp: - synthesizer.save_wav(wavs, fp) - return fp.name - -print("CWD IS ", os.getcwd()) -print("LIST IS", os.listdir()) -iface = gr.Interface( - fn=tts, - inputs=my_inputs, - outputs=my_outputs, - title=my_title, - description = my_description, - article = "", - examples = "", - allow_flagging=False -) -iface.launch() diff --git a/spaces/jhwen/bingo/next.config.js b/spaces/jhwen/bingo/next.config.js deleted file mode 100644 index 0e6ccd7fbc91d0459eaaff3e968ce0556789c605..0000000000000000000000000000000000000000 --- a/spaces/jhwen/bingo/next.config.js +++ /dev/null @@ -1,38 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - // output: 'export', - // assetPrefix: '.', - webpack: (config, { isServer }) => { - if (!isServer) { - config.resolve = { - ...config.resolve, - fallback: { - 'bufferutil': false, - 'utf-8-validate': false, - http: false, - https: false, - stream: false, - // fixes proxy-agent dependencies - net: false, - dns: false, - tls: false, - assert: false, - // fixes next-i18next dependencies - path: false, - fs: false, - // fixes mapbox dependencies - events: false, - // fixes sentry dependencies - process: false - } - }; - } - config.module.exprContextCritical = false; - - return config; - }, -} - -module.exports = (...args) => { - return nextConfig -} diff --git a/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/preprocess/sabio_kcat_clean_unisubstrate.py b/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/preprocess/sabio_kcat_clean_unisubstrate.py deleted file mode 100644 index e7014084a2b082772c573e916a8edcfda79baf60..0000000000000000000000000000000000000000 --- a/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/preprocess/sabio_kcat_clean_unisubstrate.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -# Author: LE YUAN -# Date: 2020-07-08 Run in python 3.7 - - -import csv - -with open("../../Data/database/Kcat_sabio_4_unisubstrate.tsv", "r", encoding='utf-8') as file : - lines = file.readlines()[1:] - - -Kcat_data = list() -Kcat_data_include_value = list() -for line in lines : - # print(line) - data = line.strip().split('\t') - Type = data[1] - ECNumber = data[2] - Substrate = data[3] - EnzymeType = data[4] - PubMedID = data[5] - Organism =data[6] 
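    # (Editor's note, inferred from the indices used in this loop and from the
    #  header written out at the end of the script: each TSV row is assumed to
    #  hold [1]=Type, [2]=ECNumber, [3]=Substrate, [4]=EnzymeType, [5]=PubMedID,
    #  [6]=Organism, [7]=UniprotID, [8]=Value, [9]=Unit; column [0] is unused here.)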
- UniprotID = data[7] - Value = data[8] - Unit = data[9] - Kcat_data_include_value.append([Type, ECNumber, Substrate, EnzymeType, PubMedID, Organism, UniprotID, Value, Unit]) - Kcat_data.append([Type, ECNumber, Substrate, EnzymeType, PubMedID, Organism, UniprotID]) - -print(len(Kcat_data)) # 22683 items for not unique substrate - - -new_lines = list() -for line in Kcat_data : - if line not in new_lines : - new_lines.append(line) - -# print(len(new_lines)) # 20344 included all elements, 16532 included all except for Kcat value and unit -print(len(new_lines)) # 21627 included all elements, 18296 included all except for Kcat value and unit - -i = 0 -clean_Kcat = list() -for new_line in new_lines : - # print(new_line) - i += 1 - print(i) - value_unit = dict() - Kcat_values = list() - for line in Kcat_data_include_value : - if line[:-2] == new_line : - value = line[-2] - value_unit[str(float(value))] = line[-1] - # print(type(value)) # - Kcat_values.append(float(value)) - # print(value_unit) - # print(Kcat_values) - max_value = max(Kcat_values) # choose the maximum one for duplication Kcat value under the same entry as the data what we use - unit = value_unit[str(max_value)] - # print(max_value) - # print(unit) - - if unit in ['mol*s^(-1)*mol^(-1)', 's^(-', '-'] : - unit = 's^(-1)' - new_line.append(str(max_value)) - new_line.append(unit) - if new_line[-1] == 's^(-1)' : - clean_Kcat.append(new_line) - -# print(clean_Kcat) -print(len(clean_Kcat)) # 18243 after unifing the Kcat value unit to 's^(-1)', in which 16825 has a specific Unipro ID - - -with open("../../Data/database/Kcat_sabio_clean_unisubstrate.tsv", "w") as outfile : - records = ['Type', 'ECNumber', 'Substrate', 'EnzymeType', 'PubMedID', 'Organism', 'UniprotID', 'Value', 'Unit'] - outfile.write('\t'.join(records) + '\n') - for line in clean_Kcat : - outfile.write('\t'.join(line) + '\n') diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/PdfParser.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/PdfParser.py deleted file mode 100644 index dc1012f54d3d0d683e96fed41ee7ace492904e71..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/PdfParser.py +++ /dev/null @@ -1,996 +0,0 @@ -import calendar -import codecs -import collections -import mmap -import os -import re -import time -import zlib - - -# see 7.9.2.2 Text String Type on page 86 and D.3 PDFDocEncoding Character Set -# on page 656 -def encode_text(s): - return codecs.BOM_UTF16_BE + s.encode("utf_16_be") - - -PDFDocEncoding = { - 0x16: "\u0017", - 0x18: "\u02D8", - 0x19: "\u02C7", - 0x1A: "\u02C6", - 0x1B: "\u02D9", - 0x1C: "\u02DD", - 0x1D: "\u02DB", - 0x1E: "\u02DA", - 0x1F: "\u02DC", - 0x80: "\u2022", - 0x81: "\u2020", - 0x82: "\u2021", - 0x83: "\u2026", - 0x84: "\u2014", - 0x85: "\u2013", - 0x86: "\u0192", - 0x87: "\u2044", - 0x88: "\u2039", - 0x89: "\u203A", - 0x8A: "\u2212", - 0x8B: "\u2030", - 0x8C: "\u201E", - 0x8D: "\u201C", - 0x8E: "\u201D", - 0x8F: "\u2018", - 0x90: "\u2019", - 0x91: "\u201A", - 0x92: "\u2122", - 0x93: "\uFB01", - 0x94: "\uFB02", - 0x95: "\u0141", - 0x96: "\u0152", - 0x97: "\u0160", - 0x98: "\u0178", - 0x99: "\u017D", - 0x9A: "\u0131", - 0x9B: "\u0142", - 0x9C: "\u0153", - 0x9D: "\u0161", - 0x9E: "\u017E", - 0xA0: "\u20AC", -} - - -def decode_text(b): - if b[: len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE: - return b[len(codecs.BOM_UTF16_BE) :].decode("utf_16_be") - else: - return "".join(PDFDocEncoding.get(byte, chr(byte)) for 
byte in b) - - -class PdfFormatError(RuntimeError): - """An error that probably indicates a syntactic or semantic error in the - PDF file structure""" - - pass - - -def check_format_condition(condition, error_message): - if not condition: - raise PdfFormatError(error_message) - - -class IndirectReference( - collections.namedtuple("IndirectReferenceTuple", ["object_id", "generation"]) -): - def __str__(self): - return "%s %s R" % self - - def __bytes__(self): - return self.__str__().encode("us-ascii") - - def __eq__(self, other): - return ( - other.__class__ is self.__class__ - and other.object_id == self.object_id - and other.generation == self.generation - ) - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return hash((self.object_id, self.generation)) - - -class IndirectObjectDef(IndirectReference): - def __str__(self): - return "%s %s obj" % self - - -class XrefTable: - def __init__(self): - self.existing_entries = {} # object ID => (offset, generation) - self.new_entries = {} # object ID => (offset, generation) - self.deleted_entries = {0: 65536} # object ID => generation - self.reading_finished = False - - def __setitem__(self, key, value): - if self.reading_finished: - self.new_entries[key] = value - else: - self.existing_entries[key] = value - if key in self.deleted_entries: - del self.deleted_entries[key] - - def __getitem__(self, key): - try: - return self.new_entries[key] - except KeyError: - return self.existing_entries[key] - - def __delitem__(self, key): - if key in self.new_entries: - generation = self.new_entries[key][1] + 1 - del self.new_entries[key] - self.deleted_entries[key] = generation - elif key in self.existing_entries: - generation = self.existing_entries[key][1] + 1 - self.deleted_entries[key] = generation - elif key in self.deleted_entries: - generation = self.deleted_entries[key] - else: - msg = ( - "object ID " + str(key) + " cannot be deleted because it doesn't exist" - ) - raise IndexError(msg) - - def __contains__(self, key): - return key in self.existing_entries or key in self.new_entries - - def __len__(self): - return len( - set(self.existing_entries.keys()) - | set(self.new_entries.keys()) - | set(self.deleted_entries.keys()) - ) - - def keys(self): - return ( - set(self.existing_entries.keys()) - set(self.deleted_entries.keys()) - ) | set(self.new_entries.keys()) - - def write(self, f): - keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys())) - deleted_keys = sorted(set(self.deleted_entries.keys())) - startxref = f.tell() - f.write(b"xref\n") - while keys: - # find a contiguous sequence of object IDs - prev = None - for index, key in enumerate(keys): - if prev is None or prev + 1 == key: - prev = key - else: - contiguous_keys = keys[:index] - keys = keys[index:] - break - else: - contiguous_keys = keys - keys = None - f.write(b"%d %d\n" % (contiguous_keys[0], len(contiguous_keys))) - for object_id in contiguous_keys: - if object_id in self.new_entries: - f.write(b"%010d %05d n \n" % self.new_entries[object_id]) - else: - this_deleted_object_id = deleted_keys.pop(0) - check_format_condition( - object_id == this_deleted_object_id, - f"expected the next deleted object ID to be {object_id}, " - f"instead found {this_deleted_object_id}", - ) - try: - next_in_linked_list = deleted_keys[0] - except IndexError: - next_in_linked_list = 0 - f.write( - b"%010d %05d f \n" - % (next_in_linked_list, self.deleted_entries[object_id]) - ) - return startxref - - -class PdfName: - def __init__(self, name): - if 
isinstance(name, PdfName): - self.name = name.name - elif isinstance(name, bytes): - self.name = name - else: - self.name = name.encode("us-ascii") - - def name_as_str(self): - return self.name.decode("us-ascii") - - def __eq__(self, other): - return ( - isinstance(other, PdfName) and other.name == self.name - ) or other == self.name - - def __hash__(self): - return hash(self.name) - - def __repr__(self): - return f"PdfName({repr(self.name)})" - - @classmethod - def from_pdf_stream(cls, data): - return cls(PdfParser.interpret_name(data)) - - allowed_chars = set(range(33, 127)) - {ord(c) for c in "#%/()<>[]{}"} - - def __bytes__(self): - result = bytearray(b"/") - for b in self.name: - if b in self.allowed_chars: - result.append(b) - else: - result.extend(b"#%02X" % b) - return bytes(result) - - -class PdfArray(list): - def __bytes__(self): - return b"[ " + b" ".join(pdf_repr(x) for x in self) + b" ]" - - -class PdfDict(collections.UserDict): - def __setattr__(self, key, value): - if key == "data": - collections.UserDict.__setattr__(self, key, value) - else: - self[key.encode("us-ascii")] = value - - def __getattr__(self, key): - try: - value = self[key.encode("us-ascii")] - except KeyError as e: - raise AttributeError(key) from e - if isinstance(value, bytes): - value = decode_text(value) - if key.endswith("Date"): - if value.startswith("D:"): - value = value[2:] - - relationship = "Z" - if len(value) > 17: - relationship = value[14] - offset = int(value[15:17]) * 60 - if len(value) > 20: - offset += int(value[18:20]) - - format = "%Y%m%d%H%M%S"[: len(value) - 2] - value = time.strptime(value[: len(format) + 2], format) - if relationship in ["+", "-"]: - offset *= 60 - if relationship == "+": - offset *= -1 - value = time.gmtime(calendar.timegm(value) + offset) - return value - - def __bytes__(self): - out = bytearray(b"<<") - for key, value in self.items(): - if value is None: - continue - value = pdf_repr(value) - out.extend(b"\n") - out.extend(bytes(PdfName(key))) - out.extend(b" ") - out.extend(value) - out.extend(b"\n>>") - return bytes(out) - - -class PdfBinary: - def __init__(self, data): - self.data = data - - def __bytes__(self): - return b"<%s>" % b"".join(b"%02X" % b for b in self.data) - - -class PdfStream: - def __init__(self, dictionary, buf): - self.dictionary = dictionary - self.buf = buf - - def decode(self): - try: - filter = self.dictionary.Filter - except AttributeError: - return self.buf - if filter == b"FlateDecode": - try: - expected_length = self.dictionary.DL - except AttributeError: - expected_length = self.dictionary.Length - return zlib.decompress(self.buf, bufsize=int(expected_length)) - else: - msg = f"stream filter {repr(self.dictionary.Filter)} unknown/unsupported" - raise NotImplementedError(msg) - - -def pdf_repr(x): - if x is True: - return b"true" - elif x is False: - return b"false" - elif x is None: - return b"null" - elif isinstance(x, (PdfName, PdfDict, PdfArray, PdfBinary)): - return bytes(x) - elif isinstance(x, (int, float)): - return str(x).encode("us-ascii") - elif isinstance(x, time.struct_time): - return b"(D:" + time.strftime("%Y%m%d%H%M%SZ", x).encode("us-ascii") + b")" - elif isinstance(x, dict): - return bytes(PdfDict(x)) - elif isinstance(x, list): - return bytes(PdfArray(x)) - elif isinstance(x, str): - return pdf_repr(encode_text(x)) - elif isinstance(x, bytes): - # XXX escape more chars? 
handle binary garbage - x = x.replace(b"\\", b"\\\\") - x = x.replace(b"(", b"\\(") - x = x.replace(b")", b"\\)") - return b"(" + x + b")" - else: - return bytes(x) - - -class PdfParser: - """Based on - https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf - Supports PDF up to 1.4 - """ - - def __init__(self, filename=None, f=None, buf=None, start_offset=0, mode="rb"): - if buf and f: - msg = "specify buf or f or filename, but not both buf and f" - raise RuntimeError(msg) - self.filename = filename - self.buf = buf - self.f = f - self.start_offset = start_offset - self.should_close_buf = False - self.should_close_file = False - if filename is not None and f is None: - self.f = f = open(filename, mode) - self.should_close_file = True - if f is not None: - self.buf = buf = self.get_buf_from_file(f) - self.should_close_buf = True - if not filename and hasattr(f, "name"): - self.filename = f.name - self.cached_objects = {} - if buf: - self.read_pdf_info() - else: - self.file_size_total = self.file_size_this = 0 - self.root = PdfDict() - self.root_ref = None - self.info = PdfDict() - self.info_ref = None - self.page_tree_root = {} - self.pages = [] - self.orig_pages = [] - self.pages_ref = None - self.last_xref_section_offset = None - self.trailer_dict = {} - self.xref_table = XrefTable() - self.xref_table.reading_finished = True - if f: - self.seek_end() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - return False # do not suppress exceptions - - def start_writing(self): - self.close_buf() - self.seek_end() - - def close_buf(self): - try: - self.buf.close() - except AttributeError: - pass - self.buf = None - - def close(self): - if self.should_close_buf: - self.close_buf() - if self.f is not None and self.should_close_file: - self.f.close() - self.f = None - - def seek_end(self): - self.f.seek(0, os.SEEK_END) - - def write_header(self): - self.f.write(b"%PDF-1.4\n") - - def write_comment(self, s): - self.f.write(f"% {s}\n".encode()) - - def write_catalog(self): - self.del_root() - self.root_ref = self.next_object_id(self.f.tell()) - self.pages_ref = self.next_object_id(0) - self.rewrite_pages() - self.write_obj(self.root_ref, Type=PdfName(b"Catalog"), Pages=self.pages_ref) - self.write_obj( - self.pages_ref, - Type=PdfName(b"Pages"), - Count=len(self.pages), - Kids=self.pages, - ) - return self.root_ref - - def rewrite_pages(self): - pages_tree_nodes_to_delete = [] - for i, page_ref in enumerate(self.orig_pages): - page_info = self.cached_objects[page_ref] - del self.xref_table[page_ref.object_id] - pages_tree_nodes_to_delete.append(page_info[PdfName(b"Parent")]) - if page_ref not in self.pages: - # the page has been deleted - continue - # make dict keys into strings for passing to write_page - stringified_page_info = {} - for key, value in page_info.items(): - # key should be a PdfName - stringified_page_info[key.name_as_str()] = value - stringified_page_info["Parent"] = self.pages_ref - new_page_ref = self.write_page(None, **stringified_page_info) - for j, cur_page_ref in enumerate(self.pages): - if cur_page_ref == page_ref: - # replace the page reference with the new one - self.pages[j] = new_page_ref - # delete redundant Pages tree nodes from xref table - for pages_tree_node_ref in pages_tree_nodes_to_delete: - while pages_tree_node_ref: - pages_tree_node = self.cached_objects[pages_tree_node_ref] - if pages_tree_node_ref.object_id in self.xref_table: - del 
self.xref_table[pages_tree_node_ref.object_id] - pages_tree_node_ref = pages_tree_node.get(b"Parent", None) - self.orig_pages = [] - - def write_xref_and_trailer(self, new_root_ref=None): - if new_root_ref: - self.del_root() - self.root_ref = new_root_ref - if self.info: - self.info_ref = self.write_obj(None, self.info) - start_xref = self.xref_table.write(self.f) - num_entries = len(self.xref_table) - trailer_dict = {b"Root": self.root_ref, b"Size": num_entries} - if self.last_xref_section_offset is not None: - trailer_dict[b"Prev"] = self.last_xref_section_offset - if self.info: - trailer_dict[b"Info"] = self.info_ref - self.last_xref_section_offset = start_xref - self.f.write( - b"trailer\n" - + bytes(PdfDict(trailer_dict)) - + b"\nstartxref\n%d\n%%%%EOF" % start_xref - ) - - def write_page(self, ref, *objs, **dict_obj): - if isinstance(ref, int): - ref = self.pages[ref] - if "Type" not in dict_obj: - dict_obj["Type"] = PdfName(b"Page") - if "Parent" not in dict_obj: - dict_obj["Parent"] = self.pages_ref - return self.write_obj(ref, *objs, **dict_obj) - - def write_obj(self, ref, *objs, **dict_obj): - f = self.f - if ref is None: - ref = self.next_object_id(f.tell()) - else: - self.xref_table[ref.object_id] = (f.tell(), ref.generation) - f.write(bytes(IndirectObjectDef(*ref))) - stream = dict_obj.pop("stream", None) - if stream is not None: - dict_obj["Length"] = len(stream) - if dict_obj: - f.write(pdf_repr(dict_obj)) - for obj in objs: - f.write(pdf_repr(obj)) - if stream is not None: - f.write(b"stream\n") - f.write(stream) - f.write(b"\nendstream\n") - f.write(b"endobj\n") - return ref - - def del_root(self): - if self.root_ref is None: - return - del self.xref_table[self.root_ref.object_id] - del self.xref_table[self.root[b"Pages"].object_id] - - @staticmethod - def get_buf_from_file(f): - if hasattr(f, "getbuffer"): - return f.getbuffer() - elif hasattr(f, "getvalue"): - return f.getvalue() - else: - try: - return mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) - except ValueError: # cannot mmap an empty file - return b"" - - def read_pdf_info(self): - self.file_size_total = len(self.buf) - self.file_size_this = self.file_size_total - self.start_offset - self.read_trailer() - self.root_ref = self.trailer_dict[b"Root"] - self.info_ref = self.trailer_dict.get(b"Info", None) - self.root = PdfDict(self.read_indirect(self.root_ref)) - if self.info_ref is None: - self.info = PdfDict() - else: - self.info = PdfDict(self.read_indirect(self.info_ref)) - check_format_condition(b"Type" in self.root, "/Type missing in Root") - check_format_condition( - self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog" - ) - check_format_condition(b"Pages" in self.root, "/Pages missing in Root") - check_format_condition( - isinstance(self.root[b"Pages"], IndirectReference), - "/Pages in Root is not an indirect reference", - ) - self.pages_ref = self.root[b"Pages"] - self.page_tree_root = self.read_indirect(self.pages_ref) - self.pages = self.linearize_page_tree(self.page_tree_root) - # save the original list of page references - # in case the user modifies, adds or deletes some pages - # and we need to rewrite the pages and their list - self.orig_pages = self.pages[:] - - def next_object_id(self, offset=None): - try: - # TODO: support reuse of deleted objects - reference = IndirectReference(max(self.xref_table.keys()) + 1, 0) - except ValueError: - reference = IndirectReference(1, 0) - if offset is not None: - self.xref_table[reference.object_id] = (offset, 0) - return reference - - delimiter 
= rb"[][()<>{}/%]" - delimiter_or_ws = rb"[][()<>{}/%\000\011\012\014\015\040]" - whitespace = rb"[\000\011\012\014\015\040]" - whitespace_or_hex = rb"[\000\011\012\014\015\0400-9a-fA-F]" - whitespace_optional = whitespace + b"*" - whitespace_mandatory = whitespace + b"+" - # No "\012" aka "\n" or "\015" aka "\r": - whitespace_optional_no_nl = rb"[\000\011\014\040]*" - newline_only = rb"[\r\n]+" - newline = whitespace_optional_no_nl + newline_only + whitespace_optional_no_nl - re_trailer_end = re.compile( - whitespace_mandatory - + rb"trailer" - + whitespace_optional - + rb"<<(.*>>)" - + newline - + rb"startxref" - + newline - + rb"([0-9]+)" - + newline - + rb"%%EOF" - + whitespace_optional - + rb"$", - re.DOTALL, - ) - re_trailer_prev = re.compile( - whitespace_optional - + rb"trailer" - + whitespace_optional - + rb"<<(.*?>>)" - + newline - + rb"startxref" - + newline - + rb"([0-9]+)" - + newline - + rb"%%EOF" - + whitespace_optional, - re.DOTALL, - ) - - def read_trailer(self): - search_start_offset = len(self.buf) - 16384 - if search_start_offset < self.start_offset: - search_start_offset = self.start_offset - m = self.re_trailer_end.search(self.buf, search_start_offset) - check_format_condition(m, "trailer end not found") - # make sure we found the LAST trailer - last_match = m - while m: - last_match = m - m = self.re_trailer_end.search(self.buf, m.start() + 16) - if not m: - m = last_match - trailer_data = m.group(1) - self.last_xref_section_offset = int(m.group(2)) - self.trailer_dict = self.interpret_trailer(trailer_data) - self.xref_table = XrefTable() - self.read_xref_table(xref_section_offset=self.last_xref_section_offset) - if b"Prev" in self.trailer_dict: - self.read_prev_trailer(self.trailer_dict[b"Prev"]) - - def read_prev_trailer(self, xref_section_offset): - trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset) - m = self.re_trailer_prev.search( - self.buf[trailer_offset : trailer_offset + 16384] - ) - check_format_condition(m, "previous trailer not found") - trailer_data = m.group(1) - check_format_condition( - int(m.group(2)) == xref_section_offset, - "xref section offset in previous trailer doesn't match what was expected", - ) - trailer_dict = self.interpret_trailer(trailer_data) - if b"Prev" in trailer_dict: - self.read_prev_trailer(trailer_dict[b"Prev"]) - - re_whitespace_optional = re.compile(whitespace_optional) - re_name = re.compile( - whitespace_optional - + rb"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?=" - + delimiter_or_ws - + rb")" - ) - re_dict_start = re.compile(whitespace_optional + rb"<<") - re_dict_end = re.compile(whitespace_optional + rb">>" + whitespace_optional) - - @classmethod - def interpret_trailer(cls, trailer_data): - trailer = {} - offset = 0 - while True: - m = cls.re_name.match(trailer_data, offset) - if not m: - m = cls.re_dict_end.match(trailer_data, offset) - check_format_condition( - m and m.end() == len(trailer_data), - "name not found in trailer, remaining data: " - + repr(trailer_data[offset:]), - ) - break - key = cls.interpret_name(m.group(1)) - value, offset = cls.get_value(trailer_data, m.end()) - trailer[key] = value - check_format_condition( - b"Size" in trailer and isinstance(trailer[b"Size"], int), - "/Size not in trailer or not an integer", - ) - check_format_condition( - b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference), - "/Root not in trailer or not an indirect reference", - ) - return trailer - - re_hashes_in_name = re.compile(rb"([^#]*)(#([0-9a-fA-F]{2}))?") - - @classmethod - def 
interpret_name(cls, raw, as_text=False): - name = b"" - for m in cls.re_hashes_in_name.finditer(raw): - if m.group(3): - name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii")) - else: - name += m.group(1) - if as_text: - return name.decode("utf-8") - else: - return bytes(name) - - re_null = re.compile(whitespace_optional + rb"null(?=" + delimiter_or_ws + rb")") - re_true = re.compile(whitespace_optional + rb"true(?=" + delimiter_or_ws + rb")") - re_false = re.compile(whitespace_optional + rb"false(?=" + delimiter_or_ws + rb")") - re_int = re.compile( - whitespace_optional + rb"([-+]?[0-9]+)(?=" + delimiter_or_ws + rb")" - ) - re_real = re.compile( - whitespace_optional - + rb"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?=" - + delimiter_or_ws - + rb")" - ) - re_array_start = re.compile(whitespace_optional + rb"\[") - re_array_end = re.compile(whitespace_optional + rb"]") - re_string_hex = re.compile( - whitespace_optional + rb"<(" + whitespace_or_hex + rb"*)>" - ) - re_string_lit = re.compile(whitespace_optional + rb"\(") - re_indirect_reference = re.compile( - whitespace_optional - + rb"([-+]?[0-9]+)" - + whitespace_mandatory - + rb"([-+]?[0-9]+)" - + whitespace_mandatory - + rb"R(?=" - + delimiter_or_ws - + rb")" - ) - re_indirect_def_start = re.compile( - whitespace_optional - + rb"([-+]?[0-9]+)" - + whitespace_mandatory - + rb"([-+]?[0-9]+)" - + whitespace_mandatory - + rb"obj(?=" - + delimiter_or_ws - + rb")" - ) - re_indirect_def_end = re.compile( - whitespace_optional + rb"endobj(?=" + delimiter_or_ws + rb")" - ) - re_comment = re.compile( - rb"(" + whitespace_optional + rb"%[^\r\n]*" + newline + rb")*" - ) - re_stream_start = re.compile(whitespace_optional + rb"stream\r?\n") - re_stream_end = re.compile( - whitespace_optional + rb"endstream(?=" + delimiter_or_ws + rb")" - ) - - @classmethod - def get_value(cls, data, offset, expect_indirect=None, max_nesting=-1): - if max_nesting == 0: - return None, None - m = cls.re_comment.match(data, offset) - if m: - offset = m.end() - m = cls.re_indirect_def_start.match(data, offset) - if m: - check_format_condition( - int(m.group(1)) > 0, - "indirect object definition: object ID must be greater than 0", - ) - check_format_condition( - int(m.group(2)) >= 0, - "indirect object definition: generation must be non-negative", - ) - check_format_condition( - expect_indirect is None - or expect_indirect - == IndirectReference(int(m.group(1)), int(m.group(2))), - "indirect object definition different than expected", - ) - object, offset = cls.get_value(data, m.end(), max_nesting=max_nesting - 1) - if offset is None: - return object, None - m = cls.re_indirect_def_end.match(data, offset) - check_format_condition(m, "indirect object definition end not found") - return object, m.end() - check_format_condition( - not expect_indirect, "indirect object definition not found" - ) - m = cls.re_indirect_reference.match(data, offset) - if m: - check_format_condition( - int(m.group(1)) > 0, - "indirect object reference: object ID must be greater than 0", - ) - check_format_condition( - int(m.group(2)) >= 0, - "indirect object reference: generation must be non-negative", - ) - return IndirectReference(int(m.group(1)), int(m.group(2))), m.end() - m = cls.re_dict_start.match(data, offset) - if m: - offset = m.end() - result = {} - m = cls.re_dict_end.match(data, offset) - while not m: - key, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) - if offset is None: - return result, None - value, offset = cls.get_value(data, offset, 
max_nesting=max_nesting - 1) - result[key] = value - if offset is None: - return result, None - m = cls.re_dict_end.match(data, offset) - offset = m.end() - m = cls.re_stream_start.match(data, offset) - if m: - try: - stream_len = int(result[b"Length"]) - except (TypeError, KeyError, ValueError) as e: - msg = "bad or missing Length in stream dict (%r)" % result.get( - b"Length", None - ) - raise PdfFormatError(msg) from e - stream_data = data[m.end() : m.end() + stream_len] - m = cls.re_stream_end.match(data, m.end() + stream_len) - check_format_condition(m, "stream end not found") - offset = m.end() - result = PdfStream(PdfDict(result), stream_data) - else: - result = PdfDict(result) - return result, offset - m = cls.re_array_start.match(data, offset) - if m: - offset = m.end() - result = [] - m = cls.re_array_end.match(data, offset) - while not m: - value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) - result.append(value) - if offset is None: - return result, None - m = cls.re_array_end.match(data, offset) - return result, m.end() - m = cls.re_null.match(data, offset) - if m: - return None, m.end() - m = cls.re_true.match(data, offset) - if m: - return True, m.end() - m = cls.re_false.match(data, offset) - if m: - return False, m.end() - m = cls.re_name.match(data, offset) - if m: - return PdfName(cls.interpret_name(m.group(1))), m.end() - m = cls.re_int.match(data, offset) - if m: - return int(m.group(1)), m.end() - m = cls.re_real.match(data, offset) - if m: - # XXX Decimal instead of float??? - return float(m.group(1)), m.end() - m = cls.re_string_hex.match(data, offset) - if m: - # filter out whitespace - hex_string = bytearray( - b for b in m.group(1) if b in b"0123456789abcdefABCDEF" - ) - if len(hex_string) % 2 == 1: - # append a 0 if the length is not even - yes, at the end - hex_string.append(ord(b"0")) - return bytearray.fromhex(hex_string.decode("us-ascii")), m.end() - m = cls.re_string_lit.match(data, offset) - if m: - return cls.get_literal_string(data, m.end()) - # return None, offset # fallback (only for debugging) - msg = "unrecognized object: " + repr(data[offset : offset + 32]) - raise PdfFormatError(msg) - - re_lit_str_token = re.compile( - rb"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))" - ) - escaped_chars = { - b"n": b"\n", - b"r": b"\r", - b"t": b"\t", - b"b": b"\b", - b"f": b"\f", - b"(": b"(", - b")": b")", - b"\\": b"\\", - ord(b"n"): b"\n", - ord(b"r"): b"\r", - ord(b"t"): b"\t", - ord(b"b"): b"\b", - ord(b"f"): b"\f", - ord(b"("): b"(", - ord(b")"): b")", - ord(b"\\"): b"\\", - } - - @classmethod - def get_literal_string(cls, data, offset): - nesting_depth = 0 - result = bytearray() - for m in cls.re_lit_str_token.finditer(data, offset): - result.extend(data[offset : m.start()]) - if m.group(1): - result.extend(cls.escaped_chars[m.group(1)[1]]) - elif m.group(2): - result.append(int(m.group(2)[1:], 8)) - elif m.group(3): - pass - elif m.group(5): - result.extend(b"\n") - elif m.group(6): - result.extend(b"(") - nesting_depth += 1 - elif m.group(7): - if nesting_depth == 0: - return bytes(result), m.end() - result.extend(b")") - nesting_depth -= 1 - offset = m.end() - msg = "unfinished literal string" - raise PdfFormatError(msg) - - re_xref_section_start = re.compile(whitespace_optional + rb"xref" + newline) - re_xref_subsection_start = re.compile( - whitespace_optional - + rb"([0-9]+)" - + whitespace_mandatory - + rb"([0-9]+)" - + whitespace_optional - + newline_only - ) - re_xref_entry = 
re.compile(rb"([0-9]{10}) ([0-9]{5}) ([fn])( \r| \n|\r\n)") - - def read_xref_table(self, xref_section_offset): - subsection_found = False - m = self.re_xref_section_start.match( - self.buf, xref_section_offset + self.start_offset - ) - check_format_condition(m, "xref section start not found") - offset = m.end() - while True: - m = self.re_xref_subsection_start.match(self.buf, offset) - if not m: - check_format_condition( - subsection_found, "xref subsection start not found" - ) - break - subsection_found = True - offset = m.end() - first_object = int(m.group(1)) - num_objects = int(m.group(2)) - for i in range(first_object, first_object + num_objects): - m = self.re_xref_entry.match(self.buf, offset) - check_format_condition(m, "xref entry not found") - offset = m.end() - is_free = m.group(3) == b"f" - if not is_free: - generation = int(m.group(2)) - new_entry = (int(m.group(1)), generation) - if i not in self.xref_table: - self.xref_table[i] = new_entry - return offset - - def read_indirect(self, ref, max_nesting=-1): - offset, generation = self.xref_table[ref[0]] - check_format_condition( - generation == ref[1], - f"expected to find generation {ref[1]} for object ID {ref[0]} in xref " - f"table, instead found generation {generation} at offset {offset}", - ) - value = self.get_value( - self.buf, - offset + self.start_offset, - expect_indirect=IndirectReference(*ref), - max_nesting=max_nesting, - )[0] - self.cached_objects[ref] = value - return value - - def linearize_page_tree(self, node=None): - if node is None: - node = self.page_tree_root - check_format_condition( - node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages" - ) - pages = [] - for kid in node[b"Kids"]: - kid_object = self.read_indirect(kid) - if kid_object[b"Type"] == b"Page": - pages.append(kid) - else: - pages.extend(self.linearize_page_tree(node=kid_object)) - return pages diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiofiles/tempfile/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiofiles/tempfile/__init__.py deleted file mode 100644 index 75d10b671047db98b8697aab9f4006ff5889c75c..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiofiles/tempfile/__init__.py +++ /dev/null @@ -1,357 +0,0 @@ -import asyncio -from functools import partial, singledispatch -from io import BufferedRandom, BufferedReader, BufferedWriter, FileIO, TextIOBase -from tempfile import NamedTemporaryFile as syncNamedTemporaryFile -from tempfile import SpooledTemporaryFile as syncSpooledTemporaryFile -from tempfile import TemporaryDirectory as syncTemporaryDirectory -from tempfile import TemporaryFile as syncTemporaryFile -from tempfile import _TemporaryFileWrapper as syncTemporaryFileWrapper - -from ..base import AiofilesContextManager -from ..threadpool.binary import AsyncBufferedIOBase, AsyncBufferedReader, AsyncFileIO -from ..threadpool.text import AsyncTextIOWrapper -from .temptypes import AsyncSpooledTemporaryFile, AsyncTemporaryDirectory -import sys - -__all__ = [ - "NamedTemporaryFile", - "TemporaryFile", - "SpooledTemporaryFile", - "TemporaryDirectory", -] - - -# ================================================================ -# Public methods for async open and return of temp file/directory -# objects with async interface -# ================================================================ -if sys.version_info >= (3, 12): - - def NamedTemporaryFile( - mode="w+b", - buffering=-1, - encoding=None, 
- newline=None, - suffix=None, - prefix=None, - dir=None, - delete=True, - delete_on_close=True, - loop=None, - executor=None, - ): - """Async open a named temporary file""" - return AiofilesContextManager( - _temporary_file( - named=True, - mode=mode, - buffering=buffering, - encoding=encoding, - newline=newline, - suffix=suffix, - prefix=prefix, - dir=dir, - delete=delete, - delete_on_close=delete_on_close, - loop=loop, - executor=executor, - ) - ) - -else: - - def NamedTemporaryFile( - mode="w+b", - buffering=-1, - encoding=None, - newline=None, - suffix=None, - prefix=None, - dir=None, - delete=True, - loop=None, - executor=None, - ): - """Async open a named temporary file""" - return AiofilesContextManager( - _temporary_file( - named=True, - mode=mode, - buffering=buffering, - encoding=encoding, - newline=newline, - suffix=suffix, - prefix=prefix, - dir=dir, - delete=delete, - loop=loop, - executor=executor, - ) - ) - - -def TemporaryFile( - mode="w+b", - buffering=-1, - encoding=None, - newline=None, - suffix=None, - prefix=None, - dir=None, - loop=None, - executor=None, -): - """Async open an unnamed temporary file""" - return AiofilesContextManager( - _temporary_file( - named=False, - mode=mode, - buffering=buffering, - encoding=encoding, - newline=newline, - suffix=suffix, - prefix=prefix, - dir=dir, - loop=loop, - executor=executor, - ) - ) - - -def SpooledTemporaryFile( - max_size=0, - mode="w+b", - buffering=-1, - encoding=None, - newline=None, - suffix=None, - prefix=None, - dir=None, - loop=None, - executor=None, -): - """Async open a spooled temporary file""" - return AiofilesContextManager( - _spooled_temporary_file( - max_size=max_size, - mode=mode, - buffering=buffering, - encoding=encoding, - newline=newline, - suffix=suffix, - prefix=prefix, - dir=dir, - loop=loop, - executor=executor, - ) - ) - - -def TemporaryDirectory(suffix=None, prefix=None, dir=None, loop=None, executor=None): - """Async open a temporary directory""" - return AiofilesContextManagerTempDir( - _temporary_directory( - suffix=suffix, prefix=prefix, dir=dir, loop=loop, executor=executor - ) - ) - - -# ========================================================= -# Internal coroutines to open new temp files/directories -# ========================================================= -if sys.version_info >= (3, 12): - - async def _temporary_file( - named=True, - mode="w+b", - buffering=-1, - encoding=None, - newline=None, - suffix=None, - prefix=None, - dir=None, - delete=True, - delete_on_close=True, - loop=None, - executor=None, - max_size=0, - ): - """Async method to open a temporary file with async interface""" - if loop is None: - loop = asyncio.get_running_loop() - - if named: - cb = partial( - syncNamedTemporaryFile, - mode=mode, - buffering=buffering, - encoding=encoding, - newline=newline, - suffix=suffix, - prefix=prefix, - dir=dir, - delete=delete, - delete_on_close=delete_on_close, - ) - else: - cb = partial( - syncTemporaryFile, - mode=mode, - buffering=buffering, - encoding=encoding, - newline=newline, - suffix=suffix, - prefix=prefix, - dir=dir, - ) - - f = await loop.run_in_executor(executor, cb) - - # Wrap based on type of underlying IO object - if type(f) is syncTemporaryFileWrapper: - # _TemporaryFileWrapper was used (named files) - result = wrap(f.file, f, loop=loop, executor=executor) - result._closer = f._closer - return result - else: - # IO object was returned directly without wrapper - return wrap(f, f, loop=loop, executor=executor) - -else: - - async def _temporary_file( - named=True, 
- mode="w+b", - buffering=-1, - encoding=None, - newline=None, - suffix=None, - prefix=None, - dir=None, - delete=True, - loop=None, - executor=None, - max_size=0, - ): - """Async method to open a temporary file with async interface""" - if loop is None: - loop = asyncio.get_running_loop() - - if named: - cb = partial( - syncNamedTemporaryFile, - mode=mode, - buffering=buffering, - encoding=encoding, - newline=newline, - suffix=suffix, - prefix=prefix, - dir=dir, - delete=delete, - ) - else: - cb = partial( - syncTemporaryFile, - mode=mode, - buffering=buffering, - encoding=encoding, - newline=newline, - suffix=suffix, - prefix=prefix, - dir=dir, - ) - - f = await loop.run_in_executor(executor, cb) - - # Wrap based on type of underlying IO object - if type(f) is syncTemporaryFileWrapper: - # _TemporaryFileWrapper was used (named files) - result = wrap(f.file, f, loop=loop, executor=executor) - # add delete property - result.delete = f.delete - return result - else: - # IO object was returned directly without wrapper - return wrap(f, f, loop=loop, executor=executor) - - -async def _spooled_temporary_file( - max_size=0, - mode="w+b", - buffering=-1, - encoding=None, - newline=None, - suffix=None, - prefix=None, - dir=None, - loop=None, - executor=None, -): - """Open a spooled temporary file with async interface""" - if loop is None: - loop = asyncio.get_running_loop() - - cb = partial( - syncSpooledTemporaryFile, - max_size=max_size, - mode=mode, - buffering=buffering, - encoding=encoding, - newline=newline, - suffix=suffix, - prefix=prefix, - dir=dir, - ) - - f = await loop.run_in_executor(executor, cb) - - # Single interface provided by SpooledTemporaryFile for all modes - return AsyncSpooledTemporaryFile(f, loop=loop, executor=executor) - - -async def _temporary_directory( - suffix=None, prefix=None, dir=None, loop=None, executor=None -): - """Async method to open a temporary directory with async interface""" - if loop is None: - loop = asyncio.get_running_loop() - - cb = partial(syncTemporaryDirectory, suffix, prefix, dir) - f = await loop.run_in_executor(executor, cb) - - return AsyncTemporaryDirectory(f, loop=loop, executor=executor) - - -class AiofilesContextManagerTempDir(AiofilesContextManager): - """With returns the directory location, not the object (matching sync lib)""" - - async def __aenter__(self): - self._obj = await self._coro - return self._obj.name - - -@singledispatch -def wrap(base_io_obj, file, *, loop=None, executor=None): - """Wrap the object with interface based on type of underlying IO""" - raise TypeError("Unsupported IO type: {}".format(base_io_obj)) - - -@wrap.register(TextIOBase) -def _(base_io_obj, file, *, loop=None, executor=None): - return AsyncTextIOWrapper(file, loop=loop, executor=executor) - - -@wrap.register(BufferedWriter) -def _(base_io_obj, file, *, loop=None, executor=None): - return AsyncBufferedIOBase(file, loop=loop, executor=executor) - - -@wrap.register(BufferedReader) -@wrap.register(BufferedRandom) -def _(base_io_obj, file, *, loop=None, executor=None): - return AsyncBufferedReader(file, loop=loop, executor=executor) - - -@wrap.register(FileIO) -def _(base_io_obj, file, *, loop=None, executor=None): - return AsyncFileIO(file, loop=loop, executor=executor) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/colorLib/table_builder.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/colorLib/table_builder.py deleted file mode 100644 index 
f1e182c432eae48545b2264d57d58c754ff6cd30..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/colorLib/table_builder.py +++ /dev/null @@ -1,223 +0,0 @@ -""" -colorLib.table_builder: Generic helper for filling in BaseTable derivatives from tuples and maps and such. - -""" - -import collections -import enum -from fontTools.ttLib.tables.otBase import ( - BaseTable, - FormatSwitchingBaseTable, - UInt8FormatSwitchingBaseTable, -) -from fontTools.ttLib.tables.otConverters import ( - ComputedInt, - SimpleValue, - Struct, - Short, - UInt8, - UShort, - IntValue, - FloatValue, - OptionalValue, -) -from fontTools.misc.roundTools import otRound - - -class BuildCallback(enum.Enum): - """Keyed on (BEFORE_BUILD, class[, Format if available]). - Receives (dest, source). - Should return (dest, source), which can be new objects. - """ - - BEFORE_BUILD = enum.auto() - - """Keyed on (AFTER_BUILD, class[, Format if available]). - Receives (dest). - Should return dest, which can be a new object. - """ - AFTER_BUILD = enum.auto() - - """Keyed on (CREATE_DEFAULT, class[, Format if available]). - Receives no arguments. - Should return a new instance of class. - """ - CREATE_DEFAULT = enum.auto() - - -def _assignable(convertersByName): - return {k: v for k, v in convertersByName.items() if not isinstance(v, ComputedInt)} - - -def _isNonStrSequence(value): - return isinstance(value, collections.abc.Sequence) and not isinstance(value, str) - - -def _split_format(cls, source): - if _isNonStrSequence(source): - assert len(source) > 0, f"{cls} needs at least format from {source}" - fmt, remainder = source[0], source[1:] - elif isinstance(source, collections.abc.Mapping): - assert "Format" in source, f"{cls} needs at least Format from {source}" - remainder = source.copy() - fmt = remainder.pop("Format") - else: - raise ValueError(f"Not sure how to populate {cls} from {source}") - - assert isinstance( - fmt, collections.abc.Hashable - ), f"{cls} Format is not hashable: {fmt!r}" - assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}" - - return fmt, remainder - - -class TableBuilder: - """ - Helps to populate things derived from BaseTable from maps, tuples, etc. - - A table of lifecycle callbacks may be provided to add logic beyond what is possible - based on otData info for the target class. See BuildCallbacks. 
- """ - - def __init__(self, callbackTable=None): - if callbackTable is None: - callbackTable = {} - self._callbackTable = callbackTable - - def _convert(self, dest, field, converter, value): - enumClass = getattr(converter, "enumClass", None) - - if enumClass: - if isinstance(value, enumClass): - pass - elif isinstance(value, str): - try: - value = getattr(enumClass, value.upper()) - except AttributeError: - raise ValueError(f"{value} is not a valid {enumClass}") - else: - value = enumClass(value) - - elif isinstance(converter, IntValue): - value = otRound(value) - elif isinstance(converter, FloatValue): - value = float(value) - - elif isinstance(converter, Struct): - if converter.repeat: - if _isNonStrSequence(value): - value = [self.build(converter.tableClass, v) for v in value] - else: - value = [self.build(converter.tableClass, value)] - setattr(dest, converter.repeat, len(value)) - else: - value = self.build(converter.tableClass, value) - elif callable(converter): - value = converter(value) - - setattr(dest, field, value) - - def build(self, cls, source): - assert issubclass(cls, BaseTable) - - if isinstance(source, cls): - return source - - callbackKey = (cls,) - fmt = None - if issubclass(cls, FormatSwitchingBaseTable): - fmt, source = _split_format(cls, source) - callbackKey = (cls, fmt) - - dest = self._callbackTable.get( - (BuildCallback.CREATE_DEFAULT,) + callbackKey, lambda: cls() - )() - assert isinstance(dest, cls) - - convByName = _assignable(cls.convertersByName) - skippedFields = set() - - # For format switchers we need to resolve converters based on format - if issubclass(cls, FormatSwitchingBaseTable): - dest.Format = fmt - convByName = _assignable(convByName[dest.Format]) - skippedFields.add("Format") - - # Convert sequence => mapping so before thunk only has to handle one format - if _isNonStrSequence(source): - # Sequence (typically list or tuple) assumed to match fields in declaration order - assert len(source) <= len( - convByName - ), f"Sequence of {len(source)} too long for {cls}; expected <= {len(convByName)} values" - source = dict(zip(convByName.keys(), source)) - - dest, source = self._callbackTable.get( - (BuildCallback.BEFORE_BUILD,) + callbackKey, lambda d, s: (d, s) - )(dest, source) - - if isinstance(source, collections.abc.Mapping): - for field, value in source.items(): - if field in skippedFields: - continue - converter = convByName.get(field, None) - if not converter: - raise ValueError( - f"Unrecognized field {field} for {cls}; expected one of {sorted(convByName.keys())}" - ) - self._convert(dest, field, converter, value) - else: - # let's try as a 1-tuple - dest = self.build(cls, (source,)) - - for field, conv in convByName.items(): - if not hasattr(dest, field) and isinstance(conv, OptionalValue): - setattr(dest, field, conv.DEFAULT) - - dest = self._callbackTable.get( - (BuildCallback.AFTER_BUILD,) + callbackKey, lambda d: d - )(dest) - - return dest - - -class TableUnbuilder: - def __init__(self, callbackTable=None): - if callbackTable is None: - callbackTable = {} - self._callbackTable = callbackTable - - def unbuild(self, table): - assert isinstance(table, BaseTable) - - source = {} - - callbackKey = (type(table),) - if isinstance(table, FormatSwitchingBaseTable): - source["Format"] = int(table.Format) - callbackKey += (table.Format,) - - for converter in table.getConverters(): - if isinstance(converter, ComputedInt): - continue - value = getattr(table, converter.name) - - enumClass = getattr(converter, "enumClass", None) - if enumClass: - 
source[converter.name] = value.name.lower() - elif isinstance(converter, Struct): - if converter.repeat: - source[converter.name] = [self.unbuild(v) for v in value] - else: - source[converter.name] = self.unbuild(value) - elif isinstance(converter, SimpleValue): - # "simple" values (e.g. int, float, str) need no further un-building - source[converter.name] = value - else: - raise NotImplementedError( - "Don't know how unbuild {value!r} with {converter!r}" - ) - - source = self._callbackTable.get(callbackKey, lambda s: s)(source) - - return source diff --git a/spaces/johnrobinsn/MidasDepthEstimation/README.md b/spaces/johnrobinsn/MidasDepthEstimation/README.md deleted file mode 100644 index e7ce9474b5e17b08f0403fed6c5797b056976378..0000000000000000000000000000000000000000 --- a/spaces/johnrobinsn/MidasDepthEstimation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: MidasDepthEstimation -emoji: 👀 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.8.2 -app_file: app.py -pinned: true -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jone/GFPGAN/gfpgan/weights/README.md b/spaces/jone/GFPGAN/gfpgan/weights/README.md deleted file mode 100644 index 4d7b7e642591ef88575d9e6c360a4d29e0cc1a4f..0000000000000000000000000000000000000000 --- a/spaces/jone/GFPGAN/gfpgan/weights/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Weights - -Put the downloaded weights to this folder. diff --git a/spaces/jonigata/PoseMaker2/external/faster_rcnn_r50_fpn_coco.py b/spaces/jonigata/PoseMaker2/external/faster_rcnn_r50_fpn_coco.py deleted file mode 100644 index a9ad9528b22163ae7ce1390375b69227fd6eafd9..0000000000000000000000000000000000000000 --- a/spaces/jonigata/PoseMaker2/external/faster_rcnn_r50_fpn_coco.py +++ /dev/null @@ -1,182 +0,0 @@ -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -total_epochs = 12 - -model = dict( - type='FasterRCNN', - pretrained='torchvision://resnet50', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - 
fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100) - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - )) - -dataset_type = 'CocoDataset' -data_root = 'data/coco' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_train2017.json', - img_prefix=f'{data_root}/train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') diff --git a/spaces/jskalbg/ChatDev01/camel/prompts/solution_extraction.py b/spaces/jskalbg/ChatDev01/camel/prompts/solution_extraction.py deleted file mode 100644 index fdc56a497221697e4fa966de2c51dcb7fc0b8269..0000000000000000000000000000000000000000 --- a/spaces/jskalbg/ChatDev01/camel/prompts/solution_extraction.py +++ /dev/null @@ -1,44 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -from typing import Any - -from camel.prompts import TextPrompt, TextPromptDict -from camel.typing import RoleType - - -# flake8: noqa -class SolutionExtractionPromptTemplateDict(TextPromptDict): - r"""A dictionary containing :obj:`TextPrompt` used in the `SolutionExtraction` - task. - - Attributes: - ASSISTANT_PROMPT (TextPrompt): A system prompt for the AI assistant - that outlines the rules of the conversation and provides - instructions for completing tasks. - """ - ASSISTANT_PROMPT = TextPrompt( - """You are an experienced solution extracting agent. -Your task is to extract full and complete solutions by looking at the conversation between a user and an assistant with particular specializations. -You should present me with a final and detailed solution purely based on the conversation. -You should present the solution as if its yours. -Use present tense and as if you are the one presenting the solution. -You should not miss any necessary details or examples. -Keep all provided explanations and codes provided throughout the conversation. -Remember your task is not to summarize rather to extract the full solution.""") - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.update({ - RoleType.ASSISTANT: self.ASSISTANT_PROMPT, - }) diff --git a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/scalable_t5/README.md b/spaces/juancopi81/youtube-music-transcribe/t5x/examples/scalable_t5/README.md deleted file mode 100644 index 6b457b9b71664d596644851dc0bc81127e1b0874..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/scalable_t5/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Scalable T5 - -NB: This particular example is still WIP. We're investigating a slight training -regression compared to the "vanilla" T5 example. - -This directory is very similar to the vanilla T5X "T5" example, but demonstrates -a host of techniques needed to scale model training to giant models run on -large TPU or GPU cluster environments using XLA's SPMD capabilities. See the -notes for the main "t5" example for general details on setup and execution. - -__Note__: many of the APIs built on top of `pjit` by Flax and T5X for easier -model parallel programming are still experimental, and may change. - -## Intermediate variable annotations - -In larger models, with multi-axis model parallelism, it is typically necessary -to provide additional constraint annotations beyond those for the input and -output parameters for a function. We do this using a special version of the -`pjit` annotation function `with_sharding_constraint` that uses _logical_ axis -names instead of raw mesh axes. This allows us to avoid tightly coupling a -specific partitioning plan to the model code itself. 
Instead, we merely need -to annotate the axis names used in the model in a coherent scheme, and later -map these logical axes to the physical mesh axes using a small set of rules. -Example usage can be seen in `network.py`. - -## Scan over layers - -One challenge with giant models is the increasing amount of compilation time -required to handle extremely large layer stacks in XLA. At the size of a full -TPU pod this compile time cost can become quite extreme. To remedy this, -instead of handing the compiler a huge stack of unrolled layers, we can use -native XLA control flow constructs to simplify the computational graph given -from JAX. For giant models this can drop the compile time from hour(s) to -minutes, and even at base-scale can be roughly 5x faster. - -In this case, we want to use the [XLA While Op](xla-while) via JAX's -[scan](jax-scan) control flow construct to express the idea that we're looping -over identically-defined layers when using a deep transformer network. We do -this via a custom Flax version of scan called `scan_with_axes` that also handles -the parameter logical axis name metadata needed for partitioning. - -## Rematerialization / Checkpointing - -"Rematerialization" or "checkpointing" is a technique for trading off compute -time for lower peak memory utilization when performing reverse-mode automatic -differentiation. JAX offers several different default rematerialization -"policies" that dictate which kinds of intermediate values are preserved from -the forward-pass to the backwards-pass calculation, and which are discarded to -be recomputed anew in the backwards-pass. - - -[xla-while]: https://www.tensorflow.org/xla/operation_semantics#while -[jax-scan]: https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scan.html diff --git a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/scalable_t5/t5_1_1/__init__.py b/spaces/juancopi81/youtube-music-transcribe/t5x/examples/scalable_t5/t5_1_1/__init__.py deleted file mode 100644 index da022c16301721a096a208e8bdb2a71bb87f9788..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/scalable_t5/t5_1_1/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2022 The T5X Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This empty file is needed for loading the gin files in this directory. 
diff --git a/spaces/kabita-choudhary/audio_to_text/README.md b/spaces/kabita-choudhary/audio_to_text/README.md deleted file mode 100644 index 1e9ed1774eb716b0132cde42ca5148c15cc6e06d..0000000000000000000000000000000000000000 --- a/spaces/kabita-choudhary/audio_to_text/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Audio To Text -emoji: 🚀 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kepl/gpt/g4f/Provider/Providers/Better.py b/spaces/kepl/gpt/g4f/Provider/Providers/Better.py deleted file mode 100644 index bee52870eb3300f25c9762ab204968791a2a30a9..0000000000000000000000000000000000000000 --- a/spaces/kepl/gpt/g4f/Provider/Providers/Better.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -import json -import requests -from typing import Dict, get_type_hints - -url = 'https://openai-proxy-api.vercel.app/v1/' -model = [ - 'gpt-3.5-turbo', - 'gpt-3.5-turbo-0613', - 'gpt-3.5-turbo-16k', - 'gpt-3.5-turbo-16k-0613', - 'gpt-4', -] - -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - headers = { - 'Content-Type': 'application/json', - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58', - 'Referer': 'https://chat.ylokh.xyz/', - 'Origin': 'https://chat.ylokh.xyz', - 'Connection': 'keep-alive', - } - - json_data = { - 'messages': messages, - 'temperature': 1.0, - 'model': model, - 'stream': stream, - } - - response = requests.post( - 'https://openai-proxy-api.vercel.app/v1/chat/completions', headers=headers, json=json_data, stream=True - ) - - for token in response.iter_lines(): - decoded = token.decode('utf-8') - if decoded.startswith('data: '): - data_str = decoded.replace('data: ', '') - data = json.loads(data_str) - if 'choices' in data and 'delta' in data['choices'][0]: - delta = data['choices'][0]['delta'] - content = delta.get('content', '') - finish_reason = delta.get('finish_reason', '') - - if finish_reason == 'stop': - break - if content: - yield content - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/facerender/animate.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/facerender/animate.py deleted file mode 100644 index 781f5a3318a086049cc6b74393073ddda7001d5e..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/facerender/animate.py +++ /dev/null @@ -1,257 +0,0 @@ -import os -import cv2 -import yaml -import numpy as np -import warnings -from skimage import img_as_ubyte -import safetensors -import safetensors.torch -warnings.filterwarnings('ignore') - - -import imageio -import torch -import torchvision - - -from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector -from src.facerender.modules.mapping import MappingNet -from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator -from src.facerender.modules.make_animation import make_animation - -from pydub import AudioSegment -from src.utils.face_enhancer import enhancer_generator_with_len, enhancer_list -from src.utils.paste_pic import 
paste_pic -from src.utils.videoio import save_video_with_watermark - -try: - import webui # in webui - in_webui = True -except: - in_webui = False - -class AnimateFromCoeff(): - - def __init__(self, sadtalker_path, device): - - with open(sadtalker_path['facerender_yaml']) as f: - config = yaml.safe_load(f) - - generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'], - **config['model_params']['common_params']) - kp_extractor = KPDetector(**config['model_params']['kp_detector_params'], - **config['model_params']['common_params']) - he_estimator = HEEstimator(**config['model_params']['he_estimator_params'], - **config['model_params']['common_params']) - mapping = MappingNet(**config['model_params']['mapping_params']) - - generator.to(device) - kp_extractor.to(device) - he_estimator.to(device) - mapping.to(device) - for param in generator.parameters(): - param.requires_grad = False - for param in kp_extractor.parameters(): - param.requires_grad = False - for param in he_estimator.parameters(): - param.requires_grad = False - for param in mapping.parameters(): - param.requires_grad = False - - if sadtalker_path is not None: - if 'checkpoint' in sadtalker_path: # use safe tensor - self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None) - else: - self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator) - else: - raise AttributeError("Checkpoint should be specified for video head pose estimator.") - - if sadtalker_path['mappingnet_checkpoint'] is not None: - self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping) - else: - raise AttributeError("Checkpoint should be specified for video head pose estimator.") - - self.kp_extractor = kp_extractor - self.generator = generator - self.he_estimator = he_estimator - self.mapping = mapping - - self.kp_extractor.eval() - self.generator.eval() - self.he_estimator.eval() - self.mapping.eval() - - self.device = device - - def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None, - kp_detector=None, he_estimator=None, - device="cpu"): - - checkpoint = safetensors.torch.load_file(checkpoint_path) - - if generator is not None: - x_generator = {} - for k,v in checkpoint.items(): - if 'generator' in k: - x_generator[k.replace('generator.', '')] = v - generator.load_state_dict(x_generator) - if kp_detector is not None: - x_generator = {} - for k,v in checkpoint.items(): - if 'kp_extractor' in k: - x_generator[k.replace('kp_extractor.', '')] = v - kp_detector.load_state_dict(x_generator) - if he_estimator is not None: - x_generator = {} - for k,v in checkpoint.items(): - if 'he_estimator' in k: - x_generator[k.replace('he_estimator.', '')] = v - he_estimator.load_state_dict(x_generator) - - return None - - def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None, - kp_detector=None, he_estimator=None, optimizer_generator=None, - optimizer_discriminator=None, optimizer_kp_detector=None, - optimizer_he_estimator=None, device="cpu"): - checkpoint = torch.load(checkpoint_path, map_location=torch.device(device)) - if generator is not None: - generator.load_state_dict(checkpoint['generator']) - if kp_detector is not None: - kp_detector.load_state_dict(checkpoint['kp_detector']) - if he_estimator is not None: - he_estimator.load_state_dict(checkpoint['he_estimator']) - if discriminator is not None: - try: - 
discriminator.load_state_dict(checkpoint['discriminator']) - except: - print ('No discriminator in the state-dict. Dicriminator will be randomly initialized') - if optimizer_generator is not None: - optimizer_generator.load_state_dict(checkpoint['optimizer_generator']) - if optimizer_discriminator is not None: - try: - optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator']) - except RuntimeError as e: - print ('No discriminator optimizer in the state-dict. Optimizer will be not initialized') - if optimizer_kp_detector is not None: - optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector']) - if optimizer_he_estimator is not None: - optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator']) - - return checkpoint['epoch'] - - def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None, - optimizer_mapping=None, optimizer_discriminator=None, device='cpu'): - checkpoint = torch.load(checkpoint_path, map_location=torch.device(device)) - if mapping is not None: - mapping.load_state_dict(checkpoint['mapping']) - if discriminator is not None: - discriminator.load_state_dict(checkpoint['discriminator']) - if optimizer_mapping is not None: - optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping']) - if optimizer_discriminator is not None: - optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator']) - - return checkpoint['epoch'] - - def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256): - - source_image=x['source_image'].type(torch.FloatTensor) - source_semantics=x['source_semantics'].type(torch.FloatTensor) - target_semantics=x['target_semantics_list'].type(torch.FloatTensor) - source_image=source_image.to(self.device) - source_semantics=source_semantics.to(self.device) - target_semantics=target_semantics.to(self.device) - if 'yaw_c_seq' in x: - yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor) - yaw_c_seq = x['yaw_c_seq'].to(self.device) - else: - yaw_c_seq = None - if 'pitch_c_seq' in x: - pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor) - pitch_c_seq = x['pitch_c_seq'].to(self.device) - else: - pitch_c_seq = None - if 'roll_c_seq' in x: - roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor) - roll_c_seq = x['roll_c_seq'].to(self.device) - else: - roll_c_seq = None - - frame_num = x['frame_num'] - - predictions_video = make_animation(source_image, source_semantics, target_semantics, - self.generator, self.kp_extractor, self.he_estimator, self.mapping, - yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True) - - predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:]) - predictions_video = predictions_video[:frame_num] - - video = [] - for idx in range(predictions_video.shape[0]): - image = predictions_video[idx] - image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32) - video.append(image) - result = img_as_ubyte(video) - - ### the generated video is 256x256, so we keep the aspect ratio, - original_size = crop_info[0] - if original_size: - result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ] - - video_name = x['video_name'] + '.mp4' - path = os.path.join(video_save_dir, 'temp_'+video_name) - - imageio.mimsave(path, result, fps=float(25)) - - av_path = os.path.join(video_save_dir, video_name) - return_path = av_path - - audio_path = x['audio_path'] - audio_name = 
os.path.splitext(os.path.split(audio_path)[-1])[0] - new_audio_path = os.path.join(video_save_dir, audio_name+'.wav') - start_time = 0 - # cog will not keep the .mp3 filename - sound = AudioSegment.from_file(audio_path) - frames = frame_num - end_time = start_time + frames*1/25*1000 - word1=sound.set_frame_rate(16000) - word = word1[start_time:end_time] - word.export(new_audio_path, format="wav") - - save_video_with_watermark(path, new_audio_path, av_path, watermark= False) - print(f'The generated video is named {video_save_dir}/{video_name}') - - if 'full' in preprocess.lower(): - # only add watermark to the full image. - video_name_full = x['video_name'] + '_full.mp4' - full_video_path = os.path.join(video_save_dir, video_name_full) - return_path = full_video_path - paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False) - print(f'The generated video is named {video_save_dir}/{video_name_full}') - else: - full_video_path = av_path - - #### paste back then enhancers - if enhancer: - video_name_enhancer = x['video_name'] + '_enhanced.mp4' - enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer) - av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer) - return_path = av_path_enhancer - - try: - enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer) - imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25)) - except: - enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer) - imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25)) - - save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False) - print(f'The generated video is named {video_save_dir}/{video_name_enhancer}') - os.remove(enhanced_path) - - os.remove(path) - os.remove(new_audio_path) - - return return_path - diff --git a/spaces/kevinwang676/Voice-Changer-Light/app_multi.py b/spaces/kevinwang676/Voice-Changer-Light/app_multi.py deleted file mode 100644 index 7ab8cf372450a25b4b2c89cd6914e1afa6b61ebc..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Voice-Changer-Light/app_multi.py +++ /dev/null @@ -1,823 +0,0 @@ -from typing import Union - -from argparse import ArgumentParser -from pathlib import Path -import subprocess -import librosa -import os -import time -import random - -import matplotlib.pyplot as plt -import numpy as np -from PIL import Image, ImageDraw, ImageFont -from moviepy.editor import * -from moviepy.video.io.VideoFileClip import VideoFileClip - -import asyncio -import json -import hashlib -from os import path, getenv -from pydub import AudioSegment - -import gradio as gr - -import torch - -import edge_tts - -from datetime import datetime -from scipy.io.wavfile import write - -import config -import util -from infer_pack.models import ( - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono -) -from vc_infer_pipeline import VC - -# Reference: https://huggingface.co/spaces/zomehwh/rvc-models/blob/main/app.py#L21 # noqa -in_hf_space = getenv('SYSTEM') == 'spaces' - -high_quality = True - -# Argument parsing -arg_parser = ArgumentParser() -arg_parser.add_argument( - '--hubert', - default=getenv('RVC_HUBERT', 'hubert_base.pt'), - help='path to hubert base model (default: hubert_base.pt)' -) -arg_parser.add_argument( - '--config', - default=getenv('RVC_MULTI_CFG', 'multi_config.json'), - 
help='path to config file (default: multi_config.json)' -) -arg_parser.add_argument( - '--api', - action='store_true', - help='enable api endpoint' -) -arg_parser.add_argument( - '--cache-examples', - action='store_true', - help='enable example caching, please remember delete gradio_cached_examples folder when example config has been modified' # noqa -) -args = arg_parser.parse_args() - -app_css = ''' -#model_info img { - max-width: 100px; - max-height: 100px; - float: right; -} - -#model_info p { - margin: unset; -} -''' - -app = gr.Blocks( - theme=gr.themes.Soft(primary_hue="orange", secondary_hue="slate"), - css=app_css, - analytics_enabled=False -) - -# Load hubert model -hubert_model = util.load_hubert_model(config.device, args.hubert) -hubert_model.eval() - -# Load models -multi_cfg = json.load(open(args.config, 'r')) -loaded_models = [] - -for model_name in multi_cfg.get('models'): - print(f'Loading model: {model_name}') - - # Load model info - model_info = json.load( - open(path.join('model', model_name, 'config.json'), 'r') - ) - - # Load RVC checkpoint - cpt = torch.load( - path.join('model', model_name, model_info['model']), - map_location='cpu' - ) - tgt_sr = cpt['config'][-1] - cpt['config'][-3] = cpt['weight']['emb_g.weight'].shape[0] # n_spk - - if_f0 = cpt.get('f0', 1) - net_g: Union[SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono] - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid( - *cpt['config'], - is_half=util.is_half(config.device) - ) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt['config']) - - del net_g.enc_q - - # According to original code, this thing seems necessary. - print(net_g.load_state_dict(cpt['weight'], strict=False)) - - net_g.eval().to(config.device) - net_g = net_g.half() if util.is_half(config.device) else net_g.float() - - vc = VC(tgt_sr, config) - - loaded_models.append(dict( - name=model_name, - metadata=model_info, - vc=vc, - net_g=net_g, - if_f0=if_f0, - target_sr=tgt_sr - )) - -print(f'Models loaded: {len(loaded_models)}') - -# Edge TTS speakers -tts_speakers_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) # noqa - -# Make MV -def make_bars_image(height_values, index, new_height): - - # Define the size of the image - width = 512 - height = new_height - - # Create a new image with a transparent background - image = Image.new('RGBA', (width, height), color=(0, 0, 0, 0)) - - # Get the image drawing context - draw = ImageDraw.Draw(image) - - # Define the rectangle width and spacing - rect_width = 2 - spacing = 2 - - # Define the list of height values for the rectangles - #height_values = [20, 40, 60, 80, 100, 80, 60, 40] - num_bars = len(height_values) - # Calculate the total width of the rectangles and the spacing - total_width = num_bars * rect_width + (num_bars - 1) * spacing - - # Calculate the starting position for the first rectangle - start_x = int((width - total_width) / 2) - # Define the buffer size - buffer_size = 80 - # Draw the rectangles from left to right - x = start_x - for i, height in enumerate(height_values): - - # Define the rectangle coordinates - y0 = buffer_size - y1 = height + buffer_size - x0 = x - x1 = x + rect_width - - # Draw the rectangle - draw.rectangle([x0, y0, x1, y1], fill='white') - - # Move to the next rectangle position - if i < num_bars - 1: - x += rect_width + spacing - - - # Rotate the image by 180 degrees - image = image.rotate(180) - - # Mirror the image - image = image.transpose(Image.FLIP_LEFT_RIGHT) - - # Save the image - image.save('audio_bars_'+ str(index) + 
'.png') - - return 'audio_bars_'+ str(index) + '.png' - -def db_to_height(db_value): - # Scale the dB value to a range between 0 and 1 - scaled_value = (db_value + 80) / 80 - - # Convert the scaled value to a height between 0 and 100 - height = scaled_value * 50 - - return height - -def infer(title, audio_in, image_in): - # Load the audio file - audio_path = audio_in - audio_data, sr = librosa.load(audio_path) - - # Get the duration in seconds - duration = librosa.get_duration(y=audio_data, sr=sr) - - # Extract the audio data for the desired time - start_time = 0 # start time in seconds - end_time = duration # end time in seconds - - start_index = int(start_time * sr) - end_index = int(end_time * sr) - - audio_data = audio_data[start_index:end_index] - - # Compute the short-time Fourier transform - hop_length = 512 - - - stft = librosa.stft(audio_data, hop_length=hop_length) - spectrogram = librosa.amplitude_to_db(np.abs(stft), ref=np.max) - - # Get the frequency values - freqs = librosa.fft_frequencies(sr=sr, n_fft=stft.shape[0]) - - # Select the indices of the frequency values that correspond to the desired frequencies - n_freqs = 114 - freq_indices = np.linspace(0, len(freqs) - 1, n_freqs, dtype=int) - - # Extract the dB values for the desired frequencies - db_values = [] - for i in range(spectrogram.shape[1]): - db_values.append(list(zip(freqs[freq_indices], spectrogram[freq_indices, i]))) - - # Print the dB values for the first time frame - print(db_values[0]) - - proportional_values = [] - - for frame in db_values: - proportional_frame = [db_to_height(db) for f, db in frame] - proportional_values.append(proportional_frame) - - print(proportional_values[0]) - print("AUDIO CHUNK: " + str(len(proportional_values))) - - # Open the background image - background_image = Image.open(image_in) - - # Resize the image while keeping its aspect ratio - bg_width, bg_height = background_image.size - aspect_ratio = bg_width / bg_height - new_width = 512 - new_height = int(new_width / aspect_ratio) - resized_bg = background_image.resize((new_width, new_height)) - - # Apply black cache for better visibility of the white text - bg_cache = Image.open('black_cache.png') - resized_bg.paste(bg_cache, (0, resized_bg.height - bg_cache.height), mask=bg_cache) - - # Create a new ImageDraw object - draw = ImageDraw.Draw(resized_bg) - - # Define the text to be added - text = title - font = ImageFont.truetype("Lato-Regular.ttf", 16) - text_color = (255, 255, 255) # white color - - # Calculate the position of the text - text_width, text_height = draw.textsize(text, font=font) - x = 30 - y = new_height - 70 - - # Draw the text on the image - draw.text((x, y), text, fill=text_color, font=font) - - # Save the resized image - resized_bg.save('resized_background.jpg') - - generated_frames = [] - for i, frame in enumerate(proportional_values): - bars_img = make_bars_image(frame, i, new_height) - bars_img = Image.open(bars_img) - # Paste the audio bars image on top of the background image - fresh_bg = Image.open('resized_background.jpg') - fresh_bg.paste(bars_img, (0, 0), mask=bars_img) - # Save the image - fresh_bg.save('audio_bars_with_bg' + str(i) + '.jpg') - generated_frames.append('audio_bars_with_bg' + str(i) + '.jpg') - print(generated_frames) - - # Create a video clip from the images - clip = ImageSequenceClip(generated_frames, fps=len(generated_frames)/(end_time-start_time)) - audio_clip = AudioFileClip(audio_in) - clip = clip.set_audio(audio_clip) - # Set the output codec - codec = 'libx264' - audio_codec = 
'aac' - # Save the video to a file - clip.write_videofile("my_video.mp4", codec=codec, audio_codec=audio_codec) - - retimed_clip = VideoFileClip("my_video.mp4") - - # Set the desired frame rate - new_fps = 25 - - # Create a new clip with the new frame rate - new_clip = retimed_clip.set_fps(new_fps) - - # Save the new clip as a new video file - new_clip.write_videofile("my_video_retimed.mp4", codec=codec, audio_codec=audio_codec) - - return "my_video_retimed.mp4" - -# mix vocal and non-vocal -def mix(audio1, audio2): - sound1 = AudioSegment.from_file(audio1) - sound2 = AudioSegment.from_file(audio2) - length = len(sound1) - mixed = sound1[:length].overlay(sound2) - - mixed.export("song.wav", format="wav") - - return "song.wav" - -# Bilibili -def youtube_downloader( - video_identifier, - start_time, - end_time, - output_filename="track.wav", - num_attempts=5, - url_base="", - quiet=False, - force=True, -): - output_path = Path(output_filename) - if output_path.exists(): - if not force: - return output_path - else: - output_path.unlink() - - quiet = "--quiet --no-warnings" if quiet else "" - command = f""" - yt-dlp {quiet} -x --audio-format wav -f bestaudio -o "{output_filename}" --download-sections "*{start_time}-{end_time}" "{url_base}{video_identifier}" # noqa: E501 - """.strip() - - attempts = 0 - while True: - try: - _ = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - attempts += 1 - if attempts == num_attempts: - return None - else: - break - - if output_path.exists(): - return output_path - else: - return None - -def audio_separated(audio_input, progress=gr.Progress()): - # start progress - progress(progress=0, desc="Starting...") - time.sleep(0.1) - - # check file input - if audio_input is None: - # show progress - for i in progress.tqdm(range(100), desc="Please wait..."): - time.sleep(0.01) - - return (None, None, 'Please input audio.') - - # create filename - filename = str(random.randint(10000,99999))+datetime.now().strftime("%d%m%Y%H%M%S") - - # progress - progress(progress=0.10, desc="Please wait...") - - # make dir output - os.makedirs("output", exist_ok=True) - - # progress - progress(progress=0.20, desc="Please wait...") - - # write - if high_quality: - write(filename+".wav", audio_input[0], audio_input[1]) - else: - write(filename+".mp3", audio_input[0], audio_input[1]) - - # progress - progress(progress=0.50, desc="Please wait...") - - # demucs process - if high_quality: - command_demucs = "python3 -m demucs --two-stems=vocals -d cpu "+filename+".wav -o output" - else: - command_demucs = "python3 -m demucs --two-stems=vocals --mp3 --mp3-bitrate 128 -d cpu "+filename+".mp3 -o output" - - os.system(command_demucs) - - # progress - progress(progress=0.70, desc="Please wait...") - - # remove file audio - if high_quality: - command_delete = "rm -v ./"+filename+".wav" - else: - command_delete = "rm -v ./"+filename+".mp3" - - os.system(command_delete) - - # progress - progress(progress=0.80, desc="Please wait...") - - # progress - for i in progress.tqdm(range(80,100), desc="Please wait..."): - time.sleep(0.1) - - if high_quality: - return "./output/htdemucs/"+filename+"/vocals.wav","./output/htdemucs/"+filename+"/no_vocals.wav","Successfully..." - else: - return "./output/htdemucs/"+filename+"/vocals.mp3","./output/htdemucs/"+filename+"/no_vocals.mp3","Successfully..." 
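The two helpers above close the loop around Demucs: audio_separated() shells out to `python3 -m demucs` to split the uploaded clip into a vocal and a no-vocal stem, and mix() later overlays a (converted) vocal back onto that accompaniment with pydub. A minimal, self-contained sketch of the same pydub overlay, using synthetic tones in place of real stems so it runs without any input files (the tone frequencies, durations and output name are illustrative, not taken from this app):

```python
from pydub.generators import Sine

# Stand-ins for the two stems: a 2-second 440 Hz "vocal" and a quieter 220 Hz "backing" tone.
vocal = Sine(440).to_audio_segment(duration=2000) - 6     # attenuate by 6 dB
backing = Sine(220).to_audio_segment(duration=2000) - 12  # attenuate by 12 dB

# Same pattern as mix(): trim to the first stem's length, overlay, then export a WAV file.
song = vocal[:len(vocal)].overlay(backing)
song.export("demo_mix.wav", format="wav")
```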
- - -# https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI/blob/main/infer-web.py#L118 # noqa -def vc_func( - input_audio, model_index, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_option -): - if input_audio is None: - return (None, 'Please provide input audio.') - - if model_index is None: - return (None, 'Please select a model.') - - model = loaded_models[model_index] - - # Reference: so-vits - (audio_samp, audio_npy) = input_audio - - # https://huggingface.co/spaces/zomehwh/rvc-models/blob/main/app.py#L49 - # Can be change well, we will see - if (audio_npy.shape[0] / audio_samp) > 600 and in_hf_space: - return (None, 'Input audio is longer than 600 secs.') - - # Bloody hell: https://stackoverflow.com/questions/26921836/ - if audio_npy.dtype != np.float32: # :thonk: - audio_npy = ( - audio_npy / np.iinfo(audio_npy.dtype).max - ).astype(np.float32) - - if len(audio_npy.shape) > 1: - audio_npy = librosa.to_mono(audio_npy.transpose(1, 0)) - - if audio_samp != 16000: - audio_npy = librosa.resample( - audio_npy, - orig_sr=audio_samp, - target_sr=16000 - ) - - pitch_int = int(pitch_adjust) - - resample = ( - 0 if resample_option == 'Disable resampling' - else int(resample_option) - ) - - times = [0, 0, 0] - - checksum = hashlib.sha512() - checksum.update(audio_npy.tobytes()) - - output_audio = model['vc'].pipeline( - hubert_model, - model['net_g'], - model['metadata'].get('speaker_id', 0), - audio_npy, - checksum.hexdigest(), - times, - pitch_int, - f0_method, - path.join('model', model['name'], model['metadata']['feat_index']), - feat_ratio, - model['if_f0'], - filter_radius, - model['target_sr'], - resample, - rms_mix_rate, - 'v2' - ) - - out_sr = ( - resample if resample >= 16000 and model['target_sr'] != resample - else model['target_sr'] - ) - - print(f'npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s') - return ((out_sr, output_audio), 'Success') - - -async def edge_tts_vc_func( - input_text, model_index, tts_speaker, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_option -): - if input_text is None: - return (None, 'Please provide TTS text.') - - if tts_speaker is None: - return (None, 'Please select TTS speaker.') - - if model_index is None: - return (None, 'Please select a model.') - - speaker = tts_speakers_list[tts_speaker]['ShortName'] - (tts_np, tts_sr) = await util.call_edge_tts(speaker, input_text) - return vc_func( - (tts_sr, tts_np), - model_index, - pitch_adjust, - f0_method, - feat_ratio, - filter_radius, - rms_mix_rate, - resample_option - ) - - -def update_model_info(model_index): - if model_index is None: - return str( - '### Model info\n' - 'Please select a model from dropdown above.' 
- ) - - model = loaded_models[model_index] - model_icon = model['metadata'].get('icon', '') - - return str( - '### Model info\n' - '![model icon]({icon})' - '**{name}**\n\n' - 'Author: {author}\n\n' - 'Source: {source}\n\n' - '{note}' - ).format( - name=model['metadata'].get('name'), - author=model['metadata'].get('author', 'Anonymous'), - source=model['metadata'].get('source', 'Unknown'), - note=model['metadata'].get('note', ''), - icon=( - model_icon - if model_icon.startswith(('http://', 'https://')) - else '/file/model/%s/%s' % (model['name'], model_icon) - ) - ) - - -def _example_vc( - input_audio, model_index, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_option -): - (audio, message) = vc_func( - input_audio, model_index, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_option - ) - return ( - audio, - message, - update_model_info(model_index) - ) - - -async def _example_edge_tts( - input_text, model_index, tts_speaker, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_option -): - (audio, message) = await edge_tts_vc_func( - input_text, model_index, tts_speaker, pitch_adjust, f0_method, - feat_ratio, filter_radius, rms_mix_rate, resample_option - ) - return ( - audio, - message, - update_model_info(model_index) - ) - - -with app: - gr.HTML("
      " - "

      🥳🎶🎡 - AI歌手,RVC歌声转换 + AI变声

      " - "
      ") - gr.Markdown("###
      🦄 - 能够自动提取视频中的声音,并去除背景音;Powered by [RVC-Project](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)
      ") - gr.Markdown("###
      更多精彩应用,敬请关注[滔滔AI](http://www.talktalkai.com);滔滔AI,为爱滔滔!💕
      ") - - with gr.Tab("🤗 - B站视频提取声音"): - with gr.Row(): - with gr.Column(): - ydl_url_input = gr.Textbox(label="B站视频网址(可直接填写相应的BV号)", value = "https://www.bilibili.com/video/BV...") - start = gr.Number(value=0, label="起始时间 (秒)") - end = gr.Number(value=15, label="结束时间 (秒)") - ydl_url_submit = gr.Button("提取声音文件吧", variant="primary") - as_audio_submit = gr.Button("去除背景音吧", variant="primary") - with gr.Column(): - ydl_audio_output = gr.Audio(label="Audio from Bilibili") - as_audio_input = ydl_audio_output - as_audio_vocals = gr.Audio(label="歌曲人声部分") - as_audio_no_vocals = gr.Audio(label="Music only", type="filepath", visible=False) - as_audio_message = gr.Textbox(label="Message", visible=False) - - ydl_url_submit.click(fn=youtube_downloader, inputs=[ydl_url_input, start, end], outputs=[ydl_audio_output]) - as_audio_submit.click(fn=audio_separated, inputs=[as_audio_input], outputs=[as_audio_vocals, as_audio_no_vocals, as_audio_message], show_progress=True, queue=True) - - with gr.Row(): - with gr.Column(): - with gr.Tab('🎶 - 歌声转换'): - input_audio = as_audio_vocals - vc_convert_btn = gr.Button('进行歌声转换吧!', variant='primary') - full_song = gr.Button("加入歌曲伴奏吧!", variant="primary") - new_song = gr.Audio(label="AI歌手+伴奏", type="filepath") - - with gr.Tab('🎙️ - 文本转语音'): - tts_input = gr.Textbox( - label='请填写您想要转换的文本(中英皆可)', - lines=3 - ) - tts_speaker = gr.Dropdown( - [ - '%s (%s)' % ( - s['FriendlyName'], - s['Gender'] - ) - for s in tts_speakers_list - ], - label='请选择一个相应语言的说话人', - type='index' - ) - - tts_convert_btn = gr.Button('进行AI变声吧', variant='primary') - - with gr.Tab("📺 - 音乐视频"): - with gr.Row(): - with gr.Column(): - inp1 = gr.Textbox(label="为视频配上精彩的文案吧(选填;英文)") - inp2 = new_song - inp3 = gr.Image(source='upload', type='filepath', label="上传一张背景图片吧") - btn = gr.Button("生成您的专属音乐视频吧", variant="primary") - - with gr.Column(): - out1 = gr.Video(label='您的专属音乐视频') - btn.click(fn=infer, inputs=[inp1, inp2, inp3], outputs=[out1]) - - pitch_adjust = gr.Slider( - label='Pitch', - minimum=-24, - maximum=24, - step=1, - value=0 - ) - f0_method = gr.Radio( - label='f0 methods', - choices=['pm', 'harvest'], - value='pm', - interactive=True - ) - - with gr.Accordion('更多设置', open=False): - feat_ratio = gr.Slider( - label='Feature ratio', - minimum=0, - maximum=1, - step=0.1, - value=0.6 - ) - filter_radius = gr.Slider( - label='Filter radius', - minimum=0, - maximum=7, - step=1, - value=3 - ) - rms_mix_rate = gr.Slider( - label='Volume envelope mix rate', - minimum=0, - maximum=1, - step=0.1, - value=1 - ) - resample_rate = gr.Dropdown( - [ - 'Disable resampling', - '16000', - '22050', - '44100', - '48000' - ], - label='Resample rate', - value='Disable resampling' - ) - - with gr.Column(): - # Model select - model_index = gr.Dropdown( - [ - '%s - %s' % ( - m['metadata'].get('source', 'Unknown'), - m['metadata'].get('name') - ) - for m in loaded_models - ], - label='请选择您的AI歌手(必选)', - type='index' - ) - - # Model info - with gr.Box(): - model_info = gr.Markdown( - '### AI歌手信息\n' - 'Please select a model from dropdown above.', - elem_id='model_info' - ) - - output_audio = gr.Audio(label='AI歌手(无伴奏)', type="filepath") - output_msg = gr.Textbox(label='Output message') - - multi_examples = multi_cfg.get('examples') - if ( - multi_examples and - multi_examples.get('vc') and multi_examples.get('tts_vc') - ): - with gr.Accordion('Sweet sweet examples', open=False): - with gr.Row(): - # VC Example - if multi_examples.get('vc'): - gr.Examples( - label='Audio conversion examples', - examples=multi_examples.get('vc'), 
- inputs=[ - input_audio, model_index, pitch_adjust, f0_method, - feat_ratio - ], - outputs=[output_audio, output_msg, model_info], - fn=_example_vc, - cache_examples=args.cache_examples, - run_on_click=args.cache_examples - ) - - # Edge TTS Example - if multi_examples.get('tts_vc'): - gr.Examples( - label='TTS conversion examples', - examples=multi_examples.get('tts_vc'), - inputs=[ - tts_input, model_index, tts_speaker, pitch_adjust, - f0_method, feat_ratio - ], - outputs=[output_audio, output_msg, model_info], - fn=_example_edge_tts, - cache_examples=args.cache_examples, - run_on_click=args.cache_examples - ) - - vc_convert_btn.click( - vc_func, - [ - input_audio, model_index, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_rate - ], - [output_audio, output_msg], - api_name='audio_conversion' - ) - - tts_convert_btn.click( - edge_tts_vc_func, - [ - tts_input, model_index, tts_speaker, pitch_adjust, f0_method, - feat_ratio, filter_radius, rms_mix_rate, resample_rate - ], - [output_audio, output_msg], - api_name='tts_conversion' - ) - - full_song.click(fn=mix, inputs=[output_audio, as_audio_no_vocals], outputs=[new_song]) - - model_index.change( - update_model_info, - inputs=[model_index], - outputs=[model_info], - show_progress=False, - queue=False - ) - - gr.Markdown("###
<center>注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。</center>")
-    gr.Markdown("### <center>🧸 - 如何使用此程序:填写视频网址和视频起止时间后,依次点击“提取声音文件吧”、“去除背景音吧”、“进行歌声转换吧!”、“加入歌曲伴奏吧!”四个按键即可。</center>
      ") - gr.HTML(''' - - ''') - -app.queue( - concurrency_count=1, - max_size=20, - api_open=args.api -).launch(show_error=True) \ No newline at end of file diff --git a/spaces/kevinwang676/VoiceChanger/src/face3d/data/flist_dataset.py b/spaces/kevinwang676/VoiceChanger/src/face3d/data/flist_dataset.py deleted file mode 100644 index c0b6945c80aa756074a5d3c02b9443b15ddcfc57..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/src/face3d/data/flist_dataset.py +++ /dev/null @@ -1,125 +0,0 @@ -"""This script defines the custom dataset for Deep3DFaceRecon_pytorch -""" - -import os.path -from data.base_dataset import BaseDataset, get_transform, get_affine_mat, apply_img_affine, apply_lm_affine -from data.image_folder import make_dataset -from PIL import Image -import random -import util.util as util -import numpy as np -import json -import torch -from scipy.io import loadmat, savemat -import pickle -from util.preprocess import align_img, estimate_norm -from util.load_mats import load_lm3d - - -def default_flist_reader(flist): - """ - flist format: impath label\nimpath label\n ...(same to caffe's filelist) - """ - imlist = [] - with open(flist, 'r') as rf: - for line in rf.readlines(): - impath = line.strip() - imlist.append(impath) - - return imlist - -def jason_flist_reader(flist): - with open(flist, 'r') as fp: - info = json.load(fp) - return info - -def parse_label(label): - return torch.tensor(np.array(label).astype(np.float32)) - - -class FlistDataset(BaseDataset): - """ - It requires one directories to host training images '/path/to/data/train' - You can train the model with the dataset flag '--dataroot /path/to/data'. - """ - - def __init__(self, opt): - """Initialize this dataset class. - - Parameters: - opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions - """ - BaseDataset.__init__(self, opt) - - self.lm3d_std = load_lm3d(opt.bfm_folder) - - msk_names = default_flist_reader(opt.flist) - self.msk_paths = [os.path.join(opt.data_root, i) for i in msk_names] - - self.size = len(self.msk_paths) - self.opt = opt - - self.name = 'train' if opt.isTrain else 'val' - if '_' in opt.flist: - self.name += '_' + opt.flist.split(os.sep)[-1].split('_')[0] - - - def __getitem__(self, index): - """Return a data point and its metadata information. - - Parameters: - index (int) -- a random integer for data indexing - - Returns a dictionary that contains A, B, A_paths and B_paths - img (tensor) -- an image in the input domain - msk (tensor) -- its corresponding attention mask - lm (tensor) -- its corresponding 3d landmarks - im_paths (str) -- image paths - aug_flag (bool) -- a flag used to tell whether its raw or augmented - """ - msk_path = self.msk_paths[index % self.size] # make sure index is within then range - img_path = msk_path.replace('mask/', '') - lm_path = '.'.join(msk_path.replace('mask', 'landmarks').split('.')[:-1]) + '.txt' - - raw_img = Image.open(img_path).convert('RGB') - raw_msk = Image.open(msk_path).convert('RGB') - raw_lm = np.loadtxt(lm_path).astype(np.float32) - - _, img, lm, msk = align_img(raw_img, raw_lm, self.lm3d_std, raw_msk) - - aug_flag = self.opt.use_aug and self.opt.isTrain - if aug_flag: - img, lm, msk = self._augmentation(img, lm, self.opt, msk) - - _, H = img.size - M = estimate_norm(lm, H) - transform = get_transform() - img_tensor = transform(img) - msk_tensor = transform(msk)[:1, ...] 
- lm_tensor = parse_label(lm) - M_tensor = parse_label(M) - - - return {'imgs': img_tensor, - 'lms': lm_tensor, - 'msks': msk_tensor, - 'M': M_tensor, - 'im_paths': img_path, - 'aug_flag': aug_flag, - 'dataset': self.name} - - def _augmentation(self, img, lm, opt, msk=None): - affine, affine_inv, flip = get_affine_mat(opt, img.size) - img = apply_img_affine(img, affine_inv) - lm = apply_lm_affine(lm, affine, flip, img.size) - if msk is not None: - msk = apply_img_affine(msk, affine_inv, method=Image.BILINEAR) - return img, lm, msk - - - - - def __len__(self): - """Return the total number of images in the dataset. - """ - return self.size diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/backbones/cgnet.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/backbones/cgnet.py deleted file mode 100644 index f8bca442c8f18179f217e40c298fb5ef39df77c4..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/backbones/cgnet.py +++ /dev/null @@ -1,367 +0,0 @@ -import torch -import torch.nn as nn -import torch.utils.checkpoint as cp -from annotator.uniformer.mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer, - constant_init, kaiming_init) -from annotator.uniformer.mmcv.runner import load_checkpoint -from annotator.uniformer.mmcv.utils.parrots_wrapper import _BatchNorm - -from annotator.uniformer.mmseg.utils import get_root_logger -from ..builder import BACKBONES - - -class GlobalContextExtractor(nn.Module): - """Global Context Extractor for CGNet. - - This class is employed to refine the joint feature of both local feature - and surrounding context. - - Args: - channel (int): Number of input feature channels. - reduction (int): Reductions for global context extractor. Default: 16. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - """ - - def __init__(self, channel, reduction=16, with_cp=False): - super(GlobalContextExtractor, self).__init__() - self.channel = channel - self.reduction = reduction - assert reduction >= 1 and channel >= reduction - self.with_cp = with_cp - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.fc = nn.Sequential( - nn.Linear(channel, channel // reduction), nn.ReLU(inplace=True), - nn.Linear(channel // reduction, channel), nn.Sigmoid()) - - def forward(self, x): - - def _inner_forward(x): - num_batch, num_channel = x.size()[:2] - y = self.avg_pool(x).view(num_batch, num_channel) - y = self.fc(y).view(num_batch, num_channel, 1, 1) - return x * y - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class ContextGuidedBlock(nn.Module): - """Context Guided Block for CGNet. - - This class consists of four components: local feature extractor, - surrounding feature extractor, joint feature extractor and global - context extractor. - - Args: - in_channels (int): Number of input feature channels. - out_channels (int): Number of output feature channels. - dilation (int): Dilation rate for surrounding context extractor. - Default: 2. - reduction (int): Reduction for global context extractor. Default: 16. - skip_connect (bool): Add input to output or not. Default: True. - downsample (bool): Downsample the input to 1/2 or not. Default: False. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. 
- Default: dict(type='BN', requires_grad=True). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='PReLU'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - """ - - def __init__(self, - in_channels, - out_channels, - dilation=2, - reduction=16, - skip_connect=True, - downsample=False, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='PReLU'), - with_cp=False): - super(ContextGuidedBlock, self).__init__() - self.with_cp = with_cp - self.downsample = downsample - - channels = out_channels if downsample else out_channels // 2 - if 'type' in act_cfg and act_cfg['type'] == 'PReLU': - act_cfg['num_parameters'] = channels - kernel_size = 3 if downsample else 1 - stride = 2 if downsample else 1 - padding = (kernel_size - 1) // 2 - - self.conv1x1 = ConvModule( - in_channels, - channels, - kernel_size, - stride, - padding, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - self.f_loc = build_conv_layer( - conv_cfg, - channels, - channels, - kernel_size=3, - padding=1, - groups=channels, - bias=False) - self.f_sur = build_conv_layer( - conv_cfg, - channels, - channels, - kernel_size=3, - padding=dilation, - groups=channels, - dilation=dilation, - bias=False) - - self.bn = build_norm_layer(norm_cfg, 2 * channels)[1] - self.activate = nn.PReLU(2 * channels) - - if downsample: - self.bottleneck = build_conv_layer( - conv_cfg, - 2 * channels, - out_channels, - kernel_size=1, - bias=False) - - self.skip_connect = skip_connect and not downsample - self.f_glo = GlobalContextExtractor(out_channels, reduction, with_cp) - - def forward(self, x): - - def _inner_forward(x): - out = self.conv1x1(x) - loc = self.f_loc(out) - sur = self.f_sur(out) - - joi_feat = torch.cat([loc, sur], 1) # the joint feature - joi_feat = self.bn(joi_feat) - joi_feat = self.activate(joi_feat) - if self.downsample: - joi_feat = self.bottleneck(joi_feat) # channel = out_channels - # f_glo is employed to refine the joint feature - out = self.f_glo(joi_feat) - - if self.skip_connect: - return x + out - else: - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class InputInjection(nn.Module): - """Downsampling module for CGNet.""" - - def __init__(self, num_downsampling): - super(InputInjection, self).__init__() - self.pool = nn.ModuleList() - for i in range(num_downsampling): - self.pool.append(nn.AvgPool2d(3, stride=2, padding=1)) - - def forward(self, x): - for pool in self.pool: - x = pool(x) - return x - - -@BACKBONES.register_module() -class CGNet(nn.Module): - """CGNet backbone. - - A Light-weight Context Guided Network for Semantic Segmentation - arXiv: https://arxiv.org/abs/1811.08201 - - Args: - in_channels (int): Number of input image channels. Normally 3. - num_channels (tuple[int]): Numbers of feature channels at each stages. - Default: (32, 64, 128). - num_blocks (tuple[int]): Numbers of CG blocks at stage 1 and stage 2. - Default: (3, 21). - dilations (tuple[int]): Dilation rate for surrounding context - extractors at stage 1 and stage 2. Default: (2, 4). - reductions (tuple[int]): Reductions for global context extractors at - stage 1 and stage 2. Default: (8, 16). - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN', requires_grad=True). 
- act_cfg (dict): Config dict for activation layer. - Default: dict(type='PReLU'). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - """ - - def __init__(self, - in_channels=3, - num_channels=(32, 64, 128), - num_blocks=(3, 21), - dilations=(2, 4), - reductions=(8, 16), - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='PReLU'), - norm_eval=False, - with_cp=False): - - super(CGNet, self).__init__() - self.in_channels = in_channels - self.num_channels = num_channels - assert isinstance(self.num_channels, tuple) and len( - self.num_channels) == 3 - self.num_blocks = num_blocks - assert isinstance(self.num_blocks, tuple) and len(self.num_blocks) == 2 - self.dilations = dilations - assert isinstance(self.dilations, tuple) and len(self.dilations) == 2 - self.reductions = reductions - assert isinstance(self.reductions, tuple) and len(self.reductions) == 2 - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - if 'type' in self.act_cfg and self.act_cfg['type'] == 'PReLU': - self.act_cfg['num_parameters'] = num_channels[0] - self.norm_eval = norm_eval - self.with_cp = with_cp - - cur_channels = in_channels - self.stem = nn.ModuleList() - for i in range(3): - self.stem.append( - ConvModule( - cur_channels, - num_channels[0], - 3, - 2 if i == 0 else 1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - cur_channels = num_channels[0] - - self.inject_2x = InputInjection(1) # down-sample for Input, factor=2 - self.inject_4x = InputInjection(2) # down-sample for Input, factor=4 - - cur_channels += in_channels - self.norm_prelu_0 = nn.Sequential( - build_norm_layer(norm_cfg, cur_channels)[1], - nn.PReLU(cur_channels)) - - # stage 1 - self.level1 = nn.ModuleList() - for i in range(num_blocks[0]): - self.level1.append( - ContextGuidedBlock( - cur_channels if i == 0 else num_channels[1], - num_channels[1], - dilations[0], - reductions[0], - downsample=(i == 0), - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - with_cp=with_cp)) # CG block - - cur_channels = 2 * num_channels[1] + in_channels - self.norm_prelu_1 = nn.Sequential( - build_norm_layer(norm_cfg, cur_channels)[1], - nn.PReLU(cur_channels)) - - # stage 2 - self.level2 = nn.ModuleList() - for i in range(num_blocks[1]): - self.level2.append( - ContextGuidedBlock( - cur_channels if i == 0 else num_channels[2], - num_channels[2], - dilations[1], - reductions[1], - downsample=(i == 0), - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - with_cp=with_cp)) # CG block - - cur_channels = 2 * num_channels[2] - self.norm_prelu_2 = nn.Sequential( - build_norm_layer(norm_cfg, cur_channels)[1], - nn.PReLU(cur_channels)) - - def forward(self, x): - output = [] - - # stage 0 - inp_2x = self.inject_2x(x) - inp_4x = self.inject_4x(x) - for layer in self.stem: - x = layer(x) - x = self.norm_prelu_0(torch.cat([x, inp_2x], 1)) - output.append(x) - - # stage 1 - for i, layer in enumerate(self.level1): - x = layer(x) - if i == 0: - down1 = x - x = self.norm_prelu_1(torch.cat([x, down1, inp_4x], 1)) - output.append(x) - - # stage 2 - for i, layer in enumerate(self.level2): - x = layer(x) - if i == 0: - down2 = x - x = self.norm_prelu_2(torch.cat([down2, x], 1)) - output.append(x) - - return output - - 
def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - if isinstance(pretrained, str): - logger = get_root_logger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.Linear)): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - elif isinstance(m, nn.PReLU): - constant_init(m, 0) - else: - raise TypeError('pretrained must be a str or None') - - def train(self, mode=True): - """Convert the model into training mode will keeping the normalization - layer freezed.""" - super(CGNet, self).train(mode) - if mode and self.norm_eval: - for m in self.modules(): - # trick: eval have effect on BatchNorm only - if isinstance(m, _BatchNorm): - m.eval() diff --git a/spaces/kirch/Text2Video-Zero/annotator/util.py b/spaces/kirch/Text2Video-Zero/annotator/util.py deleted file mode 100644 index 90831643d19cc1b9b0940df3d4fd4d846ba74a05..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/util.py +++ /dev/null @@ -1,38 +0,0 @@ -import numpy as np -import cv2 -import os - - -annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts') - - -def HWC3(x): - assert x.dtype == np.uint8 - if x.ndim == 2: - x = x[:, :, None] - assert x.ndim == 3 - H, W, C = x.shape - assert C == 1 or C == 3 or C == 4 - if C == 3: - return x - if C == 1: - return np.concatenate([x, x, x], axis=2) - if C == 4: - color = x[:, :, 0:3].astype(np.float32) - alpha = x[:, :, 3:4].astype(np.float32) / 255.0 - y = color * alpha + 255.0 * (1.0 - alpha) - y = y.clip(0, 255).astype(np.uint8) - return y - - -def resize_image(input_image, resolution): - H, W, C = input_image.shape - H = float(H) - W = float(W) - k = float(resolution) / min(H, W) - H *= k - W *= k - H = int(np.round(H / 64.0)) * 64 - W = int(np.round(W / 64.0)) * 64 - img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA) - return img diff --git a/spaces/koajoel/PolyFormer/app.py b/spaces/koajoel/PolyFormer/app.py deleted file mode 100644 index 091757de4e901b08563c643b03cc10cd55a5a9f5..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/app.py +++ /dev/null @@ -1,52 +0,0 @@ -# https://huggingface.co/koajoel/PolyFormer -import os -import torch -import numpy as np -from fairseq import utils,tasks -from utils.checkpoint_utils import load_model_ensemble_and_task -from models.polyformer import PolyFormerModel -import cv2 - -import torch -import numpy as np -from fairseq import utils, tasks -from fairseq import checkpoint_utils -from utils.eval_utils import eval_step -from tasks.refcoco import RefcocoTask -from models.polyformer import PolyFormerModel -from PIL import Image -from torchvision import transforms -import cv2 -import gradio as gr -import math -from io import BytesIO -import base64 -import re -from demo import visual_grounding - -title = "PolyFormer for Visual Grounding" - -description = """

      Project Page | Paper | Github Repo

      -

      Demo of PolyFormer for referring image segmentation and referring expression comprehension. Upload your own image or click any one of the examples, and write a description about a certain object. Then click \"Submit\" and wait for the results.

      -

      For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. -
      - -Duplicate Space -

      -""" - -examples = [['demo/vases.jpg', 'the blue vase on the left'], - ['demo/dog.jpg', 'the dog wearing glasses'], - ['demo/bear.jpeg', 'a bear astronaut in the space'], - ['demo/unicorn.jpeg', 'a unicorn doing computer vision research'], - ['demo/pig.jpeg', 'a pig robot preparing a delicious meal'], - ['demo/otta.png', 'a gentleman otter in a 19th century portrait'], - ['demo/pikachu.jpeg', 'a pikachu fine-dining with a view to the Eiffel Tower'], - ['demo/cabin.jpeg', 'a small cabin on top of a snowy mountain in the style of Disney art station'] - ] -io = gr.Interface(fn=visual_grounding, inputs=[gr.inputs.Image(type='pil'), "textbox"], - outputs=[gr.outputs.Image(label="output", type='numpy'), gr.outputs.Image(label="predicted mask", type='numpy')], - title=title, description=description, examples=examples, - allow_flagging=False, allow_screenshot=False, cache_examples=False) -io.launch() - diff --git a/spaces/koajoel/PolyFormer/criterions/__init__.py b/spaces/koajoel/PolyFormer/criterions/__init__.py deleted file mode 100644 index cba954ece281bebf63a60e82130615cfec0bfe6c..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/criterions/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .label_smoothed_cross_entropy import AdjustLabelSmoothedCrossEntropyCriterion diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/latent_depth/README.md b/spaces/koajoel/PolyFormer/fairseq/examples/latent_depth/README.md deleted file mode 100644 index 7774c333053b95d15b180fdfc3ee3cd817790520..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/latent_depth/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Deep Transformers with Latent Depth (Li et al., 2020) - -[https://arxiv.org/abs/2009.13102](https://arxiv.org/abs/2009.13102). - -## Introduction - -We present a probabilistic framework to automatically learn which layer(s) to use by learning the posterior distributions of layer selection. As an extension of this framework, we propose a novel method to train one shared Transformer network for multilingual machine translation with different layer selection posteriors for each language pair. - -## Training a multilingual model with latent depth - -Below is an example of training with latent depth in decoder for one-to-many (O2M) related languages. We use the same preprocessed (numberized and binarized) TED8 dataset as in [Balancing Training for Multilingual Neural Machine Translation (Wang et al., 2020)](https://github.com/cindyxinyiwang/multiDDS), which could be generated by [the script](https://github.com/cindyxinyiwang/multiDDS/blob/multiDDS/util_scripts/prepare_multilingual_data.sh) the author provided. 
-```bash -lang_pairs_str="eng-aze,eng-bel,eng-ces,eng-glg,eng-por,eng-rus,eng-slk,eng-tur" -databin_dir= - -fairseq-train ${databin_dir} \ - --user-dir examples/latent_depth/latent_depth_src \ - --lang-pairs "${lang_pairs_str}" \ - --arch multilingual_transformer_iwslt_de_en \ - --task multilingual_translation_latent_depth \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --share-encoders \ - --share-decoders \ - --decoder-langtok \ - --share-decoder-input-output-embed \ - --dropout 0.3 --attention-dropout 0.3 \ - --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \ - --lr-scheduler inverse_sqrt --stop-min-lr 1e-9 --warmup-init-lr 1e-7 --warmup-updates 8000 \ - --max-tokens 4096 --update-freq 1 \ - --lr 0.0015 \ - --clip-norm 1.0 \ - --seed 2 \ - --ddp-backend=legacy_ddp \ - --encoder-layers 12 \ - --decoder-layers 24 \ - --decoder-latent-layer \ - --sparsity-weight 0.1 \ - --anneal-updates 5000 \ - --soft-update 500 \ - --target-layers 12 \ - --share-weight 0.1 -``` -## Inference command - -```bash -lang_pairs_str="eng-aze,eng-bel,eng-ces,eng-glg,eng-por,eng-rus,eng-slk,eng-tur" -databin_dir= -model_path= -src_lang= -tgt_lang= -gen_data= - -fairseq-generate ${databin_dir} \ - --path ${model_path} \ - --task multilingual_translation_latent_depth \ - --decoder-latent-layer \ - --lang-pairs "${lang_pairs_str}" \ - -s ${src_lang} -t ${tgt_lang} \ - --gen-subset $gen_data \ - --scoring sacrebleu \ - --remove-bpe 'sentencepiece' \ - --lenpen 1.0 \ - --beam 5 \ - --decoder-langtok \ - --max-tokens 4096 -``` - - -## Citation -```bibtex -@article{li2020deep, - title={Deep Transformers with Latent Depth}, - author={Li, Xian and Stickland, Asa Cooper and Tang, Yuqing and Kong, Xiang}, - journal={arXiv preprint arXiv:2009.13102}, - year={2020} -} -``` diff --git a/spaces/kukuhtw/AutoGPT/autogpt/processing/__init__.py b/spaces/kukuhtw/AutoGPT/autogpt/processing/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-f724f960.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-f724f960.css deleted file mode 100644 index d643e582c28b4d2288e07fad934bb8ab58811ea8..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-f724f960.css +++ /dev/null @@ -1 +0,0 @@ -.container.svelte-wgo10s.svelte-wgo10s{display:flex;position:relative;flex-direction:column;justify-content:center;align-items:center;width:var(--size-full);height:var(--size-full)}.image-container.svelte-wgo10s.svelte-wgo10s{position:relative;top:0;left:0;flex-grow:1;width:100%;overflow:hidden}.fit-height.svelte-wgo10s.svelte-wgo10s{position:absolute;top:0;left:0;width:100%;height:100%;object-fit:contain}.mask.svelte-wgo10s.svelte-wgo10s{opacity:.85;transition:all .2s ease-in-out}.image-container.svelte-wgo10s:hover 
.mask.svelte-wgo10s{opacity:.3}.mask.active.svelte-wgo10s.svelte-wgo10s{opacity:1}.mask.inactive.svelte-wgo10s.svelte-wgo10s{opacity:0}.legend.svelte-wgo10s.svelte-wgo10s{display:flex;flex-direction:row;flex-wrap:wrap;align-content:center;justify-content:center;align-items:center;gap:var(--spacing-sm);padding:var(--spacing-sm)}.legend-item.svelte-wgo10s.svelte-wgo10s{display:flex;flex-direction:row;align-items:center;cursor:pointer;border-radius:var(--radius-sm);padding:var(--spacing-sm)} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/isympy.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/isympy.py deleted file mode 100644 index 50e9bc78d08904b8c177105ee90d984ea4b01d20..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/isympy.py +++ /dev/null @@ -1,342 +0,0 @@ -""" -Python shell for SymPy. - -This is just a normal Python shell (IPython shell if you have the -IPython package installed), that executes the following commands for -the user: - - >>> from __future__ import division - >>> from sympy import * - >>> x, y, z, t = symbols('x y z t') - >>> k, m, n = symbols('k m n', integer=True) - >>> f, g, h = symbols('f g h', cls=Function) - >>> init_printing() - -So starting 'isympy' is equivalent to starting Python (or IPython) and -executing the above commands by hand. It is intended for easy and quick -experimentation with SymPy. isympy is a good way to use SymPy as an -interactive calculator. If you have IPython and Matplotlib installed, then -interactive plotting is enabled by default. - -COMMAND LINE OPTIONS --------------------- - --c CONSOLE, --console=CONSOLE - - Use the specified shell (Python or IPython) shell as the console - backend instead of the default one (IPython if present, Python - otherwise), e.g.: - - $isympy -c python - - CONSOLE must be one of 'ipython' or 'python' - --p PRETTY, --pretty PRETTY - - Setup pretty-printing in SymPy. When pretty-printing is enabled, - expressions can be printed with Unicode or ASCII. The default is - to use pretty-printing (with Unicode if the terminal supports it). - When this option is 'no', expressions will not be pretty-printed - and ASCII will be used: - - $isympy -p no - - PRETTY must be one of 'unicode', 'ascii', or 'no' - --t TYPES, --types=TYPES - - Setup the ground types for the polys. By default, gmpy ground types - are used if gmpy2 or gmpy is installed, otherwise it falls back to python - ground types, which are a little bit slower. You can manually - choose python ground types even if gmpy is installed (e.g., for - testing purposes): - - $isympy -t python - - TYPES must be one of 'gmpy', 'gmpy1' or 'python' - - Note that the ground type gmpy1 is primarily intended for testing; it - forces the use of gmpy version 1 even if gmpy2 is available. - - This is the same as setting the environment variable - SYMPY_GROUND_TYPES to the given ground type (e.g., - SYMPY_GROUND_TYPES='gmpy') - - The ground types can be determined interactively from the variable - sympy.polys.domains.GROUND_TYPES. - --o ORDER, --order ORDER - - Setup the ordering of terms for printing. The default is lex, which - orders terms lexicographically (e.g., x**2 + x + 1). You can choose - other orderings, such as rev-lex, which will use reverse - lexicographic ordering (e.g., 1 + x + x**2): - - $isympy -o rev-lex - - ORDER must be one of 'lex', 'rev-lex', 'grlex', 'rev-grlex', - 'grevlex', 'rev-grevlex', 'old', or 'none'. 
- - Note that for very large expressions, ORDER='none' may speed up - printing considerably but the terms will have no canonical order. - --q, --quiet - - Print only Python's and SymPy's versions to stdout at startup. - --d, --doctest - - Use the same format that should be used for doctests. This is - equivalent to -c python -p no. - --C, --no-cache - - Disable the caching mechanism. Disabling the cache may slow certain - operations down considerably. This is useful for testing the cache, - or for benchmarking, as the cache can result in deceptive timings. - - This is equivalent to setting the environment variable - SYMPY_USE_CACHE to 'no'. - --a, --auto-symbols (requires at least IPython 0.11) - - Automatically create missing symbols. Normally, typing a name of a - Symbol that has not been instantiated first would raise NameError, - but with this option enabled, any undefined name will be - automatically created as a Symbol. - - Note that this is intended only for interactive, calculator style - usage. In a script that uses SymPy, Symbols should be instantiated - at the top, so that it's clear what they are. - - This will not override any names that are already defined, which - includes the single character letters represented by the mnemonic - QCOSINE (see the "Gotchas and Pitfalls" document in the - documentation). You can delete existing names by executing "del - name". If a name is defined, typing "'name' in dir()" will return True. - - The Symbols that are created using this have default assumptions. - If you want to place assumptions on symbols, you should create them - using symbols() or var(). - - Finally, this only works in the top level namespace. So, for - example, if you define a function in isympy with an undefined - Symbol, it will not work. - - See also the -i and -I options. - --i, --int-to-Integer (requires at least IPython 0.11) - - Automatically wrap int literals with Integer. This makes it so that - things like 1/2 will come out as Rational(1, 2), rather than 0.5. This - works by preprocessing the source and wrapping all int literals with - Integer. Note that this will not change the behavior of int literals - assigned to variables, and it also won't change the behavior of functions - that return int literals. - - If you want an int, you can wrap the literal in int(), e.g. int(3)/int(2) - gives 1.5 (with division imported from __future__). - --I, --interactive (requires at least IPython 0.11) - - This is equivalent to --auto-symbols --int-to-Integer. Future options - designed for ease of interactive use may be added to this. - --D, --debug - - Enable debugging output. This is the same as setting the - environment variable SYMPY_DEBUG to 'True'. The debug status is set - in the variable SYMPY_DEBUG within isympy. - --- IPython options - - Additionally you can pass command line options directly to the IPython - interpreter (the standard Python shell is not supported). However you - need to add the '--' separator between two types of options, e.g the - startup banner option and the colors option. You need to enter the - options as required by the version of IPython that you are using, too: - - in IPython 0.11, - - $isympy -q -- --colors=NoColor - - or older versions of IPython, - - $isympy -q -- -colors NoColor - -See also isympy --help. -""" - -import os -import sys - -# DO NOT IMPORT SYMPY HERE! Or the setting of the sympy environment variables -# by the command line will break. 
- -def main() -> None: - from argparse import ArgumentParser, RawDescriptionHelpFormatter - - VERSION = None - if '--version' in sys.argv: - # We cannot import sympy before this is run, because flags like -C and - # -t set environment variables that must be set before SymPy is - # imported. The only thing we need to import it for is to get the - # version, which only matters with the --version flag. - import sympy - VERSION = sympy.__version__ - - usage = 'isympy [options] -- [ipython options]' - parser = ArgumentParser( - usage=usage, - description=__doc__, - formatter_class=RawDescriptionHelpFormatter, - ) - - parser.add_argument('--version', action='version', version=VERSION) - - parser.add_argument( - '-c', '--console', - dest='console', - action='store', - default=None, - choices=['ipython', 'python'], - metavar='CONSOLE', - help='select type of interactive session: ipython | python; defaults ' - 'to ipython if IPython is installed, otherwise python') - - parser.add_argument( - '-p', '--pretty', - dest='pretty', - action='store', - default=None, - metavar='PRETTY', - choices=['unicode', 'ascii', 'no'], - help='setup pretty printing: unicode | ascii | no; defaults to ' - 'unicode printing if the terminal supports it, otherwise ascii') - - parser.add_argument( - '-t', '--types', - dest='types', - action='store', - default=None, - metavar='TYPES', - choices=['gmpy', 'gmpy1', 'python'], - help='setup ground types: gmpy | gmpy1 | python; defaults to gmpy if gmpy2 ' - 'or gmpy is installed, otherwise python') - - parser.add_argument( - '-o', '--order', - dest='order', - action='store', - default=None, - metavar='ORDER', - choices=['lex', 'grlex', 'grevlex', 'rev-lex', 'rev-grlex', 'rev-grevlex', 'old', 'none'], - help='setup ordering of terms: [rev-]lex | [rev-]grlex | [rev-]grevlex | old | none; defaults to lex') - - parser.add_argument( - '-q', '--quiet', - dest='quiet', - action='store_true', - default=False, - help='print only version information at startup') - - parser.add_argument( - '-d', '--doctest', - dest='doctest', - action='store_true', - default=False, - help='use the doctest format for output (you can just copy and paste it)') - - parser.add_argument( - '-C', '--no-cache', - dest='cache', - action='store_false', - default=True, - help='disable caching mechanism') - - parser.add_argument( - '-a', '--auto-symbols', - dest='auto_symbols', - action='store_true', - default=False, - help='automatically construct missing symbols') - - parser.add_argument( - '-i', '--int-to-Integer', - dest='auto_int_to_Integer', - action='store_true', - default=False, - help="automatically wrap int literals with Integer") - - parser.add_argument( - '-I', '--interactive', - dest='interactive', - action='store_true', - default=False, - help="equivalent to -a -i") - - parser.add_argument( - '-D', '--debug', - dest='debug', - action='store_true', - default=False, - help='enable debugging output') - - (options, ipy_args) = parser.parse_known_args() - if '--' in ipy_args: - ipy_args.remove('--') - - if not options.cache: - os.environ['SYMPY_USE_CACHE'] = 'no' - - if options.types: - os.environ['SYMPY_GROUND_TYPES'] = options.types - - if options.debug: - os.environ['SYMPY_DEBUG'] = str(options.debug) - - if options.doctest: - options.pretty = 'no' - options.console = 'python' - - session = options.console - - if session is not None: - ipython = session == 'ipython' - else: - try: - import IPython - ipython = True - except ImportError: - if not options.quiet: - from sympy.interactive.session import 
no_ipython - print(no_ipython) - ipython = False - - args = { - 'pretty_print': True, - 'use_unicode': None, - 'use_latex': None, - 'order': None, - 'argv': ipy_args, - } - - if options.pretty == 'unicode': - args['use_unicode'] = True - elif options.pretty == 'ascii': - args['use_unicode'] = False - elif options.pretty == 'no': - args['pretty_print'] = False - - if options.order is not None: - args['order'] = options.order - - args['quiet'] = options.quiet - args['auto_symbols'] = options.auto_symbols or options.interactive - args['auto_int_to_Integer'] = options.auto_int_to_Integer or options.interactive - - from sympy.interactive import init_session - init_session(ipython, **args) - -if __name__ == "__main__": - main() diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/tree.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/tree.py deleted file mode 100644 index 09476b226c671e8644fb40923a2c6f40a1009124..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/tree.py +++ /dev/null @@ -1,330 +0,0 @@ -"""A tree representation of a linear markdown-it token stream. - -This module is not part of upstream JavaScript markdown-it. -""" -from __future__ import annotations - -from collections.abc import Generator, Sequence -import textwrap -from typing import Any, NamedTuple, TypeVar, overload - -from .token import Token -from .utils import _removesuffix - - -class _NesterTokens(NamedTuple): - opening: Token - closing: Token - - -_NodeType = TypeVar("_NodeType", bound="SyntaxTreeNode") - - -class SyntaxTreeNode: - """A Markdown syntax tree node. - - A class that can be used to construct a tree representation of a linear - `markdown-it-py` token stream. - - Each node in the tree represents either: - - root of the Markdown document - - a single unnested `Token` - - a `Token` "_open" and "_close" token pair, and the tokens nested in - between - """ - - def __init__( - self, tokens: Sequence[Token] = (), *, create_root: bool = True - ) -> None: - """Initialize a `SyntaxTreeNode` from a token stream. - - If `create_root` is True, create a root node for the document. - """ - # Only nodes representing an unnested token have self.token - self.token: Token | None = None - - # Only containers have nester tokens - self.nester_tokens: _NesterTokens | None = None - - # Root node does not have self.parent - self._parent: Any = None - - # Empty list unless a non-empty container, or unnested token that has - # children (i.e. inline or img) - self._children: list = [] - - if create_root: - self._set_children_from_tokens(tokens) - return - - if not tokens: - raise ValueError( - "Can only create root from empty token sequence." - " Set `create_root=True`." - ) - elif len(tokens) == 1: - inline_token = tokens[0] - if inline_token.nesting: - raise ValueError( - "Unequal nesting level at the start and end of token stream." - ) - self.token = inline_token - if inline_token.children: - self._set_children_from_tokens(inline_token.children) - else: - self.nester_tokens = _NesterTokens(tokens[0], tokens[-1]) - self._set_children_from_tokens(tokens[1:-1]) - - def __repr__(self) -> str: - return f"{type(self).__name__}({self.type})" - - @overload - def __getitem__(self: _NodeType, item: int) -> _NodeType: - ... - - @overload - def __getitem__(self: _NodeType, item: slice) -> list[_NodeType]: - ... 
- - def __getitem__(self: _NodeType, item: int | slice) -> _NodeType | list[_NodeType]: - return self.children[item] - - def to_tokens(self: _NodeType) -> list[Token]: - """Recover the linear token stream.""" - - def recursive_collect_tokens(node: _NodeType, token_list: list[Token]) -> None: - if node.type == "root": - for child in node.children: - recursive_collect_tokens(child, token_list) - elif node.token: - token_list.append(node.token) - else: - assert node.nester_tokens - token_list.append(node.nester_tokens.opening) - for child in node.children: - recursive_collect_tokens(child, token_list) - token_list.append(node.nester_tokens.closing) - - tokens: list[Token] = [] - recursive_collect_tokens(self, tokens) - return tokens - - @property - def children(self: _NodeType) -> list[_NodeType]: - return self._children - - @children.setter - def children(self: _NodeType, value: list[_NodeType]) -> None: - self._children = value - - @property - def parent(self: _NodeType) -> _NodeType | None: - return self._parent - - @parent.setter - def parent(self: _NodeType, value: _NodeType | None) -> None: - self._parent = value - - @property - def is_root(self) -> bool: - """Is the node a special root node?""" - return not (self.token or self.nester_tokens) - - @property - def is_nested(self) -> bool: - """Is this node nested?. - - Returns `True` if the node represents a `Token` pair and tokens in the - sequence between them, where `Token.nesting` of the first `Token` in - the pair is 1 and nesting of the other `Token` is -1. - """ - return bool(self.nester_tokens) - - @property - def siblings(self: _NodeType) -> Sequence[_NodeType]: - """Get siblings of the node. - - Gets the whole group of siblings, including self. - """ - if not self.parent: - return [self] - return self.parent.children - - @property - def type(self) -> str: - """Get a string type of the represented syntax. - - - "root" for root nodes - - `Token.type` if the node represents an unnested token - - `Token.type` of the opening token, with "_open" suffix stripped, if - the node represents a nester token pair - """ - if self.is_root: - return "root" - if self.token: - return self.token.type - assert self.nester_tokens - return _removesuffix(self.nester_tokens.opening.type, "_open") - - @property - def next_sibling(self: _NodeType) -> _NodeType | None: - """Get the next node in the sequence of siblings. - - Returns `None` if this is the last sibling. - """ - self_index = self.siblings.index(self) - if self_index + 1 < len(self.siblings): - return self.siblings[self_index + 1] - return None - - @property - def previous_sibling(self: _NodeType) -> _NodeType | None: - """Get the previous node in the sequence of siblings. - - Returns `None` if this is the first sibling. 
- """ - self_index = self.siblings.index(self) - if self_index - 1 >= 0: - return self.siblings[self_index - 1] - return None - - def _add_child( - self, - tokens: Sequence[Token], - ) -> None: - """Make a child node for `self`.""" - child = type(self)(tokens, create_root=False) - child.parent = self - self.children.append(child) - - def _set_children_from_tokens(self, tokens: Sequence[Token]) -> None: - """Convert the token stream to a tree structure and set the resulting - nodes as children of `self`.""" - reversed_tokens = list(reversed(tokens)) - while reversed_tokens: - token = reversed_tokens.pop() - - if not token.nesting: - self._add_child([token]) - continue - if token.nesting != 1: - raise ValueError("Invalid token nesting") - - nested_tokens = [token] - nesting = 1 - while reversed_tokens and nesting: - token = reversed_tokens.pop() - nested_tokens.append(token) - nesting += token.nesting - if nesting: - raise ValueError(f"unclosed tokens starting {nested_tokens[0]}") - - self._add_child(nested_tokens) - - def pretty( - self, *, indent: int = 2, show_text: bool = False, _current: int = 0 - ) -> str: - """Create an XML style string of the tree.""" - prefix = " " * _current - text = prefix + f"<{self.type}" - if not self.is_root and self.attrs: - text += " " + " ".join(f"{k}={v!r}" for k, v in self.attrs.items()) - text += ">" - if show_text and not self.is_root and self.type == "text" and self.content: - text += "\n" + textwrap.indent(self.content, prefix + " " * indent) - for child in self.children: - text += "\n" + child.pretty( - indent=indent, show_text=show_text, _current=_current + indent - ) - return text - - def walk( - self: _NodeType, *, include_self: bool = True - ) -> Generator[_NodeType, None, None]: - """Recursively yield all descendant nodes in the tree starting at self. - - The order mimics the order of the underlying linear token - stream (i.e. depth first). - """ - if include_self: - yield self - for child in self.children: - yield from child.walk(include_self=True) - - # NOTE: - # The values of the properties defined below directly map to properties - # of the underlying `Token`s. A root node does not translate to a `Token` - # object, so calling these property getters on a root node will raise an - # `AttributeError`. - # - # There is no mapping for `Token.nesting` because the `is_nested` property - # provides that data, and can be called on any node type, including root. - - def _attribute_token(self) -> Token: - """Return the `Token` that is used as the data source for the - properties defined below.""" - if self.token: - return self.token - if self.nester_tokens: - return self.nester_tokens.opening - raise AttributeError("Root node does not have the accessed attribute") - - @property - def tag(self) -> str: - """html tag name, e.g. \"p\" """ - return self._attribute_token().tag - - @property - def attrs(self) -> dict[str, str | int | float]: - """Html attributes.""" - return self._attribute_token().attrs - - def attrGet(self, name: str) -> None | str | int | float: - """Get the value of attribute `name`, or null if it does not exist.""" - return self._attribute_token().attrGet(name) - - @property - def map(self) -> tuple[int, int] | None: - """Source map info. 
Format: `tuple[ line_begin, line_end ]`""" - map_ = self._attribute_token().map - if map_: - # Type ignore because `Token`s attribute types are not perfect - return tuple(map_) # type: ignore - return None - - @property - def level(self) -> int: - """nesting level, the same as `state.level`""" - return self._attribute_token().level - - @property - def content(self) -> str: - """In a case of self-closing tag (code, html, fence, etc.), it - has contents of this tag.""" - return self._attribute_token().content - - @property - def markup(self) -> str: - """'*' or '_' for emphasis, fence string for fence, etc.""" - return self._attribute_token().markup - - @property - def info(self) -> str: - """fence infostring""" - return self._attribute_token().info - - @property - def meta(self) -> dict: - """A place for plugins to store an arbitrary data.""" - return self._attribute_token().meta - - @property - def block(self) -> bool: - """True for block-level tokens, false for inline tokens.""" - return self._attribute_token().block - - @property - def hidden(self) -> bool: - """If it's true, ignore this element when rendering. - Used for tight lists to hide paragraphs.""" - return self._attribute_token().hidden diff --git a/spaces/langvision/ChatWeb/_next/static/6IdLO6aTsNNii8PXpVk8p/_ssgManifest.js b/spaces/langvision/ChatWeb/_next/static/6IdLO6aTsNNii8PXpVk8p/_ssgManifest.js deleted file mode 100644 index 5b3ff592fd46c8736892a12864fdf3fed8775202..0000000000000000000000000000000000000000 --- a/spaces/langvision/ChatWeb/_next/static/6IdLO6aTsNNii8PXpVk8p/_ssgManifest.js +++ /dev/null @@ -1 +0,0 @@ -self.__SSG_MANIFEST=new Set([]);self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB() \ No newline at end of file diff --git a/spaces/limcheekin/Mistral-7B-Instruct-v0.1-GGUF/start_server.sh b/spaces/limcheekin/Mistral-7B-Instruct-v0.1-GGUF/start_server.sh deleted file mode 100644 index 9ec315638ea647912c58381a9409f1bea74d0180..0000000000000000000000000000000000000000 --- a/spaces/limcheekin/Mistral-7B-Instruct-v0.1-GGUF/start_server.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -# For mlock support -ulimit -l unlimited - -python3 -B main.py diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Donato Karizi Saptac.pdf.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Donato Karizi Saptac.pdf.md deleted file mode 100644 index a88f4c23f4aa531a918323e280a4cdc0c32a6f59..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Donato Karizi Saptac.pdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Donato Karizi Saptac.pdf


Download: https://bytlly.com/2uGwGK



      -
-More ideas from Andjela. Šaptač (The Whisperer) – Donato Karizi. Free PDF books, romance books, ebook PDF, books to read. Šaptač book. More information: free books in PDF format. The handbook is available in PDF format. To view the book in PDF format, you need to install Adobe Acrobat Reader or Foxit Reader. Included: pdf file; djvu file; djvu with a cover; djvu with a cover for Kindle; fb2 with a cover; fb2 with a cover for Kindle. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Flasheff 2 0 Free With EXCLUSIVE Crack.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Flasheff 2 0 Free With EXCLUSIVE Crack.md deleted file mode 100644 index 833975d4d5c266a13b0659dc21ba0de53a94b717..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Flasheff 2 0 Free With EXCLUSIVE Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Flasheff 2 0 Free With Crack


      Download File >>> https://bytlly.com/2uGx9a



      - -The POWDER DIFFRACTION FILE (PDF-2, PDF-4) databases are property ... May 29 2020 0. icdd, icdd band, icdd mac, icddrb, icd data, icdd uta, ... vReveal 3.3.1.13029 Premium (Crack) setup free ... Flasheff 2 Premium Plus 1fdad05405
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Game-Of-Thrones-Season-3-Bluray-720p-X264-BEST.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Game-Of-Thrones-Season-3-Bluray-720p-X264-BEST.md deleted file mode 100644 index d8580f354567fd17f40b4984d2744a1282452e95..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Game-Of-Thrones-Season-3-Bluray-720p-X264-BEST.md +++ /dev/null @@ -1,72 +0,0 @@ -## Game Of Thrones Season 3 Bluray 720p X264 - - - - - - ![Game Of Thrones Season 3 Bluray 720p X264 \[BEST\]](https://static2.opensubtitles.org/gfx/thumbs/4/8/7/8/2178784.jpg) - - - - - -**LINK >> [https://fienislile.blogspot.com/?download=2txySL](https://fienislile.blogspot.com/?download=2txySL)** - - - - - - - - - - - - Here is the title and article I generated for you: - -# Game Of Thrones Season 3 Bluray 720p X264: A Review - - - -If you are a fan of the epic fantasy series Game Of Thrones, you might want to get your hands on the third season on Blu-ray. This season features 10 episodes that cover the events of the first half of A Storm of Swords, the third book in George R.R. Martin's A Song of Ice and Fire saga. The season follows the aftermath of the War of the Five Kings, as new alliances are formed and old enemies are confronted. The season also introduces new characters and locations, such as the wildlings beyond the Wall, the Brotherhood Without Banners, and the slave city of Astapor. - - - -The Blu-ray release of Game Of Thrones Season 3 offers a stunning visual and audio quality that enhances the immersive experience of watching the show. The 720p resolution and x264 codec deliver a sharp and detailed picture that captures every nuance of the rich production design and cinematography. The TrueHD Atmos 7.1 sound track provides a dynamic and immersive sound field that envelops you in the action and drama. The Blu-ray also comes with a plethora of bonus features, such as audio commentaries, deleted scenes, behind-the-scenes documentaries, and interactive guides. - - - -Game Of Thrones Season 3 Bluray 720p X264 is a must-have for any fan of the show or the books. It offers a thrilling and captivating journey into a world of fantasy, politics, and intrigue. Whether you want to relive your favorite moments or catch up on what you missed, this Blu-ray release will not disappoint you. - -Here is the continuation of the article: - -Game Of Thrones Season 3 Bluray 720p X264 not only offers a high-quality viewing experience, but also a thrilling and engaging story that will keep you hooked from start to finish. Here is a brief summary of each episode: - - - -- Episode 1: Valar Dohaeris - Jon Snow (Kit Harington) meets Mance Rayder (Ciarán Hinds), the King Beyond the Wall, and tries to infiltrate the wildling army. Tyrion Lannister (Peter Dinklage) demands his reward from his father Tywin (Charles Dance) for saving King's Landing. Daenerys Targaryen (Emilia Clarke) arrives in Astapor, where she sees the Unsullied, an army of slave soldiers. - -- Episode 2: Dark Wings, Dark Words - Bran Stark (Isaac Hempstead Wright) meets Jojen (Thomas Brodie-Sangster) and Meera Reed (Ellie Kendrick), who have special abilities like him. Arya Stark (Maisie Williams) encounters the Brotherhood Without Banners, a group of outlaws led by Beric Dondarrion (Richard Dormer). Jaime Lannister (Nikolaj Coster-Waldau) and Brienne of Tarth (Gwendoline Christie) are captured by Roose Bolton's men. 
- -- Episode 3: Walk of Punishment - Robb Stark (Richard Madden) attends his grandfather's funeral at Riverrun and tries to mend his relationship with Walder Frey (David Bradley). Tyrion becomes the new Master of Coin and discovers the crown's debt. Daenerys agrees to buy all the Unsullied in exchange for one of her dragons. - -- Episode 4: And Now His Watch Is Ended - The Night's Watch mutinies against Craster (Robert Pugh) and Lord Commander Mormont (James Cosmo). Jaime loses his right hand to Locke (Noah Taylor), one of Bolton's men. Daenerys reveals her plan to free the Unsullied and sack Astapor. - -- Episode 5: Kissed by Fire - Jon and Ygritte (Rose Leslie) consummate their relationship in a cave. The Hound (Rory McCann) fights Beric in a trial by combat and wins. Stannis Baratheon (Stephen Dillane) confesses his infidelity to his wife Selyse (Tara Fitzgerald). Tywin arranges marriages for Tyrion and Cersei Lannister (Lena Headey). - -- Episode 6: The Climb - Jon and the wildlings climb the Wall. Theon Greyjoy (Alfie Allen) is tortured by Ramsay Snow (Iwan Rheon), Bolton's bastard son. Melisandre (Carice van Houten) takes Gendry (Joe Dempsie), Robert Baratheon's bastard son, from the Brotherhood. Littlefinger (Aidan Gillen) betrays Ros (Esmé Bianco) to Joffrey Baratheon (Jack Gleeson). Tywin and Olenna Tyrell (Diana Rigg) negotiate their grandchildren's marriages. - -- Episode 7: The Bear and the Maiden Fair - Robb learns that his wife Talisa (Oona Chaplin) is pregnant. Jaime rescues Brienne from a bear pit and decides to return to King's Landing with her. Daenerys meets with the representatives of Yunkai, a slave city, and demands their surrender. Sansa Stark (Sophie Turner) is distraught over her impending marriage to Tyrion. - -- Episode 8: Second Sons - Tyrion and Sansa wed in an awkward ceremony. Daenerys gains the loyalty of Daario Naharis (Ed Skrein), the leader of the Second Sons, a mercenary company hired by Yunkai. Samwell Tarly (John Bradley) kills a White Walker with a dragonglass dagger while protecting Gilly (Hannah Murray) and her baby. - -- Episode 9: The Rains of Castamere - Robb attends the wedding of his uncle Edmure Tully (Tobias Menzies) to Roslin Frey, but is betrayed by Walder Frey and Roose Bolton in an event known as the Red Wedding, where he, his wife, his mother Catelyn Stark (Michelle Fairley), and most of his army are slaughtered. 
Arya arrives at the Twins with Sandor Clegane, but is dfd1c89656 - - - - - - - - - diff --git a/spaces/ljjggr/bingo/src/components/chat-image.tsx b/spaces/ljjggr/bingo/src/components/chat-image.tsx deleted file mode 100644 index 05ecc9771eada27a0f2d160bb01cba170d37bb09..0000000000000000000000000000000000000000 --- a/spaces/ljjggr/bingo/src/components/chat-image.tsx +++ /dev/null @@ -1,170 +0,0 @@ -import { - useEffect, - useState, - useCallback, - ChangeEvent, - ClipboardEvent, - MouseEventHandler, - FormEvent, - useRef -} from "react" -import Image from 'next/image' -import PasteIcon from '@/assets/images/paste.svg' -import UploadIcon from '@/assets/images/upload.svg' -import CameraIcon from '@/assets/images/camera.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { cn } from '@/lib/utils' - -interface ChatImageProps extends Pick, 'uploadImage'> {} - -const preventDefault: MouseEventHandler = (event) => { - event.nativeEvent.stopImmediatePropagation() -} - -const toBase64 = (file: File): Promise => new Promise((resolve, reject) => { - const reader = new FileReader() - reader.readAsDataURL(file) - reader.onload = () => resolve(reader.result as string) - reader.onerror = reject -}) - -export function ChatImage({ children, uploadImage }: React.PropsWithChildren) { - const videoRef = useRef(null) - const canvasRef = useRef(null) - const mediaStream = useRef() - const [panel, setPanel] = useState('none') - - const upload = useCallback((url: string) => { - if (url) { - uploadImage(url) - } - setPanel('none') - }, [panel]) - - const onUpload = useCallback(async (event: ChangeEvent) => { - const file = event.target.files?.[0] - if (file) { - const fileDataUrl = await toBase64(file) - if (fileDataUrl) { - upload(fileDataUrl) - } - } - }, []) - - const onPaste = useCallback((event: ClipboardEvent) => { - const pasteUrl = event.clipboardData.getData('text') ?? '' - upload(pasteUrl) - }, []) - - const onEnter = useCallback((event: FormEvent) => { - event.preventDefault() - event.stopPropagation() - // @ts-ignore - const inputUrl = event.target.elements.image.value - if (inputUrl) { - upload(inputUrl) - } - }, []) - - const openVideo: MouseEventHandler = async (event) => { - event.stopPropagation() - setPanel('camera-mode') - } - - const onCapture = () => { - if (canvasRef.current && videoRef.current) { - const canvas = canvasRef.current - canvas.width = videoRef.current!.videoWidth - canvas.height = videoRef.current!.videoHeight - canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height) - const cameraUrl = canvas.toDataURL('image/jpeg') - upload(cameraUrl) - } - } - - useEffect(() => { - const handleBlur = () => { - if (panel !== 'none') { - setPanel('none') - } - } - document.addEventListener('click', handleBlur) - return () => { - document.removeEventListener('click', handleBlur) - } - }, [panel]) - - useEffect(() => { - if (panel === 'camera-mode') { - navigator.mediaDevices.getUserMedia({ video: true, audio: false }) - .then(videoStream => { - mediaStream.current = videoStream - if (videoRef.current) { - videoRef.current.srcObject = videoStream - } - }) - } else { - if (mediaStream.current) { - mediaStream.current.getTracks().forEach(function(track) { - track.stop() - }) - mediaStream.current = undefined - } - } - }, [panel]) - - return ( -
      -
      panel === 'none' ? setPanel('normal') : setPanel('none')}>{children}
      -
      -
      -
      -

      添加图像

      -
      -
      - paste -
      - e.stopPropagation()} - /> -
      -
      -
      - - -
      -
      - {panel === 'camera-mode' &&
      -
      -
      -
      -
      -
      -
      -
      } -
      -
      - ) -} diff --git a/spaces/llmonitor/benchmarks/app/layout.js b/spaces/llmonitor/benchmarks/app/layout.js deleted file mode 100644 index aaba2cb7b896fafc08eaab1e1e7954d0550c7fd8..0000000000000000000000000000000000000000 --- a/spaces/llmonitor/benchmarks/app/layout.js +++ /dev/null @@ -1,57 +0,0 @@ -import Link from "next/link" -import "@/styles/globals.css" -import { Suspense } from "react" -import PlausibleProvider from "next-plausible" - -export const metadata = { - title: "LLMonitor Benchmarks", - description: "Benchmarks and scoring of LLMs", -} - -export default function RootLayout({ children }) { - return ( - - - - - -
      -

      LLMonitor Benchmarks

      - -

      - leaderboard - {" | "} - dataset - {" | "} - compare - {" | "} - about -

      -
      - - Loading...

      }>{children}
      -
      - - - - ) -} diff --git a/spaces/ltgoslo/ssa-perin/model/head/node_centric_head.py b/spaces/ltgoslo/ssa-perin/model/head/node_centric_head.py deleted file mode 100644 index ce4a39c78f5c2f144cf2992a9f25f8d10364633d..0000000000000000000000000000000000000000 --- a/spaces/ltgoslo/ssa-perin/model/head/node_centric_head.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 - -import torch - -from model.head.abstract_head import AbstractHead -from data.parser.to_mrp.node_centric_parser import NodeCentricParser -from utility.cross_entropy import binary_cross_entropy - - -class NodeCentricHead(AbstractHead): - def __init__(self, dataset, args, initialize): - config = { - "label": True, - "edge presence": True, - "edge label": False, - "anchor": True, - "source_anchor": False, - "target_anchor": False - } - super(NodeCentricHead, self).__init__(dataset, args, config, initialize) - - self.source_id = dataset.label_field.vocab.stoi["Source"] + 1 - self.target_id = dataset.label_field.vocab.stoi["Target"] + 1 - self.parser = NodeCentricParser(dataset) diff --git a/spaces/lxe/simple-llm-finetuner/config.py b/spaces/lxe/simple-llm-finetuner/config.py deleted file mode 100644 index c940df06cf264097850ab81b63bd35f141acb4e0..0000000000000000000000000000000000000000 --- a/spaces/lxe/simple-llm-finetuner/config.py +++ /dev/null @@ -1,64 +0,0 @@ -import argparse -import torch - -HAS_CUDA = torch.cuda.is_available() -DEVICE = torch.device('cuda' if HAS_CUDA else 'cpu') - -parser = argparse.ArgumentParser(description='Simple LLM Finetuner') -parser.add_argument('--models', nargs='+', default=[ - 'decapoda-research/llama-7b-hf', - 'cerebras/Cerebras-GPT-2.7B', - 'cerebras/Cerebras-GPT-1.3B', - 'EleutherAI/gpt-neo-2.7B' -], help='List of models to use') - -parser.add_argument('--device-map', type=str, default='', help='Device map to use') -parser.add_argument('--model', type=str, default='cerebras/Cerebras-GPT-2.7B', help='Model to use') -parser.add_argument('--max-seq-length', type=int, default=256, help='Max sequence length') -parser.add_argument('--micro-batch-size', type=int, default=12, help='Micro batch size') -parser.add_argument('--gradient-accumulation-steps', type=int, default=8, help='Gradient accumulation steps') -parser.add_argument('--epochs', type=int, default=3, help='Number of epochs') -parser.add_argument('--learning-rate', type=float, default=3e-4, help='Learning rate') -parser.add_argument('--lora-r', type=int, default=8, help='LORA r') -parser.add_argument('--lora-alpha', type=int, default=32, help='LORA alpha') -parser.add_argument('--lora-dropout', type=float, default=0.01, help='LORA dropout') -parser.add_argument('--max-new-tokens', type=int, default=80, help='Max new tokens') -parser.add_argument('--temperature', type=float, default=0.1, help='Temperature') -parser.add_argument('--top-k', type=int, default=40, help='Top k') -parser.add_argument('--top-p', type=float, default=0.3, help='Top p') -parser.add_argument('--repetition-penalty', type=float, default=1.5, help='Repetition penalty') -parser.add_argument('--do-sample', action='store_true', help='Enable sampling') -parser.add_argument('--num-beams', type=int, default=1, help='Number of beams') -parser.add_argument('--share', action='store_true', default=False, help='Whether to deploy the interface with Gradio') - -args = parser.parse_args() - -MODELS = args.models -DEVICE_MAP = {'': 0} if not args.device_map else args.device_map -MODEL = args.model - -TRAINING_PARAMS = { - 'max_seq_length': 
args.max_seq_length, - 'micro_batch_size': args.micro_batch_size, - 'gradient_accumulation_steps': args.gradient_accumulation_steps, - 'epochs': args.epochs, - 'learning_rate': args.learning_rate, -} - -LORA_TRAINING_PARAMS = { - 'lora_r': args.lora_r, - 'lora_alpha': args.lora_alpha, - 'lora_dropout': args.lora_dropout, -} - -GENERATION_PARAMS = { - 'max_new_tokens': args.max_new_tokens, - 'temperature': args.temperature, - 'top_k': args.top_k, - 'top_p': args.top_p, - 'repetition_penalty': args.repetition_penalty, - 'do_sample': args.do_sample, - 'num_beams': args.num_beams, -} - -SHARE = args.share diff --git a/spaces/ma-xu/LIVE/thrust/internal/scripts/wiki2tex.py b/spaces/ma-xu/LIVE/thrust/internal/scripts/wiki2tex.py deleted file mode 100644 index 67f658b2d6fd9d08f7e6e0d4eb468b9905ab43d9..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/internal/scripts/wiki2tex.py +++ /dev/null @@ -1,194 +0,0 @@ -''' -Convert Google Code .wiki files into .tex formatted files. - -Output is designed to be included within a larger TeX project, it is -not standalone. - -''' - -import sys -import re -import codecs - -print(sys.argv) - -''' -A "rule" is a begin tag, an end tag, and how to reformat the inner text -(function) -''' - -def encase(pre, post, strip=False): - """Return a function that prepends pre and postpends post""" - def f(txt): - if strip: - return pre + txt.strip() + post - else: - return pre + txt + post - return f - -def constant(text): - def f(txt): - return text - return f - -def encase_with_rules(pre, post, rules, strip=False): - def f(txt): - if strip: - return pre + apply_rules(txt, rules).strip() + post - else: - return pre + apply_rules(txt, rules) + post - return f - -def encase_escape_underscore(pre, post): - def f(txt): - txt = sub(r'_', r'\_', txt) - return pre + txt + post - return f - -def sub(pat, repl, txt): - """Substitute in repl for pat in txt, txt can be multiple lines""" - return re.compile(pat, re.MULTILINE).sub(repl, txt) - -def process_list(rules): - def f(txt): - txt = ' *' + txt # was removed to match begin tag of list - res = '\\begin{itemize}\n' - for ln in txt.split('\n'): - # Convert " *" to "\item " - ln = sub(r'^ \*', r'\\item ', ln) - res += apply_rules(ln, rules) + '\n' - res += '\\end{itemize}\n' - return res - return f - -def process_link(rules): - def f(txt): - lst = txt.split(' ') - lnk = lst[0] - desc = apply_rules(' '.join(lst[1:]), rules) - if lnk[:7] == 'http://': - desc = apply_rules(' '.join(lst[1:]), rules) - return r'\href{' + lnk + r'}{' + desc + r'}' - if len(lst) > 1: - return r'\href{}{' + desc + r'}' - return r'\href{}{' + lnk + r'}' - return f - -# Some rules can be used inside some other rules (backticks in section names) - -link_rules = [ - ['_', '', constant(r'\_')], -] - -section_rules = [ - ['`', '`', encase_escape_underscore(r'\texttt{', r'}')], -] - -item_rules = [ - ['`', '`', encase(r'\verb|', r'|')], - ['[', ']', process_link(link_rules)], -] - -# Main rules for Latex formatting - -rules = [ - ['{{{', '}}}', encase(r'\begin{lstlisting}[language=c++]', r'\end{lstlisting}')], - ['[', ']', process_link(link_rules)], - [' *', '\n\n', process_list(item_rules)], - ['"', '"', encase("``", "''")], - ['`', '`', encase(r'\verb|', r'|')], - ['*', '*', encase(r'\emph{', r'}')], - ['_', '_', encase(r'\emph{', r'}')], - ['==', '==', encase_with_rules(r'\section{', r'}', section_rules, True)], - ['=', '=', encase_with_rules(r'\chapter{', r'}', section_rules, True)], - ['(e.g. 
f(x) -> y and f(x,y) -> ', 'z)', constant(r'(e.g. $f(x)\to y$ and $f(x,y)\to z$)')], -] - -def match_rules(txt, rules): - """Find rule that first matches in txt""" - # Find first begin tag - first_begin_loc = 10e100 - matching_rule = None - for rule in rules: - begin_tag, end_tag, func = rule - loc = txt.find(begin_tag) - if loc > -1 and loc < first_begin_loc: - first_begin_loc = loc - matching_rule = rule - return (matching_rule, first_begin_loc) - -def apply_rules(txt, rules): - """Apply set of rules to give txt, return transformed version of txt""" - matching_rule, first_begin_loc = match_rules(txt, rules) - if matching_rule is None: - return txt - begin_tag, end_tag, func = matching_rule - end_loc = txt.find(end_tag, first_begin_loc + 1) - if end_loc == -1: - sys.exit('Could not find end tag {0} after position {1}'.format(end_tag, first_begin_loc + 1)) - inner_txt = txt[first_begin_loc + len(begin_tag) : end_loc] - # Copy characters up until begin tag - # Then have output of rule function on inner text - new_txt_start = txt[:first_begin_loc] + func(inner_txt) - # Follow with the remaining processed text - remaining_txt = txt[end_loc + len(end_tag):] - return new_txt_start + apply_rules(remaining_txt, rules) - -def split_sections(contents): - """Given one string of all file contents, return list of sections - - Return format is list of pairs, each pair has section title - and list of lines. Result is ordered as the original input. - - """ - res = [] - cur_section = '' - section = [] - for ln in contents.split('\n'): - if len(ln) > 0 and ln[0] == '=': - # remove = formatting from line - section_title = sub(r'^\=+ (.*) \=+', r'\1', ln) - res.append((cur_section, section)) - cur_section = section_title - section = [ln] - else: - section.append(ln) - res.append((cur_section, section)) - return res - -def filter_sections(splitinput, removelst): - """Take split input and remove sections in removelst""" - res = [] - for sectname, sectcontents in splitinput: - if sectname in removelst: - pass - else: - res.extend(sectcontents) - # convert to single string for output - return '\n'.join(res) - - -def main(): - infile = codecs.open(sys.argv[1], encoding='utf-8') - outfile = codecs.open(sys.argv[2], mode='w', encoding='utf-8') - - contents = infile.read() - - # Remove first three lines - contents = '\n'.join(contents.split('\n')[3:]) - - # Split sections and filter out some of them - sections = split_sections(contents) - contents = filter_sections(sections, ['Introduction', 'Prerequisites', 'Simple Example']) - - # Convert to latex format - contents = apply_rules(contents, rules) - - infile.close() - outfile.write(contents) - outfile.close() - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/spaces/maher13/arabic-asr/README.md b/spaces/maher13/arabic-asr/README.md deleted file mode 100644 index e1cbb40e7df5c1a33ef51439750149df1a4e0334..0000000000000000000000000000000000000000 --- a/spaces/maher13/arabic-asr/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Arabic Asr -emoji: 🦀 -colorFrom: gray -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - 
-`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Face_Enhancement/models/networks/sync_batchnorm/__init__.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/Face_Enhancement/models/networks/sync_batchnorm/__init__.py deleted file mode 100644 index 6d9b36c74b1808b56ded68cf080a689db7e0ee4e..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Face_Enhancement/models/networks/sync_batchnorm/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -# File : __init__.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -from .batchnorm import set_sbn_eps_mode -from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d -from .batchnorm import patch_sync_batchnorm, convert_model -from .replicate import DataParallelWithCallback, patch_replication_callback diff --git a/spaces/marcusphantom/01-3DmodelDemo/README.md b/spaces/marcusphantom/01-3DmodelDemo/README.md deleted file mode 100644 index 31c75c24ffb2e9e7ce01f1227f78a72f20fd18a9..0000000000000000000000000000000000000000 --- a/spaces/marcusphantom/01-3DmodelDemo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 01 3DmodelDemo -emoji: 😻 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mattricesound/RemFx/scripts/download_eval_datasets.sh b/spaces/mattricesound/RemFx/scripts/download_eval_datasets.sh deleted file mode 100644 index 409a9dffaa06e7c5d4d462672006c21375c60e91..0000000000000000000000000000000000000000 --- a/spaces/mattricesound/RemFx/scripts/download_eval_datasets.sh +++ /dev/null @@ -1,25 +0,0 @@ -#! 
/bin/bash - -mkdir -p RemFX_eval_datasets -cd RemFX_eval_datasets -mkdir -p processed -cd processed -wget https://zenodo.org/record/8187288/files/0-0.zip?download=1 -O 0-0.zip -wget https://zenodo.org/record/8187288/files/1-1.zip?download=1 -O 1-1.zip -wget https://zenodo.org/record/8187288/files/2-2.zip?download=1 -O 2-2.zip -wget https://zenodo.org/record/8187288/files/3-3.zip?download=1 -O 3-3.zip -wget https://zenodo.org/record/8187288/files/4-4.zip?download=1 -O 4-4.zip -wget https://zenodo.org/record/8187288/files/5-5.zip?download=1 -O 5-5.zip -unzip 0-0.zip -unzip 1-1.zip -unzip 2-2.zip -unzip 3-3.zip -unzip 4-4.zip -unzip 5-5.zip -rm 0-0.zip -rm 1-1.zip -rm 2-2.zip -rm 3-3.zip -rm 4-4.zip -rm 5-5.zip - diff --git a/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/zari-convert/README.md b/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/zari-convert/README.md deleted file mode 100644 index 9fa8bdacc8cd9e04ace5253460efebb65fca205c..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/zari-convert/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Saves models to disk to package in dockerfile - -## zari-bert-cda - -Converts [zari-bert-cda](https://github.com/google-research-datasets/Zari) to a Hugging Face model. - -Download original model - -``` -mkdir raw -cd raw -curl https://storage.googleapis.com/bert_models/filbert/2020_10_13/zari-bert-cda.tar.gz -o zari-bert-cda.tar.gz -tar xvzf zari-bert-cda.tar.gz -``` - -Convert - -``` -source ../../env/bin/activate -transformers-cli convert --model_type bert \ - --tf_checkpoint zari-bert-cda/model.ckpt \ - --config zari-bert-cda/bert_config.json \ - --pytorch_dump_output zari-bert-cda/pytorch_model.bin - -cp zari-bert-cda/bert_config.json zari-bert-cda/config.json -``` - -Copy to docker directory - -``` -mkdir ../../py/zari-bert-cda - -cp zari-bert-cda/config.json ../../py/zari-bert-cda/config.json -cp zari-bert-cda/vocab.txt ../../py/zari-bert-cda/vocab.txt -cp zari-bert-cda/pytorch_model.bin ../../py/zari-bert-cda/pytorch_model.bin -``` - -## bert-large-uncased-whole-word-masking - -``` -cd ../py -source env/bin/activate -python model_bert_large_export.py -``` - -## Upload files - -``` -cd ../py - -gsutil -o "GSUtil:parallel_process_count=1" -m rsync -r zari-bert-cda gs://uncertainty-over-space/zari-bert-cda -``` - -https://storage.googleapis.com/uncertainty-over-space/zari/zari-bert-cda/vocab.txt diff --git a/spaces/merve/hidden-bias/README.md b/spaces/merve/hidden-bias/README.md deleted file mode 100644 index 231a06c9c19266924a363f4772fc789098480355..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: hidden-bias -emoji: 🪄 -colorFrom: green -colorTo: purple -sdk: static -pinned: false -license: apache-2.0 -app_file: public/hidden-bias/index.html ---- diff --git a/spaces/merve/hidden-bias/public/uncertainty-calibration/style.css b/spaces/merve/hidden-bias/public/uncertainty-calibration/style.css deleted file mode 100644 index 8073cf0a59eac0be0e293b35af5255c40c063e21..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/uncertainty-calibration/style.css +++ /dev/null @@ -1,89 +0,0 @@ -svg{ - overflow: visible; -} - -text{ - fill: #202124; - user-select: none; -} - -.domain{ - display: none; -} - -.thresholds, .threshold > g{ - cursor: pointer; -} - -svg{ - user-select: none; -} - -text.axis-label .legend-text{ - font-family: 'Roboto'; - font-style: normal; - font-size: 
16px; - line-height: 20px; - /* identical to box height, or 125% */ - - fill: #000; -} - -.axis text{ - font-size: 10px; -} - -text{ - text-shadow: 0 1px 0 #fff, 1px 0 0 #fff, 0 -1px 0 #fff, -1px 0 0 #fff; -} - - - - -.bucket text{ - /*text-shadow: 0 1px 0 #000, 1px 0 0 #000, 0 -1px 0 #000, -1px 0 0 #000;*/ - /*fill: #fff;*/ - font-size: 11px; -} - - -.big-text{ - font-variant-numeric: tabular-nums; - font-size: 16px; -} - -#card{ - display: flex; - flex-direction: column; - align-items: flex-start; - padding: 24px 24px; - gap: 6px; - - background: #EDF4EC; - border: 1px solid #34A853; - box-sizing: border-box; - border-radius: 4px; -} - -text.val-text{ - background: #DFE9E1; - border: 1px solid #476C63; - box-sizing: border-box; - border-radius: 4px; - fill: #2A4C4A; - text-shadow: none; -} - -.val-box{ - fill: #DFE9E1; - stroke: #476C63; - opacity: 1; -} - -.legend-title{ - fill: #002622; -} - -h3 { - color: #00695C; -} \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/public/dataset-worldviews/person-photos.js b/spaces/merve/measuring-fairness/public/dataset-worldviews/person-photos.js deleted file mode 100644 index 305b037acebf14e083ead577ce566ad39b81c531..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/public/dataset-worldviews/person-photos.js +++ /dev/null @@ -1,119 +0,0 @@ - -function createPhotoScroller(){ - - var base_path = 'img/woman_washing_clothes.jpeg' - var data = [ - { - 'path': 'img/labels_1.svg', - 'alt': 'Image of a woman washing clothes with bounding boxes including \'person\', and \'bucket\'', - 'x': 198, - 'y': 30, - 'width': 305, - 'height': 400, - }, - - { - 'path': 'img/labels_4.svg', - 'alt': 'Image of a woman washing clothes with bounding boxes including \'parent\', and \'laundry\'', - 'x': 110, - 'y': 60, - 'width': 450, - 'height': 470, - }, - - - { - 'path': 'img/labels_2.svg', - 'alt': 'Image of a woman washing clothes with bounding boxes including \'hair_boho\', and \'decor_outdoor_rustic\'', - 'x': 198, - 'y': -35, - 'width': 395, - 'height': 500 - }, - - { - 'path': 'img/labels_3.svg', - 'alt': 'Image of a woman washing clothes with one bounding box around her, labeled \'pedestrian\'', - 'x': 190, - 'y': 65, - 'width': 190, - 'height': 315 - }, - ]; - - - var photoIndex = 0; - - var c = d3.conventions({ - sel: d3.select('.person-photos').html(''), - height: 550 - }) - - var photoSel = c.svg.append('svg:image') - .attr('x', 50) - .attr('y', 50) - .attr('width', 700) - .attr('height', 500) - .attr('xlink:href', base_path) - - var photoSel = c.svg.appendMany('svg:image', data) - .attr('x', d => d.x) - .attr('y', d => d.y) - .attr('width', d => d.width) - .attr('height', d => d.height) - .attr('xlink:href', d => d.path) - .attr('alt', d => d.alt) - - - var buttonHeight = 35 - var buttonWidth = 130 - - var buttonSel = c.svg.appendMany('g.photo-button', data) - .translate((d,i) => [(i * 170) + 100, 0]) - .at({ - // class: "dropdown" - }) - .on('click', function(d, i){ - photoIndex = i - setActiveImage() - timer.stop(); - }) - - buttonSel.append('rect') - .at({ - height: buttonHeight, - width: buttonWidth, - // fill: '#fff' - }) - - buttonSel.append('text') - .at({ - textAnchor: 'middle', - // dominantBaseline: 'central', - dy: '.33em', - x: buttonWidth/2, - y: buttonHeight/2, - class: "monospace" - }) - .text((d,i) => 'ground truth ' + (i + 1)) - - // buttonSel.classed('dropdown', true); - - if (window.__photoPersonTimer) window.__photoPersonTimer.stop() - var timer = window.__photoPersonTimer = 
d3.interval(() => { - photoIndex = (photoIndex + 1) % data.length; - setActiveImage() - }, 2000) - - function setActiveImage(i){ - photoSel.st({opacity: (d, i) => i == photoIndex ? 1 : 0 }) - buttonSel.classed('is-active-button', (d, i) => i == photoIndex) - } - setActiveImage() -} - -createPhotoScroller(); - - - - diff --git a/spaces/merve/measuring-fairness/source/third_party/regl.min.js b/spaces/merve/measuring-fairness/source/third_party/regl.min.js deleted file mode 100644 index 7ecf11321eda67a76e019d6881f42b52f3d39c78..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/third_party/regl.min.js +++ /dev/null @@ -1,171 +0,0 @@ -(function(Z,ka){"object"===typeof exports&&"undefined"!==typeof module?module.exports=ka():"function"===typeof define&&define.amd?define(ka):Z.createREGL=ka()})(this,function(){function Z(a,b){this.id=Db++;this.type=a;this.data=b}function ka(a){if(0===a.length)return[];var b=a.charAt(0),c=a.charAt(a.length-1);if(1>>=b;c=(255>>=c;b|=c;c=(15>>=c;b|=c;c=(3>>c>>1}function hb(){function a(a){a:{for(var b=16;268435456>=b;b*=16)if(a<=b){a=b;break a}a=0}b=c[gb(a)>>2];return 0>2].push(a)}var c=R(8,function(){return[]});return{alloc:a,free:b,allocType:function(b,c){var d=null;switch(b){case 5120:d=new Int8Array(a(c),0,c);break;case 5121:d=new Uint8Array(a(c),0,c);break;case 5122:d=new Int16Array(a(2*c),0,c);break;case 5123:d=new Uint16Array(a(2*c),0,c);break;case 5124:d=new Int32Array(a(4*c),0,c);break;case 5125:d=new Uint32Array(a(4*c),0,c);break;case 5126:d=new Float32Array(a(4*c),0,c);break;default:return null}return d.length!== -c?d.subarray(0,c):d},freeType:function(a){b(a.buffer)}}}function la(a){return!!a&&"object"===typeof a&&Array.isArray(a.shape)&&Array.isArray(a.stride)&&"number"===typeof a.offset&&a.shape.length===a.stride.length&&(Array.isArray(a.data)||O(a.data))}function ib(a,b,c,e,f,d){for(var q=0;qe&&(e=d.buffer.byteLength,5123===k?e>>=1:5125===k&&(e>>=2));d.vertCount=e;e=g;0>g&&(e=4,g=d.buffer.dimension,1===g&&(e=0),2===g&&(e=1),3===g&&(e=4));d.primType=e}function q(a){e.elementsCount--;delete n[a.id];a.buffer.destroy();a.buffer=null}var n={},v=0,k={uint8:5121,uint16:5123};b.oes_element_index_uint&&(k.uint32=5125);f.prototype.bind=function(){this.buffer.bind()};var u=[];return{create:function(a, -b){function l(a){if(a)if("number"===typeof a)g(a),h.primType=4,h.vertCount=a|0,h.type=5121;else{var b=null,c=35044,e=-1,f=-1,m=0,n=0;if(Array.isArray(a)||O(a)||la(a))b=a;else if("data"in a&&(b=a.data),"usage"in a&&(c=nb[a.usage]),"primitive"in a&&(e=Ka[a.primitive]),"count"in a&&(f=a.count|0),"type"in a&&(n=k[a.type]),"length"in a)m=a.length|0;else if(m=f,5123===n||5122===n)m*=2;else if(5125===n||5124===n)m*=4;d(h,b,c,e,f,m,n)}else g(),h.primType=4,h.vertCount=0,h.type=5121;return l}var g=c.create(null, -34963,!0),h=new f(g._buffer);e.elementsCount++;l(a);l._reglType="elements";l._elements=h;l.subdata=function(a,b){g.subdata(a,b);return l};l.destroy=function(){q(h)};return l},createStream:function(a){var b=u.pop();b||(b=new f(c.create(null,34963,!0,!1)._buffer));d(b,a,35040,-1,-1,0,0);return b},destroyStream:function(a){u.push(a)},getElements:function(a){return"function"===typeof a&&a._elements instanceof f?a._elements:null},clear:function(){I(n).forEach(q)}}}function ob(a){for(var b=G.allocType(5123, -a.length),c=0;c>>31<<15,d=(e<<1>>>24)-127,e=e>>13&1023;b[c]=-24>d?f:-14>d?f+(e+1024>>-14-d):15>=e,c.height>>=e,x(c,d[e]),a.mipmask|=1<b;++b)a.images[b]=null;return a}function ya(a){for(var b=a.images,c=0;cb){for(var 
c=0;c=--this.refCount&&F(this)}});q.profile&&(d.getTotalTextureSize=function(){var a=0;Object.keys(ea).forEach(function(b){a+=ea[b].stats.size});return a});return{create2D:function(b,c){function e(a,b){var c=f.texInfo;w.call(c);var d=ma();"number"===typeof a?"number"===typeof b?p(d,a|0,b|0):p(d,a|0,a|0):a?(H(c,a),P(d,a)):p(d,1,1);c.genMipmaps&&(d.mipmask=(d.width<<1)-1);f.mipmask=d.mipmask;v(f, -d);f.internalformat=d.internalformat;e.width=d.width;e.height=d.height;T(f);t(d,3553);M(c,3553);wa();ya(d);q.profile&&(f.stats.size=La(f.internalformat,f.type,d.width,d.height,c.genMipmaps,!1));e.format=ca[f.internalformat];e.type=K[f.type];e.mag=Fa[c.magFilter];e.min=pa[c.minFilter];e.wrapS=qa[c.wrapS];e.wrapT=qa[c.wrapT];return e}var f=new y(3553);ea[f.id]=f;d.textureCount++;e(b,c);e.subimage=function(a,b,c,d){b|=0;c|=0;d|=0;var y=g();v(y,f);y.width=0;y.height=0;x(y,a);y.width=y.width||(f.width>> -d)-b;y.height=y.height||(f.height>>d)-c;T(f);l(y,3553,b,c,d);wa();h(y);return e};e.resize=function(b,c){var d=b|0,g=c|0||d;if(d===f.width&&g===f.height)return e;e.width=f.width=d;e.height=f.height=g;T(f);for(var y=0;f.mipmask>>y;++y){var h=d>>y,z=g>>y;if(!h||!z)break;a.texImage2D(3553,y,f.format,h,z,0,f.format,f.type,null)}wa();q.profile&&(f.stats.size=La(f.internalformat,f.type,d,g,!1,!1));return e};e._reglType="texture2d";e._texture=f;q.profile&&(e.stats=f.stats);e.destroy=function(){f.decRef()}; -return e},createCube:function(b,c,e,f,n,r){function m(a,b,c,d,e,f){var g,da=A.texInfo;w.call(da);for(g=0;6>g;++g)F[g]=ma();if("number"===typeof a||!a)for(a=a|0||1,g=0;6>g;++g)p(F[g],a,a);else if("object"===typeof a)if(b)P(F[0],a),P(F[1],b),P(F[2],c),P(F[3],d),P(F[4],e),P(F[5],f);else if(H(da,a),k(A,a),"faces"in a)for(a=a.faces,g=0;6>g;++g)v(F[g],A),P(F[g],a[g]);else for(g=0;6>g;++g)P(F[g],a);v(A,F[0]);A.mipmask=da.genMipmaps?(F[0].width<<1)-1:F[0].mipmask;A.internalformat=F[0].internalformat;m.width= -F[0].width;m.height=F[0].height;T(A);for(g=0;6>g;++g)t(F[g],34069+g);M(da,34067);wa();q.profile&&(A.stats.size=La(A.internalformat,A.type,m.width,m.height,da.genMipmaps,!0));m.format=ca[A.internalformat];m.type=K[A.type];m.mag=Fa[da.magFilter];m.min=pa[da.minFilter];m.wrapS=qa[da.wrapS];m.wrapT=qa[da.wrapT];for(g=0;6>g;++g)ya(F[g]);return m}var A=new y(34067);ea[A.id]=A;d.cubeCount++;var F=Array(6);m(b,c,e,f,n,r);m.subimage=function(a,b,c,d,e){c|=0;d|=0;e|=0;var f=g();v(f,A);f.width=0;f.height=0; -x(f,b);f.width=f.width||(A.width>>e)-c;f.height=f.height||(A.height>>e)-d;T(A);l(f,34069+a,c,d,e);wa();h(f);return m};m.resize=function(b){b|=0;if(b!==A.width){m.width=A.width=b;m.height=A.height=b;T(A);for(var c=0;6>c;++c)for(var d=0;A.mipmask>>d;++d)a.texImage2D(34069+c,d,A.format,b>>d,b>>d,0,A.format,A.type,null);wa();q.profile&&(A.stats.size=La(A.internalformat,A.type,m.width,m.height,!1,!0));return m}};m._reglType="textureCube";m._texture=A;q.profile&&(m.stats=A.stats);m.destroy=function(){A.decRef()}; -return m},clear:function(){for(var b=0;bc;++c)if(0!==(b.mipmask&1<>c,b.height>>c,0,b.internalformat, -b.type,null);else for(var d=0;6>d;++d)a.texImage2D(34069+d,c,b.internalformat,b.width>>c,b.height>>c,0,b.internalformat,b.type,null);M(b.texInfo,b.target)})},refresh:function(){for(var b=0;bd;++d){for(p= -0;pa;++a)c[a].resize(d);b.width=b.height=d;return b},_reglType:"framebufferCube",destroy:function(){c.forEach(function(a){a.destroy()})}})},clear:function(){I(M).forEach(r)}, -restore:function(){t.cur=null;t.next=null;t.dirty=!0;I(M).forEach(function(b){b.framebuffer=a.createFramebuffer();p(b)})}})}function 
$a(){this.w=this.z=this.y=this.x=this.state=0;this.buffer=null;this.size=0;this.normalized=!1;this.type=5126;this.divisor=this.stride=this.offset=0}function Sb(a,b,c,e,f,d,q){function n(a){if(a!==r.currentVAO){var c=b.oes_vertex_array_object;a?c.bindVertexArrayOES(a.vao):c.bindVertexArrayOES(null);r.currentVAO=a}}function v(c){if(c!==r.currentVAO){if(c)c.bindAttrs(); -else{for(var d=b.angle_instanced_arrays,e=0;e=m.byteLength?l.subdata(m): -(l.destroy(),c.buffers[h]=null));c.buffers[h]||(l=c.buffers[h]=f.create(p,34962,!1,!0));k.buffer=f.getBuffer(l);k.size=k.buffer.dimension|0;k.normalized=!1;k.type=k.buffer.dtype;k.offset=0;k.stride=0;k.divisor=0;k.state=1;a[h]=1}else f.getBuffer(p)?(k.buffer=f.getBuffer(p),k.size=k.buffer.dimension|0,k.normalized=!1,k.type=k.buffer.dtype,k.offset=0,k.stride=0,k.divisor=0,k.state=1):f.getBuffer(p.buffer)?(k.buffer=f.getBuffer(p.buffer),k.size=(+p.size||k.buffer.dimension)|0,k.normalized=!!p.normalized|| -!1,k.type="type"in p?Ja[p.type]:k.buffer.dtype,k.offset=(p.offset||0)|0,k.stride=(p.stride||0)|0,k.divisor=(p.divisor||0)|0,k.state=1):"x"in p&&(k.x=+p.x||0,k.y=+p.y||0,k.z=+p.z||0,k.w=+p.w||0,k.state=2)}for(l=0;la&&(a=b.stats.uniformsCount)});return a},c.getMaxAttributesCount=function(){var a=0;x.forEach(function(b){b.stats.attributesCount>a&&(a=b.stats.attributesCount)});return a});return{clear:function(){var b=a.deleteShader.bind(a);I(k).forEach(b);k={};I(u).forEach(b); -u={};x.forEach(function(b){a.deleteProgram(b.program)});x.length=0;m={};c.shaderCount=0},program:function(b,d,e,f){var l=m[d];l||(l=m[d]={});var q=l[b];if(q&&(q.refCount++,!f))return q;var w=new n(d,b);c.shaderCount++;v(w,e,f);q||(l[b]=w);x.push(w);return L(w,{destroy:function(){w.refCount--;if(0>=w.refCount){a.deleteProgram(w.program);var b=x.indexOf(w);x.splice(b,1);c.shaderCount--}0>=l[w.vertId].refCount&&(a.deleteShader(u[w.vertId]),delete u[w.vertId],delete m[w.fragId][w.vertId]);Object.keys(m[w.fragId]).length|| -(a.deleteShader(k[w.fragId]),delete k[w.fragId],delete m[w.fragId])}})},restore:function(){k={};u={};for(var a=0;a"+b+"?"+e+".constant["+b+"]:0;"}).join(""),"}}else{","if(",g,"(",e,".buffer)){",k,"=",f,".createStream(",34962,",",e,".buffer);","}else{",k,"=",f,".getBuffer(",e,".buffer);","}",m,'="type" in ',e,"?",z.glTypes,"[",e,".type]:",k,".dtype;",B.normalized,"=!!", -e,".normalized;");d("size");d("offset");d("stride");d("divisor");c("}}");c.exit("if(",B.isStream,"){",f,".destroyStream(",k,");","}");return B})});return g}function F(a){var b=a["static"],c=a.dynamic,d={};Object.keys(b).forEach(function(a){var c=b[a];d[a]=w(function(a,b){return"number"===typeof c||"boolean"===typeof c?""+c:a.link(c)})});Object.keys(c).forEach(function(a){var b=c[a];d[a]=K(b,function(a,c){return a.invoke(c,b)})});return d}function A(a,b,d,e,f){function g(a){var b=p[a];b&&(ja[a]=b)} -var m=O(a,b),l=G(a,f),p=C(a,l,f),X=M(a,f),ja=y(a,f),q=H(a,f,m);g("viewport");g(h("scissor.box"));var n=0>1)",u],");")}function b(){c(t,".drawArraysInstancedANGLE(",[n,q,r,u],");")}p&&"null"!==p?v?a():(c("if(",p,"){"),a(),c("}else{"),b(),c("}")):b()}function g(){function a(){c(l+".drawElements("+[n,r,x,q+"<<(("+x+"-5121)>>1)"]+");")}function b(){c(l+".drawArrays("+[n,q,r]+");")}p&&"null"!==p?v?a():(c("if(",p,"){"),a(),c("}else{"),b(),c("}")):b()}var h=a.shared,l=h.gl,k=h.draw,m=d.draw, -p=function(){var e=m.elements,f=b;if(e){if(e.contextDep&&d.contextDynamic||e.propDep)f=c;e=e.append(a,f);m.elementsActive&&f("if("+e+")"+l+".bindBuffer(34963,"+e+".buffer.buffer);")}else 
e=f.def(),f(e,"=",k,".","elements",";","if(",e,"){",l,".bindBuffer(",34963,",",e,".buffer.buffer);}","else if(",h.vao,".currentVAO){",e,"=",a.shared.elements+".getElements("+h.vao,".currentVAO.elements);",na?"":"if("+e+")"+l+".bindBuffer(34963,"+e+".buffer.buffer);","}");return e}(),n=e("primitive"),q=e("offset"), -r=function(){var e=m.count,f=b;if(e){if(e.contextDep&&d.contextDynamic||e.propDep)f=c;e=e.append(a,f)}else e=f.def(k,".","count");return e}();if("number"===typeof r){if(0===r)return}else c("if(",r,"){"),c.exit("}");var u,t;W&&(u=e("instances"),t=a.instancing);var x=p+".type",v=m.elements&&xa(m.elements)&&!m.vaoActive;W&&("number"!==typeof u||0<=u)?"string"===typeof u?(c("if(",u,">0){"),f(),c("}else if(",u,"<0){"),g(),c("}")):f():g()}function ca(a,b,c,d,e){b=P();e=b.proc("body",e);W&&(b.instancing= -e.def(b.shared.extensions,".angle_instanced_arrays"));a(b,e,c,d);return b.compile().body}function Z(a,b,c,d){N(a,b);c.useVAO?c.drawVAO?b(a.shared.vao,".setVAO(",c.drawVAO.append(a,b),");"):b(a.shared.vao,".setVAO(",a.shared.vao,".targetVAO);"):(b(a.shared.vao,".setVAO(null);"),ga(a,b,c,d.attributes,function(){return!0}));Q(a,b,c,d.uniforms,function(){return!0},!1);U(a,b,b,c)}function Fa(a,b){var c=a.proc("draw",1);N(a,c);ia(a,c,b.context);S(a,c,b.framebuffer);Aa(a,c,b);I(a,c,b.state);E(a,c,b,!1,!0); -var d=b.shader.progVar.append(a,c);c(a.shared.gl,".useProgram(",d,".program);");if(b.shader.program)Z(a,c,b,b.shader.program);else{c(a.shared.vao,".setVAO(null);");var e=a.global.def("{}"),f=c.def(d,".id"),g=c.def(e,"[",f,"]");c(a.cond(g).then(g,".call(this,a0);")["else"](g,"=",e,"[",f,"]=",a.link(function(c){return ca(Z,a,b,c,1)}),"(",d,");",g,".call(this,a0);"))}0=--this.refCount&&q(this)};f.profile&&(e.getTotalRenderbufferSize=function(){var a=0;Object.keys(u).forEach(function(b){a+=u[b].stats.size});return a});return{create:function(b, -c){function l(b,c){var d=0,e=0,k=32854;"object"===typeof b&&b?("shape"in b?(e=b.shape,d=e[0]|0,e=e[1]|0):("radius"in b&&(d=e=b.radius|0),"width"in b&&(d=b.width|0),"height"in b&&(e=b.height|0)),"format"in b&&(k=n[b.format])):"number"===typeof b?(d=b|0,e="number"===typeof c?c|0:d):b||(d=e=1);if(d!==g.width||e!==g.height||k!==g.format)return l.width=g.width=d,l.height=g.height=e,g.format=k,a.bindRenderbuffer(36161,g.renderbuffer),a.renderbufferStorage(36161,k,d,e),f.profile&&(g.stats.size=Q[g.format]* -g.width*g.height),l.format=v[g.format],l}var g=new d(a.createRenderbuffer());u[g.id]=g;e.renderbufferCount++;l(b,c);l.resize=function(b,c){var d=b|0,e=c|0||d;if(d===g.width&&e===g.height)return l;l.width=g.width=d;l.height=g.height=e;a.bindRenderbuffer(36161,g.renderbuffer);a.renderbufferStorage(36161,g.format,d,e);f.profile&&(g.stats.size=Q[g.format]*g.width*g.height);return l};l._reglType="renderbuffer";l._renderbuffer=g;f.profile&&(l.stats=g.stats);l.destroy=function(){g.decRef()};return l},clear:function(){I(u).forEach(q)}, -restore:function(){I(u).forEach(function(b){b.renderbuffer=a.createRenderbuffer();a.bindRenderbuffer(36161,b.renderbuffer);a.renderbufferStorage(36161,b.format,b.width,b.height)});a.bindRenderbuffer(36161,null)}}},Za=[];Za[6408]=4;Za[6407]=3;var Ra=[];Ra[5121]=1;Ra[5126]=4;Ra[36193]=2;var Da=["x","y","z","w"],Xb="blend.func blend.equation stencil.func stencil.opFront stencil.opBack sample.coverage viewport scissor.box polygonOffset.offset".split(" "),Ga={0:0,1:1,zero:0,one:1,"src color":768,"one minus src color":769, -"src alpha":770,"one minus src alpha":771,"dst color":774,"one minus dst color":775,"dst alpha":772,"one minus dst 
alpha":773,"constant color":32769,"one minus constant color":32770,"constant alpha":32771,"one minus constant alpha":32772,"src alpha saturate":776},ab={never:512,less:513,"<":513,equal:514,"=":514,"==":514,"===":514,lequal:515,"<=":515,greater:516,">":516,notequal:517,"!=":517,"!==":517,gequal:518,">=":518,always:519},Ta={0:0,zero:0,keep:7680,replace:7681,increment:7682,decrement:7683, -"increment wrap":34055,"decrement wrap":34056,invert:5386},zb={cw:2304,ccw:2305},Ab=new J(!1,!1,!1,function(){}),$b=function(a,b){function c(){this.endQueryIndex=this.startQueryIndex=-1;this.sum=0;this.stats=null}function e(a,b,d){var e=q.pop()||new c;e.startQueryIndex=a;e.endQueryIndex=b;e.sum=0;e.stats=d;n.push(e)}if(!b.ext_disjoint_timer_query)return null;var f=[],d=[],q=[],n=[],v=[],k=[];return{beginQuery:function(a){var c=f.pop()||b.ext_disjoint_timer_query.createQueryEXT();b.ext_disjoint_timer_query.beginQueryEXT(35007, -c);d.push(c);e(d.length-1,d.length,a)},endQuery:function(){b.ext_disjoint_timer_query.endQueryEXT(35007)},pushScopeStats:e,update:function(){var a,c;a=d.length;if(0!==a){k.length=Math.max(k.length,a+1);v.length=Math.max(v.length,a+1);v[0]=0;var e=k[0]=0;for(c=a=0;c=E.length&&e()}var c=Bb(E,a);E[c]=b}}}function k(){var a=Q.viewport,b=Q.scissor_box;a[0]=a[1]=b[0]=b[1]=0;H.viewportWidth=H.framebufferWidth=H.drawingBufferWidth=a[2]=b[2]=l.drawingBufferWidth;H.viewportHeight=H.framebufferHeight=H.drawingBufferHeight=a[3]=b[3]=l.drawingBufferHeight}function u(){H.tick+=1;H.time=x();k();I.procs.poll()}function m(){A.refresh();k();I.procs.refresh();t&&t.update()}function x(){return(Cb()- -G)/1E3}a=Hb(a);if(!a)return null;var l=a.gl,g=l.getContextAttributes();l.isContextLost();var h=Ib(l,a);if(!h)return null;var r=Eb(),p={vaoCount:0,bufferCount:0,elementsCount:0,framebufferCount:0,shaderCount:0,textureCount:0,cubeCount:0,renderbufferCount:0,maxTextureUnits:0},w=h.extensions,t=$b(l,w),G=Cb(),C=l.drawingBufferWidth,J=l.drawingBufferHeight,H={tick:0,time:0,viewportWidth:C,viewportHeight:J,framebufferWidth:C,framebufferHeight:J,drawingBufferWidth:C,drawingBufferHeight:J,pixelRatio:a.pixelRatio}, -C={elements:null,primitive:4,count:-1,offset:0,instances:-1},M=Yb(l,w),y=Jb(l,p,a,function(a){return K.destroyBuffer(a)}),T=Kb(l,w,y,p),K=Sb(l,w,M,p,y,T,C),F=Tb(l,r,p,a),A=Nb(l,w,M,function(){I.procs.poll()},H,p,a),O=Zb(l,w,M,p,a),S=Rb(l,w,M,A,O,p),I=Wb(l,r,w,M,y,T,A,S,{},K,F,C,H,t,a),r=Ub(l,S,I.procs.poll,H,g,w,M),Q=I.next,N=l.canvas,E=[],R=[],U=[],Z=[a.onDestroy],ca=null;N&&(N.addEventListener("webglcontextlost",f,!1),N.addEventListener("webglcontextrestored",d,!1));var aa=S.setFBO=q({framebuffer:Y.define.call(null, -1,"framebuffer")});m();g=L(q,{clear:function(a){if("framebuffer"in a)if(a.framebuffer&&"framebufferCube"===a.framebuffer_reglType)for(var b=0;6>b;++b)aa(L({framebuffer:a.framebuffer.faces[b]},a),n);else aa(a,n);else n(null,a)},prop:Y.define.bind(null,1),context:Y.define.bind(null,2),"this":Y.define.bind(null,3),draw:q({}),buffer:function(a){return y.create(a,34962,!1,!1)},elements:function(a){return T.create(a,!1)},texture:A.create2D,cube:A.createCube,renderbuffer:O.create,framebuffer:S.create,framebufferCube:S.createCube, -vao:K.createVAO,attributes:g,frame:v,on:function(a,b){var c;switch(a){case "frame":return v(b);case "lost":c=R;break;case "restore":c=U;break;case "destroy":c=Z}c.push(b);return{cancel:function(){for(var a=0;a { - window.vocab = res[0] - d3.select('#graph').html('').datum(jsData).each(drawSentence) -}) - -async function drawSentence({s0, s1, tidyCSV, minYear}, i){ - var 
tidy = d3.csvParse(jsData.tidyCSV) - - console.log(minYear) - tidy.forEach(d => { - d.year = minYear + +d.year_index - d.i = +d.token_index - d.e0 = +d.e0 - d.e1 = +d.e1 - d.mean = d.e0 + d.e1 - d.dif = d.e0 - d.e1 - }) - - var sel = d3.select(this).st({marginRight: 20}) - sel.append('div').st({color: colors[0]}).text(s0) - sel.append('div').st({color: colors[1]}).text(s1) - var c = d3.conventions({ - sel, - height: 300, - width: 300, - margin: {left: 0, bottom: 50, right: 60} - }) - - c.svg.append('rect') - .at({width: c.width, height: c.height/2, opacity: .1, fill: colors[0]}) - - c.svg.append('rect') - .at({width: c.width, height: c.height/2, opacity: .1, fill: colors[1], y: c.height/2}) - - console.log(tidy[0]) - c.x.domain(d3.extent(tidy, d => d.year)) - - var minY = d3.min(d3.extent(tidy, d => d.dif), Math.abs) - var maxY = d3.max(d3.extent(tidy, d => d.dif), Math.abs) - c.y.domain([-minY, minY]) - c.y.domain([-maxY, maxY]) - // c.y.domain([-4.5, 4.5]) - - c.xAxis.tickFormat(d => d) - c.yAxis.ticks(5) - d3.drawAxis(c) - - var byToken = d3.nestBy(tidy, d => d.i) - byToken.forEach(d => { - d.endY = c.y(_.last(d).dif) - d.str = vocab[+d.key].replace('▁', '') - d.displayLabel = true - d.mean = d3.sum(d, e => e.mean) - }) - console.log(tidy[0]) - - d3.nestBy(_.sortBy(byToken, d => -d.mean), d => Math.round(d.endY/12)) - .forEach(d => d.forEach((e, i) => e.displayLabel = !i)) - - var line = d3.line() - .x(d => c.x(d.year)) - .y(d => c.y(d.dif)) - - var tokenSel = c.svg.appendMany('g', byToken) - // .call(d3.attachTooltip) - .on('mouseover', function(){ - tokenSel.classed('active', 0) - d3.select(this) - .raise() - .classed('active', 1) - }) - - c.svg.on('mouseleave', function(){ - tokenSel.classed('active', 0) - }) - - tokenSel.append('text') - .text(d => d.str) - .translate(d => [c.width + 2, d.endY]) - .at({fontSize: 10, dy: '.33em', fill: (d, i) => d.displayLabel ? '#999' : 'rgba(0,0,0,0)'}) - - tokenSel.append('path') - .at({ - d: line, - stroke: '#000', - opacity: .2, - - fill: 'none', - }) -} - - diff --git a/spaces/mfrashad/ClothingGAN/models/stylegan/stylegan_tf/pretrained_example.py b/spaces/mfrashad/ClothingGAN/models/stylegan/stylegan_tf/pretrained_example.py deleted file mode 100644 index 63baef08bfa4bf34f52a0cf63e10a0b6783ac316..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/ClothingGAN/models/stylegan/stylegan_tf/pretrained_example.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Minimal script for generating an image using pre-trained StyleGAN generator.""" - -import os -import pickle -import numpy as np -import PIL.Image -import dnnlib -import dnnlib.tflib as tflib -import config - -def main(): - # Initialize TensorFlow. - tflib.init_tf() - - # Load pre-trained network. - url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl - with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: - _G, _D, Gs = pickle.load(f) - # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run. - # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run. - # Gs = Long-term average of the generator. 
Yields higher-quality results than the instantaneous snapshot. - - # Print network details. - Gs.print_layers() - - # Pick latent vector. - rnd = np.random.RandomState(5) - latents = rnd.randn(1, Gs.input_shape[1]) - - # Generate image. - fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True) - images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt) - - # Save image. - os.makedirs(config.result_dir, exist_ok=True) - png_filename = os.path.join(config.result_dir, 'example.png') - PIL.Image.fromarray(images[0], 'RGB').save(png_filename) - -if __name__ == "__main__": - main() diff --git a/spaces/mimimibimimimi/ACertainModel/app.py b/spaces/mimimibimimimi/ACertainModel/app.py deleted file mode 100644 index 94a89f53d68bbb3c9310f37a212e0e1ebd7fcb87..0000000000000000000000000000000000000000 --- a/spaces/mimimibimimimi/ACertainModel/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/JosephusCheung/ACertainModel").launch() \ No newline at end of file diff --git a/spaces/mithril-security/blind_chat/src/routes/r/[id]/+page.server.ts b/spaces/mithril-security/blind_chat/src/routes/r/[id]/+page.server.ts deleted file mode 100644 index e09e70bac0c3ec133ea691d4c808160973ef2a7b..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/blind_chat/src/routes/r/[id]/+page.server.ts +++ /dev/null @@ -1,34 +0,0 @@ -import type { PageServerLoad } from "./$types"; -import { collections } from "$lib/server/database"; -import { error } from "@sveltejs/kit"; -import type { WebSearchMessageResult } from "$lib/types/WebSearch"; - -export const load: PageServerLoad = async ({ params }) => { - /*const conversation = await collections.sharedConversations.findOne({ - _id: params.id, - }); - - if (!conversation) { - throw error(404, "Conversation not found"); - } - - const webSearchesId = conversation.messages - .filter((message) => message.webSearchId) - .map((message) => new ObjectId(message.webSearchId)); - - const results = await collections.webSearches.find({ _id: { $in: webSearchesId } }).toArray(); - - const searches = Object.fromEntries( - results.map((x) => [ - x._id.toString(), - [...x.messages, { type: "result", id: x._id.toString() } satisfies WebSearchMessageResult], - ]) - ); - - return { - messages: conversation.messages, - title: conversation.title, - model: conversation.model, - searches, - };*/ -}; diff --git a/spaces/miyaaa666/bingo/cloudflare/worker.js b/spaces/miyaaa666/bingo/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/miyaaa666/bingo/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/mms-meta/MMS/vits/utils.py b/spaces/mms-meta/MMS/vits/utils.py deleted file mode 100644 index b445fb65836a0b97e46426300eea9a820179797a..0000000000000000000000000000000000000000 --- a/spaces/mms-meta/MMS/vits/utils.py +++ /dev/null @@ -1,258 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read 
-import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - 
plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/noisychannel/__init__.py b/spaces/mshukor/UnIVAL/fairseq/examples/noisychannel/__init__.py deleted file mode 100644 index 89f1aef4f6328d25425e0bcabb42dfffd2ed35f0..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/noisychannel/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .rerank_options import * # noqa diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/data/replabels.py b/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/data/replabels.py deleted file mode 100644 index 441f1bd432b95865fc981c6c695cee299b07ed62..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/data/replabels.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -""" -Replabel transforms for use with flashlight's ASG criterion. -""" - - -def replabel_symbol(i): - """ - Replabel symbols used in flashlight, currently just "1", "2", ... 
- This prevents training with numeral tokens, so this might change in the future - """ - return str(i) - - -def pack_replabels(tokens, dictionary, max_reps): - """ - Pack a token sequence so that repeated symbols are replaced by replabels - """ - if len(tokens) == 0 or max_reps <= 0: - return tokens - - replabel_value_to_idx = [0] * (max_reps + 1) - for i in range(1, max_reps + 1): - replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i)) - - result = [] - prev_token = -1 - num_reps = 0 - for token in tokens: - if token == prev_token and num_reps < max_reps: - num_reps += 1 - else: - if num_reps > 0: - result.append(replabel_value_to_idx[num_reps]) - num_reps = 0 - result.append(token) - prev_token = token - if num_reps > 0: - result.append(replabel_value_to_idx[num_reps]) - return result - - -def unpack_replabels(tokens, dictionary, max_reps): - """ - Unpack a token sequence so that replabels are replaced by repeated symbols - """ - if len(tokens) == 0 or max_reps <= 0: - return tokens - - replabel_idx_to_value = {} - for i in range(1, max_reps + 1): - replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i - - result = [] - prev_token = -1 - for token in tokens: - try: - for _ in range(replabel_idx_to_value[token]): - result.append(prev_token) - prev_token = -1 - except KeyError: - result.append(token) - prev_token = token - return result diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/replace_dataset.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/data/replace_dataset.py deleted file mode 100644 index 5aac2ba96bee0a8bb65f4c9e56fa0b17248ee1d9..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/replace_dataset.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . import BaseWrapperDataset - - -class ReplaceDataset(BaseWrapperDataset): - """Replaces tokens found in the dataset by a specified replacement token - - Args: - dataset (~torch.utils.data.Dataset): dataset to replace tokens in - replace_map(Dictionary[int,int]): map of token to replace -> replacement token - offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. should be - as many as the number of objects returned by the underlying dataset __getitem__ method. - """ - - def __init__(self, dataset, replace_map, offsets): - super().__init__(dataset) - assert len(replace_map) > 0 - self.replace_map = replace_map - self.offsets = offsets - - def __getitem__(self, index): - item = self.dataset[index] - is_tuple = isinstance(item, tuple) - srcs = item if is_tuple else [item] - - for offset, src in zip(self.offsets, srcs): - for k, v in self.replace_map.items(): - src_off = src[offset:] if offset >= 0 else src[:offset] - src_off.masked_fill_(src_off == k, v) - - item = srcs if is_tuple else srcs[0] - return item diff --git a/spaces/mueller-franzes/medfusion-app/medical_diffusion/external/diffusers/unet_blocks.py b/spaces/mueller-franzes/medfusion-app/medical_diffusion/external/diffusers/unet_blocks.py deleted file mode 100644 index a895d520afad8e325a5387368f4c21d2c29cf4e5..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/medical_diffusion/external/diffusers/unet_blocks.py +++ /dev/null @@ -1,1557 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and - -import numpy as np - -# limitations under the License. -import torch -from torch import nn - -from .attention import AttentionBlock, SpatialTransformer -from .resnet import Downsample2D, FirDownsample2D, FirUpsample2D, ResnetBlock2D, Upsample2D - - -def get_down_block( - down_block_type, - num_layers, - in_channels, - out_channels, - temb_channels, - add_downsample, - resnet_eps, - resnet_act_fn, - attn_num_head_channels, - resnet_groups=None, - cross_attention_dim=None, - downsample_padding=None, -): - down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type - if down_block_type == "DownBlock2D": - return DownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - ) - elif down_block_type == "AttnDownBlock2D": - return AttnDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - attn_num_head_channels=attn_num_head_channels, - ) - elif down_block_type == "CrossAttnDownBlock2D": - if cross_attention_dim is None: - raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") - return CrossAttnDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - cross_attention_dim=cross_attention_dim, - attn_num_head_channels=attn_num_head_channels, - ) - elif down_block_type == "SkipDownBlock2D": - return SkipDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - downsample_padding=downsample_padding, - ) - elif down_block_type == "AttnSkipDownBlock2D": - return AttnSkipDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - downsample_padding=downsample_padding, - attn_num_head_channels=attn_num_head_channels, - ) - elif down_block_type == "DownEncoderBlock2D": - return DownEncoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - ) - - -def get_up_block( - up_block_type, - num_layers, - in_channels, - out_channels, - 
prev_output_channel, - temb_channels, - add_upsample, - resnet_eps, - resnet_act_fn, - attn_num_head_channels, - resnet_groups=None, - cross_attention_dim=None, -): - up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type - if up_block_type == "UpBlock2D": - return UpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - ) - elif up_block_type == "CrossAttnUpBlock2D": - if cross_attention_dim is None: - raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D") - return CrossAttnUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - cross_attention_dim=cross_attention_dim, - attn_num_head_channels=attn_num_head_channels, - ) - elif up_block_type == "AttnUpBlock2D": - return AttnUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - attn_num_head_channels=attn_num_head_channels, - ) - elif up_block_type == "SkipUpBlock2D": - return SkipUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - ) - elif up_block_type == "AttnSkipUpBlock2D": - return AttnSkipUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - attn_num_head_channels=attn_num_head_channels, - ) - elif up_block_type == "UpDecoderBlock2D": - return UpDecoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - ) - raise ValueError(f"{up_block_type} does not exist.") - - -class UNetMidBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=1.0, - **kwargs, - ): - super().__init__() - - self.attention_type = attention_type - resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) - - # there is always at least one resnet - resnets = [ - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ] - attentions = [] - - for _ in range(num_layers): - attentions.append( 
- AttentionBlock( - in_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - num_groups=resnet_groups, - ) - ) - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - def forward(self, hidden_states, temb=None, encoder_states=None): - hidden_states = self.resnets[0](hidden_states, temb) - for attn, resnet in zip(self.attentions, self.resnets[1:]): - if self.attention_type == "default": - hidden_states = attn(hidden_states) - else: - hidden_states = attn(hidden_states, encoder_states) - hidden_states = resnet(hidden_states, temb) - - return hidden_states - - -class UNetMidBlock2DCrossAttn(nn.Module): - def __init__( - self, - in_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=1.0, - cross_attention_dim=1280, - **kwargs, - ): - super().__init__() - - self.attention_type = attention_type - self.attn_num_head_channels = attn_num_head_channels - resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) - - # there is always at least one resnet - resnets = [ - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ] - attentions = [] - - for _ in range(num_layers): - attentions.append( - SpatialTransformer( - in_channels, - attn_num_head_channels, - in_channels // attn_num_head_channels, - depth=1, - context_dim=cross_attention_dim, - num_groups=resnet_groups, - ) - ) - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - def set_attention_slice(self, slice_size): - if slice_size is not None and self.attn_num_head_channels % slice_size != 0: - raise ValueError( - f"Make sure slice_size {slice_size} is a divisor of " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - if slice_size is not None and slice_size > self.attn_num_head_channels: - raise ValueError( - f"Chunk_size {slice_size} has to be smaller or equal to " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - - for attn in self.attentions: - attn._set_attention_slice(slice_size) - - def forward(self, hidden_states, temb=None, encoder_hidden_states=None): - hidden_states = self.resnets[0](hidden_states, temb) - for attn, resnet in zip(self.attentions, self.resnets[1:]): - hidden_states = attn(hidden_states, 
encoder_hidden_states) - hidden_states = resnet(hidden_states, temb) - - return hidden_states - - -class AttnDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=1.0, - downsample_padding=1, - add_downsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - self.attention_type = attention_type - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - num_groups=resnet_groups, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - def forward(self, hidden_states, temb=None): - output_states = () - - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states) - output_states += (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states += (hidden_states,) - - return hidden_states, output_states - - -class CrossAttnDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - cross_attention_dim=1280, - attention_type="default", - output_scale_factor=1.0, - downsample_padding=1, - add_downsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - self.attention_type = attention_type - self.attn_num_head_channels = attn_num_head_channels - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - SpatialTransformer( - out_channels, - attn_num_head_channels, - out_channels // attn_num_head_channels, - depth=1, - context_dim=cross_attention_dim, - num_groups=resnet_groups, - ) - ) - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - in_channels, use_conv=True, 
out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def set_attention_slice(self, slice_size): - if slice_size is not None and self.attn_num_head_channels % slice_size != 0: - raise ValueError( - f"Make sure slice_size {slice_size} is a divisor of " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - if slice_size is not None and slice_size > self.attn_num_head_channels: - raise ValueError( - f"Chunk_size {slice_size} has to be smaller or equal to " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - - for attn in self.attentions: - attn._set_attention_slice(slice_size) - - def forward(self, hidden_states, temb=None, encoder_hidden_states=None): - output_states = () - - for resnet, attn in zip(self.resnets, self.attentions): - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(attn), hidden_states, encoder_hidden_states - ) - else: - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states, context=encoder_hidden_states) - - output_states += (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states += (hidden_states,) - - return hidden_states, output_states - - -class DownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, temb=None): - output_states = () - - for resnet in self.resnets: - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - else: - hidden_states = resnet(hidden_states, temb) - - output_states += (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states += (hidden_states,) - - return hidden_states, output_states - - -class 
DownEncoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - out_channels if len(resnets)>0 else in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - def forward(self, hidden_states): - for resnet in self.resnets: - hidden_states = resnet(hidden_states, temb=None) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - return hidden_states - - -class AttnDownEncoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - attentions = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - num_groups=resnet_groups, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - def forward(self, hidden_states): - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb=None) - hidden_states = attn(hidden_states) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - return hidden_states - - -class AttnSkipDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=np.sqrt(2.0), 
- downsample_padding=1, - add_downsample=True, - ): - super().__init__() - self.attentions = nn.ModuleList([]) - self.resnets = nn.ModuleList([]) - - self.attention_type = attention_type - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - self.resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(in_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - self.attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - ) - ) - - if add_downsample: - self.resnet_down = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - down=True, - kernel="fir", - ) - self.downsamplers = nn.ModuleList([FirDownsample2D(in_channels, out_channels=out_channels)]) - self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) - else: - self.resnet_down = None - self.downsamplers = None - self.skip_conv = None - - def forward(self, hidden_states, temb=None, skip_sample=None): - output_states = () - - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states) - output_states += (hidden_states,) - - if self.downsamplers is not None: - hidden_states = self.resnet_down(hidden_states, temb) - for downsampler in self.downsamplers: - skip_sample = downsampler(skip_sample) - - hidden_states = self.skip_conv(skip_sample) + hidden_states - - output_states += (hidden_states,) - - return hidden_states, output_states, skip_sample - - -class SkipDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - output_scale_factor=np.sqrt(2.0), - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - self.resnets = nn.ModuleList([]) - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - self.resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(in_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - if add_downsample: - self.resnet_down = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - down=True, - kernel="fir", - ) - self.downsamplers = 
nn.ModuleList([FirDownsample2D(in_channels, out_channels=out_channels)]) - self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) - else: - self.resnet_down = None - self.downsamplers = None - self.skip_conv = None - - def forward(self, hidden_states, temb=None, skip_sample=None): - output_states = () - - for resnet in self.resnets: - hidden_states = resnet(hidden_states, temb) - output_states += (hidden_states,) - - if self.downsamplers is not None: - hidden_states = self.resnet_down(hidden_states, temb) - for downsampler in self.downsamplers: - skip_sample = downsampler(skip_sample) - - hidden_states = self.skip_conv(skip_sample) + hidden_states - - output_states += (hidden_states,) - - return hidden_states, output_states, skip_sample - - -class AttnUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attention_type="default", - attn_num_head_channels=1, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - self.attention_type = attention_type - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - num_groups=resnet_groups, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None): - for resnet, attn in zip(self.resnets, self.attentions): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class CrossAttnUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - prev_output_channel: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - cross_attention_dim=1280, - attention_type="default", - output_scale_factor=1.0, - downsample_padding=1, - add_upsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - self.attention_type = attention_type - self.attn_num_head_channels = 
attn_num_head_channels - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - SpatialTransformer( - out_channels, - attn_num_head_channels, - out_channels // attn_num_head_channels, - depth=1, - context_dim=cross_attention_dim, - num_groups=resnet_groups, - ) - ) - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def set_attention_slice(self, slice_size): - if slice_size is not None and self.attn_num_head_channels % slice_size != 0: - raise ValueError( - f"Make sure slice_size {slice_size} is a divisor of " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - if slice_size is not None and slice_size > self.attn_num_head_channels: - raise ValueError( - f"Chunk_size {slice_size} has to be smaller or equal to " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - - for attn in self.attentions: - attn._set_attention_slice(slice_size) - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states, - res_hidden_states_tuple, - temb=None, - encoder_hidden_states=None, - ): - for resnet, attn in zip(self.resnets, self.attentions): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(attn), hidden_states, encoder_hidden_states - ) - else: - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states, context=encoder_hidden_states) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class UpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - 
groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - else: - hidden_states = resnet(hidden_states, temb) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class UpDecoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - input_channels = in_channels if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=input_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - def forward(self, hidden_states): - for resnet in self.resnets: - hidden_states = resnet(hidden_states, temb=None) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class AttnUpDecoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - for i in range(num_layers): - input_channels = in_channels if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=input_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, 
- num_groups=resnet_groups, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - def forward(self, hidden_states): - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb=None) - hidden_states = attn(hidden_states) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class AttnSkipUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=np.sqrt(2.0), - upsample_padding=1, - add_upsample=True, - ): - super().__init__() - self.attentions = nn.ModuleList([]) - self.resnets = nn.ModuleList([]) - - self.attention_type = attention_type - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - self.resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(resnet_in_channels + res_skip_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - ) - ) - - self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) - if add_upsample: - self.resnet_up = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - up=True, - kernel="fir", - ) - self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - self.skip_norm = torch.nn.GroupNorm( - num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True - ) - self.act = nn.SiLU() - else: - self.resnet_up = None - self.skip_conv = None - self.skip_norm = None - self.act = None - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - hidden_states = resnet(hidden_states, temb) - - hidden_states = self.attentions[0](hidden_states) - - if skip_sample is not None: - skip_sample = self.upsampler(skip_sample) - else: - skip_sample = 0 - - if self.resnet_up is not None: - skip_sample_states = self.skip_norm(hidden_states) - 
skip_sample_states = self.act(skip_sample_states) - skip_sample_states = self.skip_conv(skip_sample_states) - - skip_sample = skip_sample + skip_sample_states - - hidden_states = self.resnet_up(hidden_states, temb) - - return hidden_states, skip_sample - - -class SkipUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - output_scale_factor=np.sqrt(2.0), - add_upsample=True, - upsample_padding=1, - ): - super().__init__() - self.resnets = nn.ModuleList([]) - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - self.resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min((resnet_in_channels + res_skip_channels) // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) - if add_upsample: - self.resnet_up = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - up=True, - kernel="fir", - ) - self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - self.skip_norm = torch.nn.GroupNorm( - num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True - ) - self.act = nn.SiLU() - else: - self.resnet_up = None - self.skip_conv = None - self.skip_norm = None - self.act = None - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - hidden_states = resnet(hidden_states, temb) - - if skip_sample is not None: - skip_sample = self.upsampler(skip_sample) - else: - skip_sample = 0 - - if self.resnet_up is not None: - skip_sample_states = self.skip_norm(hidden_states) - skip_sample_states = self.act(skip_sample_states) - skip_sample_states = self.skip_conv(skip_sample_states) - - skip_sample = skip_sample + skip_sample_states - - hidden_states = self.resnet_up(hidden_states, temb) - - return hidden_states, skip_sample diff --git a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/reveal-menu/menu.css b/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/reveal-menu/menu.css deleted file mode 100644 index 5a300fdf63f758dc86e6d9f2ad8b543ea444cd9f..0000000000000000000000000000000000000000 --- a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/reveal-menu/menu.css +++ /dev/null @@ -1,346 +0,0 @@ 
-.slide-menu-wrapper { - font-family: 'Source Sans Pro', Helvetica, sans-serif; -} - -.slide-menu-wrapper .slide-menu { - background-color: #333; - z-index: 200; - position: fixed; - top: 0; - width: 300px; - height: 100%; - /*overflow-y: scroll;*/ - transition: transform 0.3s; - font-size: 16px; - font-weight: normal; -} - -.slide-menu-wrapper .slide-menu.slide-menu--wide { - width: 500px; -} - -.slide-menu-wrapper .slide-menu.slide-menu--third { - width: 33%; -} - -.slide-menu-wrapper .slide-menu.slide-menu--half { - width: 50%; -} - -.slide-menu-wrapper .slide-menu.slide-menu--full { - width: 95%; -} - -/* - * Slides menu - */ - -.slide-menu-wrapper .slide-menu-items { - margin: 0; - padding: 0; - width: 100%; - border-bottom: solid 1px #555; -} - -.slide-menu-wrapper .slide-menu-item, -.slide-menu-wrapper .slide-menu-item-vertical { - display: block; - text-align: left; - padding: 10px 18px; - color: #aaa; - cursor: pointer; -} - -.slide-menu-wrapper .slide-menu-item-vertical { - padding-left: 30px; -} - -.slide-menu-wrapper .slide-menu--wide .slide-menu-item-vertical, -.slide-menu-wrapper .slide-menu--third .slide-menu-item-vertical, -.slide-menu-wrapper .slide-menu--half .slide-menu-item-vertical, -.slide-menu-wrapper .slide-menu--full .slide-menu-item-vertical, -.slide-menu-wrapper .slide-menu--custom .slide-menu-item-vertical { - padding-left: 50px; -} - -.slide-menu-wrapper .slide-menu-item { - border-top: solid 1px #555; -} - -.slide-menu-wrapper .active-menu-panel li.selected { - background-color: #222; - color: white; -} - -.slide-menu-wrapper .active-menu-panel li.active { - color: #eee; -} - -.slide-menu-wrapper .slide-menu-item.no-title .slide-menu-item-title, -.slide-menu-wrapper .slide-menu-item-vertical.no-title .slide-menu-item-title { - font-style: italic; -} - -.slide-menu-wrapper .slide-menu-item-number { - color: #999; - padding-right: 6px; -} - -.slide-menu-wrapper .slide-menu-item i.far, -.slide-menu-wrapper .slide-menu-item i.fas, -.slide-menu-wrapper .slide-menu-item-vertical i.far, -.slide-menu-wrapper .slide-menu-item-vertical i.fas, -.slide-menu-wrapper .slide-menu-item svg.svg-inline--fa, -.slide-menu-wrapper .slide-menu-item-vertical svg.svg-inline--fa { - padding-right: 12px; - display: none; -} - -.slide-menu-wrapper .slide-menu-item.past i.fas.past, -.slide-menu-wrapper .slide-menu-item-vertical.past i.fas.past, -.slide-menu-wrapper .slide-menu-item.active i.fas.active, -.slide-menu-wrapper .slide-menu-item-vertical.active i.fas.active, -.slide-menu-wrapper .slide-menu-item.future i.far.future, -.slide-menu-wrapper .slide-menu-item-vertical.future i.far.future, -.slide-menu-wrapper .slide-menu-item.past svg.svg-inline--fa.past, -.slide-menu-wrapper .slide-menu-item-vertical.past svg.svg-inline--fa.past, -.slide-menu-wrapper .slide-menu-item.active svg.svg-inline--fa.active, -.slide-menu-wrapper .slide-menu-item-vertical.active svg.svg-inline--fa.active, -.slide-menu-wrapper .slide-menu-item.future svg.svg-inline--fa.future, -.slide-menu-wrapper .slide-menu-item-vertical.future svg.svg-inline--fa.future { - display: inline-block; -} - -.slide-menu-wrapper .slide-menu-item.past i.fas.past, -.slide-menu-wrapper .slide-menu-item-vertical.past i.fas.past, -.slide-menu-wrapper .slide-menu-item.future i.far.future, -.slide-menu-wrapper .slide-menu-item-vertical.future i.far.future, -.slide-menu-wrapper .slide-menu-item.past svg.svg-inline--fa.past, -.slide-menu-wrapper .slide-menu-item-vertical.past svg.svg-inline--fa.past, -.slide-menu-wrapper 
.slide-menu-item.future svg.svg-inline--fa.future, -.slide-menu-wrapper .slide-menu-item-vertical.future svg.svg-inline--fa.future { - opacity: 0.4; -} - -.slide-menu-wrapper .slide-menu-item.active i.fas.active, -.slide-menu-wrapper .slide-menu-item-vertical.active i.fas.active, -.slide-menu-wrapper .slide-menu-item.active svg.svg-inline--fa.active, -.slide-menu-wrapper .slide-menu-item-vertical.active svg.svg-inline--fa.active { - opacity: 0.8; -} - -.slide-menu-wrapper .slide-menu--left { - left: 0; - -webkit-transform: translateX(-100%); - -ms-transform: translateX(-100%); - transform: translateX(-100%); -} - -.slide-menu-wrapper .slide-menu--left.active { - -webkit-transform: translateX(0); - -ms-transform: translateX(0); - transform: translateX(0); -} - -.slide-menu-wrapper .slide-menu--right { - right: 0; - -webkit-transform: translateX(100%); - -ms-transform: translateX(100%); - transform: translateX(100%); -} - -.slide-menu-wrapper .slide-menu--right.active { - -webkit-transform: translateX(0); - -ms-transform: translateX(0); - transform: translateX(0); -} - -.slide-menu-wrapper { - transition: transform 0.3s; -} - -/* - * Toolbar - */ -.slide-menu-wrapper .slide-menu-toolbar { - height: 60px; - width: 100%; - font-size: 12px; - display: table; - table-layout: fixed; /* ensures equal width */ - margin: 0; - padding: 0; - border-bottom: solid 2px #666; -} - -.slide-menu-wrapper .slide-menu-toolbar > li { - display: table-cell; - line-height: 150%; - text-align: center; - vertical-align: middle; - cursor: pointer; - color: #aaa; - border-radius: 3px; -} - -.slide-menu-wrapper .slide-menu-toolbar > li.toolbar-panel-button i, -.slide-menu-wrapper - .slide-menu-toolbar - > li.toolbar-panel-button - svg.svg-inline--fa { - font-size: 1.7em; -} - -.slide-menu-wrapper .slide-menu-toolbar > li.active-toolbar-button { - color: white; - text-shadow: 0 1px black; - text-decoration: underline; -} - -.slide-menu-toolbar > li.toolbar-panel-button:hover { - color: white; -} - -.slide-menu-toolbar - > li.toolbar-panel-button:hover - span.slide-menu-toolbar-label, -.slide-menu-wrapper - .slide-menu-toolbar - > li.active-toolbar-button - span.slide-menu-toolbar-label { - visibility: visible; -} - -/* - * Panels - */ -.slide-menu-wrapper .slide-menu-panel { - position: absolute; - width: 100%; - visibility: hidden; - height: calc(100% - 60px); - overflow-x: hidden; - overflow-y: auto; - color: #aaa; -} - -.slide-menu-wrapper .slide-menu-panel.active-menu-panel { - visibility: visible; -} - -.slide-menu-wrapper .slide-menu-panel h1, -.slide-menu-wrapper .slide-menu-panel h2, -.slide-menu-wrapper .slide-menu-panel h3, -.slide-menu-wrapper .slide-menu-panel h4, -.slide-menu-wrapper .slide-menu-panel h5, -.slide-menu-wrapper .slide-menu-panel h6 { - margin: 20px 0 10px 0; - color: #fff; - line-height: 1.2; - letter-spacing: normal; - text-shadow: none; -} - -.slide-menu-wrapper .slide-menu-panel h1 { - font-size: 1.6em; -} -.slide-menu-wrapper .slide-menu-panel h2 { - font-size: 1.4em; -} -.slide-menu-wrapper .slide-menu-panel h3 { - font-size: 1.3em; -} -.slide-menu-wrapper .slide-menu-panel h4 { - font-size: 1.1em; -} -.slide-menu-wrapper .slide-menu-panel h5 { - font-size: 1em; -} -.slide-menu-wrapper .slide-menu-panel h6 { - font-size: 0.9em; -} - -.slide-menu-wrapper .slide-menu-panel p { - margin: 10px 0 5px 0; -} - -.slide-menu-wrapper .slide-menu-panel a { - color: #ccc; - text-decoration: underline; -} - -.slide-menu-wrapper .slide-menu-panel a:hover { - color: white; -} - -.slide-menu-wrapper 
.slide-menu-item a { - text-decoration: none; -} - -.slide-menu-wrapper .slide-menu-custom-panel { - width: calc(100% - 20px); - padding-left: 10px; - padding-right: 10px; -} - -.slide-menu-wrapper .slide-menu-custom-panel .slide-menu-items { - width: calc(100% + 20px); - margin-left: -10px; - margin-right: 10px; -} - -/* - * Theme and Transitions buttons - */ - -.slide-menu-wrapper div[data-panel='Themes'] li, -.slide-menu-wrapper div[data-panel='Transitions'] li { - display: block; - text-align: left; - cursor: pointer; - color: #848484; -} - -/* - * Menu controls - */ -.reveal .slide-menu-button { - position: fixed; - left: 30px; - bottom: 30px; - z-index: 30; - font-size: 24px; -} - -/* - * Menu overlay - */ - -.slide-menu-wrapper .slide-menu-overlay { - position: fixed; - z-index: 199; - top: 0; - left: 0; - overflow: hidden; - width: 0; - height: 0; - background-color: #000; - opacity: 0; - transition: opacity 0.3s, width 0s 0.3s, height 0s 0.3s; -} - -.slide-menu-wrapper .slide-menu-overlay.active { - width: 100%; - height: 100%; - opacity: 0.7; - transition: opacity 0.3s; -} - -/* - * Hide menu for pdf printing - */ -body.print-pdf .slide-menu-wrapper .slide-menu, -body.print-pdf .reveal .slide-menu-button, -body.print-pdf .slide-menu-wrapper .slide-menu-overlay { - display: none; -} diff --git a/spaces/mygyasir/Stable-Diffusion-Fast111/app.py b/spaces/mygyasir/Stable-Diffusion-Fast111/app.py deleted file mode 100644 index 00583e4f526ab76464ff5cbe8c36413f953307da..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/Stable-Diffusion-Fast111/app.py +++ /dev/null @@ -1,5 +0,0 @@ -import gradio as gr - -interface = gr.Interface.load("models/digiplay/DreamShaper_8", title="AI ART CREATOR") -interface.queue(concurrency_count=5) -interface.launch() diff --git a/spaces/mygyasir/deep-voice-cloning/Dockerfile b/spaces/mygyasir/deep-voice-cloning/Dockerfile deleted file mode 100644 index 58e260a4e96f3b89a15514769fb2437a43495fef..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/deep-voice-cloning/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM python:3.9 -MAINTAINER Konstantin Verner -COPY . . -RUN pip install . \ No newline at end of file diff --git a/spaces/myrad01/Inpaint-Anything/third_party/lama/models/ade20k/segm_lib/nn/modules/comm.py b/spaces/myrad01/Inpaint-Anything/third_party/lama/models/ade20k/segm_lib/nn/modules/comm.py deleted file mode 100644 index b64bf6ba3b3e7abbab375c6dd4a87d8239e62138..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/lama/models/ade20k/segm_lib/nn/modules/comm.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# File : comm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import queue -import collections -import threading - -__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster'] - - -class FutureResult(object): - """A thread-safe future implementation. Used only as one-to-one pipe.""" - - def __init__(self): - self._result = None - self._lock = threading.Lock() - self._cond = threading.Condition(self._lock) - - def put(self, result): - with self._lock: - assert self._result is None, 'Previous result has\'t been fetched.' 
- self._result = result - self._cond.notify() - - def get(self): - with self._lock: - if self._result is None: - self._cond.wait() - - res = self._result - self._result = None - return res - - -_MasterRegistry = collections.namedtuple('MasterRegistry', ['result']) -_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result']) - - -class SlavePipe(_SlavePipeBase): - """Pipe for master-slave communication.""" - - def run_slave(self, msg): - self.queue.put((self.identifier, msg)) - ret = self.result.get() - self.queue.put(True) - return ret - - -class SyncMaster(object): - """An abstract `SyncMaster` object. - - - During the replication, as the data parallel will trigger an callback of each module, all slave devices should - call `register(id)` and obtain an `SlavePipe` to communicate with the master. - - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected, - and passed to a registered callback. - - After receiving the messages, the master device should gather the information and determine to message passed - back to each slave devices. - """ - - def __init__(self, master_callback): - """ - - Args: - master_callback: a callback to be invoked after having collected messages from slave devices. - """ - self._master_callback = master_callback - self._queue = queue.Queue() - self._registry = collections.OrderedDict() - self._activated = False - - def register_slave(self, identifier): - """ - Register an slave device. - - Args: - identifier: an identifier, usually is the device id. - - Returns: a `SlavePipe` object which can be used to communicate with the master device. - - """ - if self._activated: - assert self._queue.empty(), 'Queue is not clean before next initialization.' - self._activated = False - self._registry.clear() - future = FutureResult() - self._registry[identifier] = _MasterRegistry(future) - return SlavePipe(identifier, self._queue, future) - - def run_master(self, master_msg): - """ - Main entry for the master device in each forward pass. - The messages were first collected from each devices (including the master device), and then - an callback will be invoked to compute the message to be sent back to each devices - (including the master device). - - Args: - master_msg: the message that the master want to send to itself. This will be placed as the first - message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example. - - Returns: the message to be sent back to the master device. - - """ - self._activated = True - - intermediates = [(0, master_msg)] - for i in range(self.nr_slaves): - intermediates.append(self._queue.get()) - - results = self._master_callback(intermediates) - assert results[0][0] == 0, 'The first result should belongs to the master.' 
- - for i, res in results: - if i == 0: - continue - self._registry[i].result.put(res) - - for i in range(self.nr_slaves): - assert self._queue.get() is True - - return results[0][1] - - @property - def nr_slaves(self): - return len(self._registry) diff --git a/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/utils/flask_rest_api/restapi.py b/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/utils/flask_rest_api/restapi.py deleted file mode 100644 index b93ad16a0f58cf48bfc71afdbd1a548bc5ffe8db..0000000000000000000000000000000000000000 --- a/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/utils/flask_rest_api/restapi.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Run a rest API exposing the yolov5s object detection model -""" -import argparse -import io - -import torch -from flask import Flask, request -from PIL import Image - -app = Flask(__name__) - -DETECTION_URL = "/v1/object-detection/yolov5s" - - -@app.route(DETECTION_URL, methods=["POST"]) -def predict(): - if not request.method == "POST": - return - - if request.files.get("image"): - image_file = request.files["image"] - image_bytes = image_file.read() - - img = Image.open(io.BytesIO(image_bytes)) - - results = model(img, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient="records") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") - parser.add_argument("--port", default=5000, type=int, help="port number") - args = parser.parse_args() - - model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache - app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat diff --git a/spaces/nakas/audio-diffusion_style_transfer/audiodiffusion/mel.py b/spaces/nakas/audio-diffusion_style_transfer/audiodiffusion/mel.py deleted file mode 100644 index db2f7cffeec1984da325bc22ca2bdec06ffbe4aa..0000000000000000000000000000000000000000 --- a/spaces/nakas/audio-diffusion_style_transfer/audiodiffusion/mel.py +++ /dev/null @@ -1,127 +0,0 @@ -import warnings - -warnings.filterwarnings('ignore') - -import librosa -import numpy as np -from PIL import Image - - -class Mel: - - def __init__( - self, - x_res: int = 256, - y_res: int = 256, - sample_rate: int = 22050, - n_fft: int = 2048, - hop_length: int = 512, - top_db: int = 80, - ): - """Class to convert audio to mel spectrograms and vice versa. - - Args: - x_res (int): x resolution of spectrogram (time) - y_res (int): y resolution of spectrogram (frequency bins) - sample_rate (int): sample rate of audio - n_fft (int): number of Fast Fourier Transforms - hop_length (int): hop length (a higher number is recommended for lower than 256 y_res) - top_db (int): loudest in decibels - """ - self.x_res = x_res - self.y_res = y_res - self.sr = sample_rate - self.n_fft = n_fft - self.hop_length = hop_length - self.n_mels = self.y_res - self.slice_size = self.x_res * self.hop_length - 1 - self.fmax = self.sr / 2 - self.top_db = top_db - self.audio = None - - def load_audio(self, audio_file: str = None, raw_audio: np.ndarray = None): - """Load audio. - - Args: - audio_file (str): must be a file on disk due to Librosa limitation or - raw_audio (np.ndarray): audio as numpy array - """ - if audio_file is not None: - self.audio, _ = librosa.load(audio_file, mono=True, sr=self.sr) - else: - self.audio = raw_audio - - # Pad with silence if necessary. 
- if len(self.audio) < self.x_res * self.hop_length: - self.audio = np.concatenate([ - self.audio, - np.zeros((self.x_res * self.hop_length - len(self.audio), )) - ]) - - def get_number_of_slices(self) -> int: - """Get number of slices in audio. - - Returns: - int: number of spectograms audio can be sliced into - """ - return len(self.audio) // self.slice_size - - def get_audio_slice(self, slice: int = 0) -> np.ndarray: - """Get slice of audio. - - Args: - slice (int): slice number of audio (out of get_number_of_slices()) - - Returns: - np.ndarray: audio as numpy array - """ - return self.audio[self.slice_size * slice:self.slice_size * - (slice + 1)] - - def get_sample_rate(self) -> int: - """Get sample rate: - - Returns: - int: sample rate of audio - """ - return self.sr - - def audio_slice_to_image(self, slice: int) -> Image.Image: - """Convert slice of audio to spectrogram. - - Args: - slice (int): slice number of audio to convert (out of get_number_of_slices()) - - Returns: - PIL Image: grayscale image of x_res x y_res - """ - S = librosa.feature.melspectrogram( - y=self.get_audio_slice(slice), - sr=self.sr, - n_fft=self.n_fft, - hop_length=self.hop_length, - n_mels=self.n_mels, - fmax=self.fmax, - ) - log_S = librosa.power_to_db(S, ref=np.max, top_db=self.top_db) - bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + - 0.5).astype(np.uint8) - image = Image.fromarray(bytedata) - return image - - def image_to_audio(self, image: Image.Image) -> np.ndarray: - """Converts spectrogram to audio. - - Args: - image (PIL Image): x_res x y_res grayscale image - - Returns: - audio (np.ndarray): raw audio - """ - bytedata = np.frombuffer(image.tobytes(), dtype="uint8").reshape( - (image.height, image.width)) - log_S = bytedata.astype("float") * self.top_db / 255 - self.top_db - S = librosa.db_to_power(log_S) - audio = librosa.feature.inverse.mel_to_audio( - S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length) - return audio diff --git a/spaces/nateevo/docu-searcher/README.md b/spaces/nateevo/docu-searcher/README.md deleted file mode 100644 index 42b2d363044dd1ffaab184acf42a10f5cb652cdd..0000000000000000000000000000000000000000 --- a/spaces/nateevo/docu-searcher/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Docu Searcher -emoji: 🐨 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.14.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nateraw/jupyterlab-test2/login.html b/spaces/nateraw/jupyterlab-test2/login.html deleted file mode 100644 index 93ba1dcac698f6dfe0f23417687e8725b1928946..0000000000000000000000000000000000000000 --- a/spaces/nateraw/jupyterlab-test2/login.html +++ /dev/null @@ -1,67 +0,0 @@ -{% extends "page.html" %} - - -{% block stylesheet %} -{% endblock %} - -{% block site %} - -
      - - Hugging Face Logo -

      You can duplicate this Space to run it privately.

      -
      - - Duplicate Space -
      -
      -

      Token is huggingface

      - - {% if login_available %} - {# login_available means password-login is allowed. Show the form. #} -
      - -
      - {% else %} -

      {% trans %}No login available, you shouldn't be seeing this page.{% endtrans %}

      - {% endif %} - {% if message %} -
      - {% for key in message %} -
      - {{message[key]}} -
      - {% endfor %} -
      - {% endif %} - {% if token_available %} - {% block token_message %} - - {% endblock token_message %} - {% endif %} -
      - -{% endblock %} - - -{% block script %} -{% endblock %} \ No newline at end of file diff --git a/spaces/nateraw/simple-video-to-video/README.md b/spaces/nateraw/simple-video-to-video/README.md deleted file mode 100644 index 731ed8659d766783e195483c68fdca87fc82a931..0000000000000000000000000000000000000000 --- a/spaces/nateraw/simple-video-to-video/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Simple Video To Video -emoji: 🌍 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Canoco For Windows 4.5 Free Download WORK.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Canoco For Windows 4.5 Free Download WORK.md deleted file mode 100644 index ca162754c061feefa11df01560b66775f828f432..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Canoco For Windows 4.5 Free Download WORK.md +++ /dev/null @@ -1,105 +0,0 @@ - -

      Canoco for Windows 4.5 Free Download: A Comprehensive Guide

      -

      Canoco for Windows is a popular software package for multivariate statistical analysis using ordination methods in ecology and several related fields. It is a powerful tool for exploring and interpreting complex ecological data sets, such as species composition, environmental variables, and experimental treatments. In this article, you will learn everything you need to know about Canoco for Windows 4.5, including its features, benefits, limitations, and installation process, and how to use it for your own research. You will also find out how to download Canoco for Windows 4.5 for free and what the alternatives to this software are.

      -

      -

      What is Canoco for Windows 4.5?

      -

      Canoco for Windows 4.5 is the latest version of Canoco software, released in 2002 by Biometris, Wageningen and Petr Smilauer. It is a Windows-based program that integrates ordination with regression and permutation methodology, allowing sound statistical modelling of ecological data. Canoco for Windows 4.5 contains both linear and unimodal methods, such as principal component analysis (PCA), redundancy analysis (RDA), canonical correspondence analysis (CCA), detrended correspondence analysis (DCA), and non-metric multidimensional scaling (NMDS). It also offers various options for data transformation, scaling, selection, weighting, and testing.
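
Canoco itself is operated through its Windows interface rather than through code, but to get a feel for what these two families of methods do, the following minimal Python sketch runs a linear ordination (PCA) and a distance-based one (NMDS) on a made-up species-by-site table. It uses scikit-learn and SciPy purely as illustrative stand-ins; it is not how Canoco is actually driven, and the data are invented.

```python
# Illustrative only: a linear and a distance-based ordination on toy data.
import numpy as np
from sklearn.decomposition import PCA            # linear ordination
from sklearn.manifold import MDS                  # non-metric MDS (NMDS)
from scipy.spatial.distance import pdist, squareform

# Rows = sites, columns = species abundances (hypothetical values).
abundance = np.array([
    [12,  0,  3,  7],
    [ 8,  1,  0,  9],
    [ 0, 14,  2,  1],
    [ 1, 10,  5,  0],
    [ 4,  3,  9,  2],
], dtype=float)

# Linear method: extract the first two principal axes of the centred data.
pca_scores = PCA(n_components=2).fit_transform(abundance - abundance.mean(axis=0))

# Distance-based method: NMDS on Bray-Curtis dissimilarities between sites.
bray_curtis = squareform(pdist(abundance, metric="braycurtis"))
nmds = MDS(n_components=2, metric=False, dissimilarity="precomputed", random_state=0)
nmds_scores = nmds.fit_transform(bray_curtis)

print("PCA site scores:\n", pca_scores)
print("NMDS site scores:\n", nmds_scores)
```

In Canoco itself the equivalent choices are made through dialog boxes rather than code, but the underlying idea, projecting sites into a low-dimensional ordination space, is the same.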

      -

      What are the benefits of using Canoco for Windows 4.5?

      -

      Canoco for Windows 4.5 has many advantages over other software for multivariate analysis of ecological data. Some of the benefits are:

      -
        -
      • It is user-friendly and easy to learn. It has a graphical user interface (GUI) that allows you to perform most of the tasks with a few clicks. It also has a comprehensive user's guide and a reference manual that explain the theory and practice of ordination methods.
      • It is flexible and versatile. It can handle various types of data, such as abundance, presence/absence, biomass, environmental factors, spatial coordinates, etc. It can also deal with different data structures, such as unbalanced designs, nested designs, repeated measures, etc. It can handle up to 5000 samples and 5000 variables in one analysis.
      • It is robust and reliable. It uses state-of-the-art algorithms and techniques for data analysis, such as Monte Carlo permutation tests, partial ordination, constrained ordination, forward selection, backward elimination, etc. It also provides diagnostic tools and graphical outputs to check the validity and quality of the results.
      • It is compatible and easy to integrate. It can import and export data from various formats, such as Excel, ASCII, SYSTAT, TWINSPAN, etc. (a short data-preparation sketch follows this list). It can also work with other software packages, such as CanoDraw for Windows (a graphical editor for ordination diagrams), R (a free software environment for statistical computing), or ArcView (a geographic information system).
      -
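
As a concrete, hypothetical example of the kind of data preparation the last point refers to, the snippet below uses pandas to turn a long-format field table into the site-by-species matrix that ordination software expects. The file name and column names are invented for illustration; they are not part of Canoco's own import tools.

```python
# Hypothetical example: reshape a long-format survey table (one row per
# site/species observation) into a site-by-species abundance matrix.
# "survey.xlsx" and its column names are invented for this illustration.
import pandas as pd

long_table = pd.read_excel("survey.xlsx")   # columns: site, species, abundance
matrix = long_table.pivot_table(index="site",
                                columns="species",
                                values="abundance",
                                aggfunc="sum",
                                fill_value=0)

# Export in a plain format that analysis software can import.
matrix.to_csv("site_by_species.csv")
print(matrix.head())
```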

      What are the limitations of using Canoco for Windows 4.5?

      -

      Despite its many advantages, Canoco for Windows 4.5 also has some drawbacks that you should be aware of before using it. Some of the limitations are:

      -
        -
      • It is not free. You need to purchase a license to use Canoco for Windows 4.5 legally. The price depends on the type of license (single-user or network) and the number of users (academic or commercial). The license also expires after a certain period of time (usually one year) and needs to be renewed.
      • It is not updated. The last version of Canoco for Windows 4.5 was released in 2002 and no further updates or bug fixes have been made since then. This means that some features may not work properly or may be incompatible with newer versions of Windows or other software.
      • It is not comprehensive. Although Canoco for Windows 4.5 covers most of the common ordination methods used in ecology, it does not include some newer or more advanced methods that have been developed in recent years. For example, it does not support generalized linear models (GLM), generalized additive models (GAM), distance-based redundancy analysis (db-RDA), or constrained additive ordination (CAO).

      How to install Canoco for Windows 4.5?

        -

        If you have purchased a license for Canoco for Windows 4.5, you can install it on your computer by following these steps:

        -
          -
        1. Download the installation file from the official website or from the CD-ROM that came with your license.
        2. Run the installation file and follow the instructions on the screen. You will need to enter your license number and password during the installation process.
        3. Choose the destination folder where you want to install Canoco for Windows 4.5 and click Next.
        4. Select the components that you want to install, such as Canoco for Windows, CanoDraw for Windows, R interface, etc., and click Next.
        5. Wait for the installation to complete and click Finish.
        -

        You can now start using Canoco for Windows 4.5 by clicking on its icon on your desktop or in your Start menu.

        -

        -

        How to use Canoco for Windows 4.5?

        -

        To use Canoco for Windows 4.5 for your data analysis, you need to follow these general steps:

        -
          -
        1. Prepare your data in a suitable format and import it into Canoco for Windows 4.5. You can use Excel, ASCII, SYSTAT, TWINSPAN, or other formats to create your data file. You can also use CanoDraw for Windows to edit your data graphically. You need to have two types of data: response data (such as species abundance or biomass) and explanatory data (such as environmental variables or experimental treatments).
        2. Choose the ordination method that best suits your data and research question. You can use linear methods (such as PCA or RDA) if your response data are linearly related to your explanatory data, or unimodal methods (such as CCA or DCA) if your response data are unimodally related to your explanatory data. You can also use NMDS if you want to use a distance-based method that does not assume any particular shape of the response curve.
        3. Set the options and parameters for your ordination method, such as data transformation, scaling, selection, weighting, testing, etc. You can use the default settings or customize them according to your preferences and needs.
        4. Run the analysis and view the results. You can see the numerical results in the output window and the graphical results in the diagram window. You can also export the results to other formats or software for further processing or presentation.
        5. Interpret the results and draw conclusions. You can use the diagnostic tools and graphical outputs to check the validity and quality of the results. You can also use the statistical tests and modelling techniques to assess the significance and strength of the relationships between your response and explanatory data (a minimal permutation-test sketch is shown after the next paragraph).
        -

        For more detailed instructions and examples on how to use Canoco for Windows 4.5, you can refer to the user's guide or the reference manual that come with the software.
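
To make the idea of the Monte Carlo permutation test mentioned above concrete, here is a minimal, self-contained Python sketch (NumPy only, with made-up data): it measures how much of the variation in a species matrix a single environmental variable explains through a least-squares fit, then permutes that variable to see how often chance alone does as well. This is a simplified stand-in for the significance testing that Canoco performs, not its actual implementation.

```python
# Toy Monte Carlo permutation test: does an environmental gradient explain
# more variation in the species data than random permutations of it would?
# Data are invented; this is a simplified stand-in, not Canoco's algorithm.
import numpy as np

rng = np.random.default_rng(0)
n_sites = 30
env = rng.normal(size=n_sites)                      # one environmental variable
species = np.column_stack([                         # three species: two respond, one is noise
    2.0 * env + rng.normal(scale=1.0, size=n_sites),
    -1.0 * env + rng.normal(scale=1.0, size=n_sites),
    rng.normal(scale=1.0, size=n_sites),
])

def explained_fraction(x, y):
    """Fraction of total variance in y explained by a least-squares fit on x."""
    design = np.column_stack([np.ones_like(x), x])
    coef, *_ = np.linalg.lstsq(design, y, rcond=None)
    fitted = design @ coef
    centred = y - y.mean(axis=0)
    return np.sum((fitted - y.mean(axis=0)) ** 2) / np.sum(centred ** 2)

observed = explained_fraction(env, species)
n_perm = 999
greater = sum(
    explained_fraction(rng.permutation(env), species) >= observed
    for _ in range(n_perm)
)
p_value = (greater + 1) / (n_perm + 1)
print(f"explained fraction = {observed:.3f}, permutation p-value = {p_value:.3f}")
```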

        -

        How to download Canoco for Windows 4.5 for free?

        -

        If you do not have a license for Canoco for Windows 4.5, you may wonder if you can download it for free from somewhere else. The answer is yes, but with some caveats. There are some websites that offer free downloads of Canoco for Windows 4.5, such as Softpedia, FileHorse, or Softonic. However, these websites are not authorized by Biometris or Petr Smilauer, and they may contain viruses, malware, or other harmful components that could damage your computer or compromise your privacy. Therefore, we do not recommend downloading Canoco for Windows 4.5 from these sources.

        -

        The only legal way to download Canoco for Windows 4.5 for free is to request a trial version from Biometris. The trial version is valid for 30 days and has all the features of the full version, except that it limits the number of samples and variables that you can analyze (up to 50 samples and 50 variables). To request a trial version, you need to fill out an online form with your name, email address, institution, country, and purpose of use. You will then receive an email with a link to download the trial version and a temporary license number and password.

        -

        What are the alternatives to Canoco for Windows 4.5?

        -

        If you are looking for other software packages that can perform multivariate analysis of ecological data using ordination methods, you have several options to choose from. Some of the alternatives to Canoco for Windows 4.5 are:




        -
          -
        • Open your project in GarageBand and tap the My Songs button in the top-left corner of the screen.
        • Tap Select and tap your project to select it.
        • Tap Share and choose Song.
        • Tap GarageBand as the format. This will ensure that your project file retains all its tracks and settings.
        • Tap Save to Files.
        • Choose a location where you want to save your project file. You can save it to iCloud Drive or to your iPad's local storage.
        • Tap Add. Your project file will be saved to the Files app.
        - -

        Step 2: Compress Your Project File

        -

        The next thing you need to do is compress your project file. This will reduce its size and make it easier to send by email. To do this, follow these steps:

        -
          -
        • Open the Files app and locate your project file.
        • Tap Select and tap your project file to select it.
        • Tap More (the three dots icon) and choose Compress.
        • A new file with a .zip extension will be created in the same location as your original project file.
        - -

        Step 3: Email Your Compressed Project File

        -

        The final thing you need to do is email your compressed project file to the recipient. To do this, follow these steps:

        -
          -
        • Open the Files app and locate your compressed project file.
        • Tap Select and tap your compressed project file to select it.
        • Tap Share and choose Email.
        • The Mail app will open with a new message and your compressed project file attached.
        • Type in the recipient's email address, a subject line, and a message if you wish.
        • Tap Send. Your email will be sent with your compressed project file attached.
        - -

        Conclusion

        -

        Emailing a GarageBand project file on iPad is not as simple as emailing an audio file, but it is possible if you follow the steps above. By saving your project file to the Files app, compressing it, and emailing it, you can share your GarageBand creations with other GarageBand users who can open them on their own devices. This way, you can collaborate, remix, or learn from each other's projects. We hope this article was helpful and informative. If you have any questions or comments, feel free to leave them below.

        -

        -
        -
        \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/model_zoo/model_zoo.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/model_zoo/model_zoo.py deleted file mode 100644 index 5b90bc9a165ea46ada72ed0e71f1e80e71ea9f40..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/model_zoo/model_zoo.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import os -from typing import Optional -import pkg_resources -import torch - -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate -from detectron2.modeling import build_model - - -class _ModelZooUrls(object): - """ - Mapping from names to officially released Detectron2 pre-trained models. - """ - - S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" - - # format: {config_path.yaml} -> model_id/model_final_{commit}.pkl - CONFIG_PATH_TO_URL_SUFFIX = { - # COCO Detection with Faster R-CNN - "COCO-Detection/faster_rcnn_R_50_C4_1x": "137257644/model_final_721ade.pkl", - "COCO-Detection/faster_rcnn_R_50_DC5_1x": "137847829/model_final_51d356.pkl", - "COCO-Detection/faster_rcnn_R_50_FPN_1x": "137257794/model_final_b275ba.pkl", - "COCO-Detection/faster_rcnn_R_50_C4_3x": "137849393/model_final_f97cb7.pkl", - "COCO-Detection/faster_rcnn_R_50_DC5_3x": "137849425/model_final_68d202.pkl", - "COCO-Detection/faster_rcnn_R_50_FPN_3x": "137849458/model_final_280758.pkl", - "COCO-Detection/faster_rcnn_R_101_C4_3x": "138204752/model_final_298dad.pkl", - "COCO-Detection/faster_rcnn_R_101_DC5_3x": "138204841/model_final_3e0943.pkl", - "COCO-Detection/faster_rcnn_R_101_FPN_3x": "137851257/model_final_f6e8b1.pkl", - "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x": "139173657/model_final_68b088.pkl", - # COCO Detection with RetinaNet - "COCO-Detection/retinanet_R_50_FPN_1x": "190397773/model_final_bfca0b.pkl", - "COCO-Detection/retinanet_R_50_FPN_3x": "190397829/model_final_5bd44e.pkl", - "COCO-Detection/retinanet_R_101_FPN_3x": "190397697/model_final_971ab9.pkl", - # COCO Detection with RPN and Fast R-CNN - "COCO-Detection/rpn_R_50_C4_1x": "137258005/model_final_450694.pkl", - "COCO-Detection/rpn_R_50_FPN_1x": "137258492/model_final_02ce48.pkl", - "COCO-Detection/fast_rcnn_R_50_FPN_1x": "137635226/model_final_e5f7ce.pkl", - # COCO Instance Segmentation Baselines with Mask R-CNN - "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x": "137259246/model_final_9243eb.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x": "137260150/model_final_4f86c3.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "137260431/model_final_a54504.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x": "137849525/model_final_4ce675.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x": "137849551/model_final_84107b.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x": "137849600/model_final_f10217.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x": "138363239/model_final_a2914c.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x": "138363294/model_final_0464b7.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x": "138205316/model_final_a3ec72.pkl", - "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x": "139653917/model_final_2d9806.pkl", # noqa - # New baselines using Large-Scale Jitter and Longer Training Schedule - "new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ": "42047764/model_final_bb69de.pkl", - 
"new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ": "42047638/model_final_89a8d3.pkl", - "new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ": "42019571/model_final_14d201.pkl", - "new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ": "42025812/model_final_4f7b58.pkl", - "new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ": "42131867/model_final_0bb7ae.pkl", - "new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ": "42073830/model_final_f96b26.pkl", - "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ": "42047771/model_final_b7fbab.pkl", # noqa - "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ": "42132721/model_final_5d87c1.pkl", # noqa - "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ": "42025447/model_final_f1362d.pkl", # noqa - "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ": "42047784/model_final_6ba57e.pkl", # noqa - "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ": "42047642/model_final_27b9c1.pkl", # noqa - "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ": "42045954/model_final_ef3a80.pkl", # noqa - # COCO Person Keypoint Detection Baselines with Keypoint R-CNN - "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x": "137261548/model_final_04e291.pkl", - "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x": "137849621/model_final_a6e10b.pkl", - "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x": "138363331/model_final_997cc7.pkl", - "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x": "139686956/model_final_5ad38f.pkl", - # COCO Panoptic Segmentation Baselines with Panoptic FPN - "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x": "139514544/model_final_dbfeb4.pkl", - "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x": "139514569/model_final_c10459.pkl", - "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x": "139514519/model_final_cafdb1.pkl", - # LVIS Instance Segmentation Baselines with Mask R-CNN - "LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "144219072/model_final_571f7c.pkl", # noqa - "LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x": "144219035/model_final_824ab5.pkl", # noqa - "LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x": "144219108/model_final_5e3439.pkl", # noqa - # Cityscapes & Pascal VOC Baselines - "Cityscapes/mask_rcnn_R_50_FPN": "142423278/model_final_af9cf5.pkl", - "PascalVOC-Detection/faster_rcnn_R_50_C4": "142202221/model_final_b1acc2.pkl", - # Other Settings - "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5": "138602867/model_final_65c703.pkl", - "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5": "144998336/model_final_821d0b.pkl", - "Misc/cascade_mask_rcnn_R_50_FPN_1x": "138602847/model_final_e9d89b.pkl", - "Misc/cascade_mask_rcnn_R_50_FPN_3x": "144998488/model_final_480dd8.pkl", - "Misc/mask_rcnn_R_50_FPN_3x_syncbn": "169527823/model_final_3b3c51.pkl", - "Misc/mask_rcnn_R_50_FPN_3x_gn": "138602888/model_final_dc5d9e.pkl", - "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn": "138602908/model_final_01ca85.pkl", - "Misc/scratch_mask_rcnn_R_50_FPN_9x_gn": "183808979/model_final_da7b4c.pkl", - "Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn": "184226666/model_final_5ce33e.pkl", - "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x": "139797668/model_final_be35db.pkl", - "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv": "18131413/model_0039999_e76410.pkl", # noqa - # D1 Comparisons - "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x": "137781054/model_final_7ab50c.pkl", # noqa - "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x": "137781281/model_final_62ca52.pkl", # noqa - "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x": "137781195/model_final_cce136.pkl", - } - - @staticmethod - def 
query(config_path: str) -> Optional[str]: - """ - Args: - config_path: relative config filename - """ - name = config_path.replace(".yaml", "").replace(".py", "") - if name in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX: - suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[name] - return _ModelZooUrls.S3_PREFIX + name + "/" + suffix - return None - - -def get_checkpoint_url(config_path): - """ - Returns the URL to the model trained using the given config - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - - Returns: - str: a URL to the model - """ - url = _ModelZooUrls.query(config_path) - if url is None: - raise RuntimeError("Pretrained model for {} is not available!".format(config_path)) - return url - - -def get_config_file(config_path): - """ - Returns path to a builtin config file. - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - - Returns: - str: the real path to the config file. - """ - cfg_file = pkg_resources.resource_filename( - "detectron2.model_zoo", os.path.join("configs", config_path) - ) - if not os.path.exists(cfg_file): - raise RuntimeError("{} not available in Model Zoo!".format(config_path)) - return cfg_file - - -def get_config(config_path, trained: bool = False): - """ - Returns a config object for a model in model zoo. - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights. - If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used - instead; this will typically (though not always) initialize a subset of weights using - an ImageNet pre-trained model, while randomly initializing the other weights. - - Returns: - CfgNode or omegaconf.DictConfig: a config object - """ - cfg_file = get_config_file(config_path) - if cfg_file.endswith(".yaml"): - cfg = get_cfg() - cfg.merge_from_file(cfg_file) - if trained: - cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path) - return cfg - elif cfg_file.endswith(".py"): - cfg = LazyConfig.load(cfg_file) - if trained: - url = get_checkpoint_url(config_path) - if "train" in cfg and "init_checkpoint" in cfg.train: - cfg.train.init_checkpoint = url - else: - raise NotImplementedError - return cfg - - -def get(config_path, trained: bool = False, device: Optional[str] = None): - """ - Get a model specified by relative path under Detectron2's official ``configs/`` directory. - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - trained (bool): see :func:`get_config`. - device (str or None): overwrite the device in config, if given. - - Returns: - nn.Module: a detectron2 model. Will be in training mode. 
- - Example: - :: - from detectron2 import model_zoo - model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True) - """ - cfg = get_config(config_path, trained) - if device is None and not torch.cuda.is_available(): - device = "cpu" - if device is not None and isinstance(cfg, CfgNode): - cfg.MODEL.DEVICE = device - - if isinstance(cfg, CfgNode): - model = build_model(cfg) - DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) - else: - model = instantiate(cfg.model) - if device is not None: - model = model.to(device) - if "train" in cfg and "init_checkpoint" in cfg.train: - DetectionCheckpointer(model).load(cfg.train.init_checkpoint) - return model diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py deleted file mode 100644 index 53272c726af810efc248f2428dda7ca7271fcd00..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import logging -import numpy as np -from typing import Callable, List, Union -import torch -from panopticapi.utils import rgb2id - -from detectron2.config import configurable -from detectron2.data import MetadataCatalog -from detectron2.data import detection_utils as utils -from detectron2.data import transforms as T - -from .target_generator import PanopticDeepLabTargetGenerator - -__all__ = ["PanopticDeeplabDatasetMapper"] - - -class PanopticDeeplabDatasetMapper: - """ - The callable currently does the following: - - 1. Read the image from "file_name" and label from "pan_seg_file_name" - 2. Applies random scale, crop and flip transforms to image and label - 3. Prepare data to Tensor and generate training targets from label - """ - - @configurable - def __init__( - self, - *, - augmentations: List[Union[T.Augmentation, T.Transform]], - image_format: str, - panoptic_target_generator: Callable, - ): - """ - NOTE: this interface is experimental. - - Args: - augmentations: a list of augmentations or deterministic transforms to apply - image_format: an image format supported by :func:`detection_utils.read_image`. - panoptic_target_generator: a callable that takes "panoptic_seg" and - "segments_info" to generate training targets for the model. - """ - # fmt: off - self.augmentations = T.AugmentationList(augmentations) - self.image_format = image_format - # fmt: on - logger = logging.getLogger(__name__) - logger.info("Augmentations used in training: " + str(augmentations)) - - self.panoptic_target_generator = panoptic_target_generator - - @classmethod - def from_config(cls, cfg): - augs = [ - T.ResizeShortestEdge( - cfg.INPUT.MIN_SIZE_TRAIN, - cfg.INPUT.MAX_SIZE_TRAIN, - cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING, - ) - ] - if cfg.INPUT.CROP.ENABLED: - augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) - augs.append(T.RandomFlip()) - - # Assume always applies to the training set. 
- dataset_names = cfg.DATASETS.TRAIN - meta = MetadataCatalog.get(dataset_names[0]) - panoptic_target_generator = PanopticDeepLabTargetGenerator( - ignore_label=meta.ignore_label, - thing_ids=list(meta.thing_dataset_id_to_contiguous_id.values()), - sigma=cfg.INPUT.GAUSSIAN_SIGMA, - ignore_stuff_in_offset=cfg.INPUT.IGNORE_STUFF_IN_OFFSET, - small_instance_area=cfg.INPUT.SMALL_INSTANCE_AREA, - small_instance_weight=cfg.INPUT.SMALL_INSTANCE_WEIGHT, - ignore_crowd_in_semantic=cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC, - ) - - ret = { - "augmentations": augs, - "image_format": cfg.INPUT.FORMAT, - "panoptic_target_generator": panoptic_target_generator, - } - return ret - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. - - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - # Load image. - image = utils.read_image(dataset_dict["file_name"], format=self.image_format) - utils.check_image_size(dataset_dict, image) - # Panoptic label is encoded in RGB image. - pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB") - - # Reuses semantic transform for panoptic labels. - aug_input = T.AugInput(image, sem_seg=pan_seg_gt) - _ = self.augmentations(aug_input) - image, pan_seg_gt = aug_input.image, aug_input.sem_seg - - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - - # Generates training targets for Panoptic-DeepLab. - targets = self.panoptic_target_generator(rgb2id(pan_seg_gt), dataset_dict["segments_info"]) - dataset_dict.update(targets) - - return dataset_dict diff --git a/spaces/niro-private/chatCSV/src/modules/chatbot.py b/spaces/niro-private/chatCSV/src/modules/chatbot.py deleted file mode 100644 index 8a3a4c10658ffd652514fde450128785aa0cb6ad..0000000000000000000000000000000000000000 --- a/spaces/niro-private/chatCSV/src/modules/chatbot.py +++ /dev/null @@ -1,123 +0,0 @@ -import streamlit as st -from langchain.chains import ConversationalRetrievalChain -from langchain.prompts.prompt import PromptTemplate -from langchain.agents import create_csv_agent -from langchain.llms import OpenAI -from langchain.chat_models import ChatOpenAI - - -class Chatbot_txt: - _template = """Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question. - Chat History: - {chat_history} - Follow-up entry: {question} - Standalone question:""" - - CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) - - qa_template = """"You are an AI conversational assistant to answer questions based on a context. - You are given data from a txt file and a question, you must help the user find the information they need. - Your answers should be friendly, in the same language. 
- question: {question} - ========= - context: {context} - ======= - """ - - QA_PROMPT = PromptTemplate(template=qa_template, input_variables=["question", "context"]) - - def __init__(self, model_name, temperature, vectors): - self.model_name = model_name - self.temperature = temperature - self.vectors = vectors - - def conversational_chat(self, query): - """ - Starts a conversational chat with a model via Langchain - """ - chain = ConversationalRetrievalChain.from_llm( - llm=ChatOpenAI(model_name=self.model_name, temperature=self.temperature), - condense_question_prompt=self.CONDENSE_QUESTION_PROMPT, - qa_prompt=self.QA_PROMPT, - retriever=self.vectors.as_retriever(), - ) - result = chain({"question": query, "chat_history": st.session_state["history"]}) - - st.session_state["history"].append((query, result["answer"])) - - return result["answer"] - - -class Chatbot: - _template = """Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question. - Chat History: - {chat_history} - Follow-up entry: {question} - Standalone question:""" - - CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) - - qa_template = """"You are an AI conversational assistant to answer questions based on a context. - You are given data from a csv file and a question, you must help the user find the information they need. - Your answers should be friendly, in the same language. - question: {question} - ========= - context: {context} - ======= - """ - - QA_PROMPT = PromptTemplate(template=qa_template, input_variables=["question", "context"]) - - def __init__(self, model_name, temperature, vectors): - self.model_name = model_name - self.temperature = temperature - self.vectors = vectors - - def conversational_chat(self, query): - """ - Starts a conversational chat with a model via Langchain - """ - chain = ConversationalRetrievalChain.from_llm( - llm=ChatOpenAI(model_name=self.model_name, temperature=self.temperature), - condense_question_prompt=self.CONDENSE_QUESTION_PROMPT, - qa_prompt=self.QA_PROMPT, - retriever=self.vectors.as_retriever(), - ) - result = chain({"question": query, "chat_history": st.session_state["history"]}) - - st.session_state["history"].append((query, result["answer"])) - - return result["answer"] - - -class Chatbot_ledger: - - def __init__(self, model_name, temperature, csv): - self.model_name = model_name - self.temperature = temperature - self.csv = csv - - def csv_agent(self, query): - agent = create_csv_agent(OpenAI(temperature=self.temperature, model_name=self.model_name), - self.csv, - verbose=True, - index_col=0) - result = agent.run(query) - st.session_state['history'].append((query, result)) - return result - - def conversational_chat(self, query): - """ - Starts a conversational chat with a model via Langchain - """ - chain = ConversationalRetrievalChain.from_llm( - llm=ChatOpenAI(model_name=self.model_name, temperature=self.temperature), - condense_question_prompt=self.CONDENSE_QUESTION_PROMPT, - qa_prompt=self.QA_PROMPT, - retriever=self.vectors.as_retriever(), - ) - result = chain({"question": query, "chat_history": st.session_state["history"]}) - - st.session_state["history"].append((query, result["answer"])) - - return result["answer"] diff --git a/spaces/nomic-ai/nomic-ai_gpt4all-j-prompt-generations/README.md b/spaces/nomic-ai/nomic-ai_gpt4all-j-prompt-generations/README.md deleted file mode 100644 index f01f50e1d0511f4b6c62560648ee057d33471f8e..0000000000000000000000000000000000000000 --- 
a/spaces/nomic-ai/nomic-ai_gpt4all-j-prompt-generations/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: nomic-ai/gpt4all-j-prompt-generations -emoji: 🗺️ -colorFrom: purple -colorTo: red -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nomic-ai/squad/README.md b/spaces/nomic-ai/squad/README.md deleted file mode 100644 index 594022cf5b4a4cfae45c96fe8e73b574a0c7f302..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/squad/README.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: squad -emoji: 🗺️ -colorFrom: purple -colorTo: red -sdk: static -pinned: false ---- - diff --git a/spaces/odettecantswim/rvc-mlbb-v2/README.md b/spaces/odettecantswim/rvc-mlbb-v2/README.md deleted file mode 100644 index 02f99c82ba02617612a182742f7829c8b6cb3813..0000000000000000000000000000000000000000 --- a/spaces/odettecantswim/rvc-mlbb-v2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: RVC MLBB V2 -emoji: 🏃 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: true -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/ofikodar/chatgpt-resume-builder/src/chatbot/__init__.py b/spaces/ofikodar/chatgpt-resume-builder/src/chatbot/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/omarelsayeed/SentenceSimilarity-Quran-v2/app.py b/spaces/omarelsayeed/SentenceSimilarity-Quran-v2/app.py deleted file mode 100644 index cf05ac90715b0442e4edce89eb9025ecd4f21c60..0000000000000000000000000000000000000000 --- a/spaces/omarelsayeed/SentenceSimilarity-Quran-v2/app.py +++ /dev/null @@ -1,51 +0,0 @@ -import pandas as pd -import spacy -import gradio as gr -import re - - -dataset = pd.read_excel('Dataset-Verse-by-Verse.xlsx') -dataset.rename(columns={'ArabicText': 'text'}, inplace=True) -nlp = spacy.load('aravec_model') -all_docs = [nlp(doc) for doc in dataset['text']] - -def clean_text(text): - # remove tashkeel - text = re.sub('[~ًٌٍَُِّْ]', '', text) - text = re.sub('[ًٌٍَُِّْـ]', '', text) - # ozbot el alef - text = re.sub('إ', 'ا', text) - text = re.sub('أ', 'ا', text) - text = re.sub('آ', 'ا', text) - # remove longation - text = re.sub(r'(.)\1+', r'\1\1', text) - # remove extra spaces - text = re.sub(' +', ' ', text) - text = text.strip() - text = re.sub('[\s]+', ' ', text) - # remove punctuations - text = re.sub(r'[^\w\s]', '', text) - return text - -def get_similar_sentences(text): - text = clean_text(text) - ref_sentence = nlp(text) - similar_sentences = [] - for i,doc in enumerate(all_docs): - similar_sentences.append((doc, ref_sentence.similarity(doc) , i)) - similar_sentences.sort(key=lambda x: x[1], reverse=True) - top_10 = similar_sentences[:10] - # add the surahnamearabic to text - return dict(zip([' [ ' + dataset['SurahNameArabic'][i] + ' ] ' + doc.text for doc, _, i in top_10], [similarity for _, similarity, _ in top_10])) - -text_input = gr.inputs.Textbox(lines = 1 , label = "Enter a Quran Verse" ) - -label = gr.outputs.Label() -examples = ['الحمدلله رب العالمين', - 'مثلهم كمثل الذي استوقد نارًا فلما أضاءت ما حوله ذهب الله بنورهم وتركهم في ظلماتٍ لا يبصرون', - 'إن الذين كفروا سواء عليهم أأنذرتهم أم لم تنذرهم لا يؤمنون', - 'ونادى أصحاب الجنة أصحاب النار أن قد وجدنا ما وعدنا ربنا حقا فهل وجدتم ما وعد ربكم حقا ۖ قالوا نعم ۚ فأذن مؤذن بينهم أن لعنة الله على 
الظالمين' - ] - -intf = gr.Interface(fn = get_similar_sentences , inputs = text_input , outputs = label , examples=examples ) -intf.launch() \ No newline at end of file diff --git a/spaces/omlab/vlchecklist_demo/models/vilt/engine.py b/spaces/omlab/vlchecklist_demo/models/vilt/engine.py deleted file mode 100644 index 6d9529c9924914e957aea1e4a2e1de2199dba2d0..0000000000000000000000000000000000000000 --- a/spaces/omlab/vlchecklist_demo/models/vilt/engine.py +++ /dev/null @@ -1,212 +0,0 @@ -import torch -import requests -import io -import os -from PIL import Image -from typing import List -import base64 -import json -from models.vilt.modules.vilt_module import ViLTransformer -from models.vilt.datamodules.datamodule_base import get_pretrained_tokenizer -from models.vilt.transforms import pixelbert_transform -from config import EnvVar -from utils.helpers import LRUCache, chunks -from models.model import Model -from models.vilt.modules.objectives import cost_matrix_cosine, ipot -import numpy as np -from models.vilt.HeatMap import HeatMap - - -class ViLT(Model): - root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../../") - MAX_CACHE = 20 - - def __init__(self): - self._models = LRUCache(self.MAX_CACHE) - self.batch_size = EnvVar.BATCH_SIZE - self.device = EnvVar.DEVICE - - def _load_model(self, model_id): - if model_id is None: - raise Exception("Model ID cannot be None.") - - if not self._models.has(model_id): - model_key = model_id.split('.')[0] - _config = json.load(open(os.path.join(self.root_dir, EnvVar.MODEL_DIR, model_key)+'.json')) - _config['load_path'] = os.path.join(self.root_dir, EnvVar.MODEL_DIR, model_id) - tokenizer = get_pretrained_tokenizer(_config["tokenizer"]) - model = ViLTransformer(_config) - model.setup("test") - model.eval() - model.to(EnvVar.DEVICE) - self._models.put(model_id, (model, tokenizer)) - - return self._models.get(model_id) - - def _load_data(self, src_type, data): - - def transform(x): - img = x.resize((384, 384)) - img = pixelbert_transform(size=384)(img) - img = img.unsqueeze(0).to(self.device) - return img - - if src_type == 'local': - image_data = [] - for x in data: - #temp = Image.open(x).convert('RGB') - image_data.append(transform(x)) - - elif src_type == 'url': - image_data = [] - for x in data: - temp = Image.open(io.BytesIO(requests.get(x).content)).convert("RGB") - image_data.append(transform(temp)) - - elif src_type == 'base64': - image_data = [] - for x in data: - temp = Image.open(io.BytesIO(base64.b64decode(x))) - image_data.append(transform(temp)) - else: - raise Exception("Unknown mode {}.".format(src_type)) - - return image_data - - # def predict(self, model_id, - # images, - # texts, - # ): - # model, tokenizer = self._load_model(model_id) - # # process images by batch - # probs = [] - # logits = [] - # image = self.transform(images) - # img = [image] - - - # batch = {"text": texts, "image": img} - - # inferred_token = texts - # batch["text"] = inferred_token - # encoded = tokenizer(inferred_token, padding='longest') - - # batch["text_ids"] = torch.tensor(encoded["input_ids"]).to(self.device) - # batch["text_labels"] = torch.tensor(encoded["input_ids"]).to(self.device) - # batch["text_masks"] = torch.tensor(encoded["attention_mask"]).to(self.device) - - # with torch.no_grad(): - # infer = model(batch) - # itm_logits = model.itm_score(infer["cls_feats"]) - # soft_prob = torch.softmax(itm_logits, dim=1) - - # probs.extend(soft_prob.tolist()) - # logits.extend(itm_logits.tolist()) - - # return probs - - def predict(self, 
model_id, - images: List, - texts: List, - ): - model, tokenizer = self._load_model(model_id) - # process images by batch - probs = [] - logits = [] - - for chunk_i, chunk_t in zip(chunks(images, EnvVar.BATCH_SIZE), chunks(texts, EnvVar.BATCH_SIZE)): - image_data = self._load_data('local', chunk_i) - image_batch_size = len(image_data) - - batch_images = [] # (num_image x num_text) - batch_text = [] - - for i,t in zip(image_data,chunk_t): - batch_images.append(i) - batch_text.append(t) - - batch = {"text": batch_text, "image": batch_images} - - inferred_token = batch_text - batch["text"] = inferred_token - encoded = tokenizer(inferred_token, padding='longest') - - batch["text_ids"] = torch.tensor(encoded["input_ids"]).to(self.device) - batch["text_labels"] = torch.tensor(encoded["input_ids"]).to(self.device) - batch["text_masks"] = torch.tensor(encoded["attention_mask"]).to(self.device) - - with torch.no_grad(): - infer = model(batch) - - itm_logits = model.itm_score(infer["cls_feats"]) - soft_prob = torch.softmax(itm_logits, dim=1) - - probs.extend(soft_prob.tolist()) - logits.extend(itm_logits.tolist()) - else: - return probs,logits - - def generate_heatmap(self, infer, image, tokenizer, input_ids): - image = Image.open(image).convert('RGB') - txt_emb, img_emb = infer["text_feats"], infer["image_feats"] - txt_mask, img_mask = ( - infer["text_masks"].bool(), - infer["image_masks"].bool(), - ) - for i, _len in enumerate(txt_mask.sum(dim=1)): - txt_mask[i, _len - 1] = False - txt_mask[:, 0] = False - img_mask[:, 0] = False - txt_pad, img_pad = ~txt_mask, ~img_mask - - cost = cost_matrix_cosine(txt_emb.float(), img_emb.float()) - joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2) - cost.masked_fill_(joint_pad, 0) - txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to( - dtype=cost.dtype - ) - img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to( - dtype=cost.dtype - ) - T = ipot( - cost.detach(), - txt_len, - txt_pad, - img_len, - img_pad, - joint_pad, - 0.1, - 1000, - 1, - ) - - plan = T[0] - plan_single = plan * len(txt_emb) - outputs = [] - for hidx in range(1,len(input_ids)-1): - cost_ = plan_single.t() - cost_ = cost_[hidx][1:].cpu() - - patch_index, (H, W) = infer["patch_index"] - heatmap = torch.zeros(H, W) - for i, pidx in enumerate(patch_index[0]): - h, w = pidx[0].item(), pidx[1].item() - heatmap[h, w] = cost_[i] - - heatmap = (heatmap - heatmap.mean()) / heatmap.std() - heatmap = np.clip(heatmap, 1.0, 3.0) - heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min()) - selected_token = tokenizer.convert_ids_to_tokens( - input_ids[hidx] - ) - if not torch.isnan(heatmap).any(): - hm = HeatMap(image, heatmap.cpu().numpy()) - else: - heatmap = np.zeros(heatmap.shape) - hm = HeatMap(image, heatmap) - - outputs.append((hm,selected_token)) - return outputs - - - diff --git a/spaces/osanseviero/mistral-super-fast/README.md b/spaces/osanseviero/mistral-super-fast/README.md deleted file mode 100644 index ca86d5f573430dae1dd59c4262403070fefb862e..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/mistral-super-fast/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Mistral Super Fast -emoji: 😻 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.45.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/inpaint.md 
b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/inpaint.md deleted file mode 100644 index dc935d0bd17b44f847ce5a77f10537f3a69ae0e1..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/inpaint.md +++ /dev/null @@ -1,57 +0,0 @@ - - -# Inpainting - -The Stable Diffusion model can also be applied to inpainting which lets you edit specific parts of an image by providing a mask and a text prompt using Stable Diffusion. - -## Tips - -It is recommended to use this pipeline with checkpoints that have been specifically fine-tuned for inpainting, such -as [runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting). Default -text-to-image Stable Diffusion checkpoints, such as -[runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) are also compatible but they might be less performant. - - - -Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! - -If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations! - - - -## StableDiffusionInpaintPipeline - -[[autodoc]] StableDiffusionInpaintPipeline - - all - - __call__ - - enable_attention_slicing - - disable_attention_slicing - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention - - load_textual_inversion - - load_lora_weights - - save_lora_weights - -## StableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput - -## FlaxStableDiffusionInpaintPipeline - -[[autodoc]] FlaxStableDiffusionInpaintPipeline - - all - - __call__ - -## FlaxStableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/rdm/pipeline_rdm.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/rdm/pipeline_rdm.py deleted file mode 100644 index 3e2653c5423d2c8eac2f3b9e00a6ce1a963e46d6..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/rdm/pipeline_rdm.py +++ /dev/null @@ -1,452 +0,0 @@ -import inspect -from typing import Callable, List, Optional, Union - -import torch -from PIL import Image -from retriever import Retriever, normalize_images, preprocess_images -from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTokenizer - -from diffusers import ( - AutoencoderKL, - DDIMScheduler, - DiffusionPipeline, - DPMSolverMultistepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - ImagePipelineOutput, - LMSDiscreteScheduler, - PNDMScheduler, - UNet2DConditionModel, - logging, -) -from diffusers.image_processor import VaeImageProcessor -from diffusers.utils import is_accelerate_available, randn_tensor - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -class RDMPipeline(DiffusionPipeline): - r""" - Pipeline for text-to-image generation using Retrieval Augmented Diffusion. - - This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - clip ([`CLIPModel`]): - Frozen CLIP model. Retrieval Augmented Diffusion uses the CLIP model, specifically the - [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - - def __init__( - self, - vae: AutoencoderKL, - clip: CLIPModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: Union[ - DDIMScheduler, - PNDMScheduler, - LMSDiscreteScheduler, - EulerDiscreteScheduler, - EulerAncestralDiscreteScheduler, - DPMSolverMultistepScheduler, - ], - feature_extractor: CLIPFeatureExtractor, - retriever: Optional[Retriever] = None, - ): - super().__init__() - self.register_modules( - vae=vae, - clip=clip, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - feature_extractor=feature_extractor, - ) - # Copy from statement here and all the methods we take from stable_diffusion_pipeline - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - self.retriever = retriever - - def enable_xformers_memory_efficient_attention(self): - r""" - Enable memory efficient attention as implemented in xformers. - - When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference - time. Speed up at training time is not guaranteed. - - Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention - is used. - """ - self.unet.set_use_memory_efficient_attention_xformers(True) - - def disable_xformers_memory_efficient_attention(self): - r""" - Disable memory efficient attention as implemented in xformers. - """ - self.unet.set_use_memory_efficient_attention_xformers(False) - - def enable_vae_slicing(self): - r""" - Enable sliced VAE decoding. - - When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several - steps. This is useful to save some memory and allow larger batch sizes. - """ - self.vae.enable_slicing() - - def disable_vae_slicing(self): - r""" - Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to - computing decoding in one step. - """ - self.vae.disable_slicing() - - def enable_vae_tiling(self): - r""" - Enable tiled VAE decoding. - - When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in - several steps. This is useful to save a large amount of memory and to allow the processing of larger images. 
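For orientation, a hedged usage sketch of how these memory-saving switches are typically combined before a call — `pipe` is assumed to be an already-constructed RDMPipeline and the prompt is arbitrary; none of the names below appear in the original file:

```py
# Hypothetical usage sketch; `pipe` is assumed to be an RDMPipeline built elsewhere.
pipe.enable_attention_slicing()  # attention computed slice by slice -> lower peak memory
pipe.enable_vae_slicing()        # VAE decodes the latent batch in slices
pipe.enable_vae_tiling()         # VAE decodes large images tile by tile
images = pipe("a watercolor painting of a fox", height=768, width=768).images
```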
- """ - self.vae.enable_tiling() - - def disable_vae_tiling(self): - r""" - Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to - computing decoding in one step. - """ - self.vae.disable_tiling() - - def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): - r""" - Enable sliced attention computation. - - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - - Args: - slice_size (`str` or `int`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, - `attention_head_dim` must be a multiple of `slice_size`. - """ - if slice_size == "auto": - # half the attention head size is usually a good trade-off between - # speed and memory - if isinstance(self.unet.config.attention_head_dim, int): - slice_size = self.unet.config.attention_head_dim // 2 - else: - slice_size = self.unet.config.attention_head_dim[0] // 2 - self.unet.set_attention_slice(slice_size) - - def disable_attention_slicing(self): - r""" - Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go - back to computing attention in one step. - """ - # set slice_size = `None` to disable `attention slicing` - self.enable_attention_slicing(None) - - def enable_sequential_cpu_offload(self): - r""" - Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, - text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a - `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. - """ - if is_accelerate_available(): - from accelerate import cpu_offload - else: - raise ImportError("Please install accelerate via `pip install accelerate`") - - device = torch.device("cuda") - - for cpu_offloaded_model in [self.unet, self.clip, self.vae]: - if cpu_offloaded_model is not None: - cpu_offload(cpu_offloaded_model, device) - - @property - def _execution_device(self): - r""" - Returns the device on which the pipeline's models will be executed. After calling - `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module - hooks. 
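In the same hedged spirit (assuming `pipe` is an RDMPipeline on a CUDA machine with accelerate installed; nothing below comes from the original file), sequential offloading and the execution-device lookup defined next fit together like this:

```py
# Hypothetical sketch; requires `pip install accelerate` and a CUDA device.
pipe.enable_sequential_cpu_offload()  # unet / clip / vae are loaded to GPU only when their forward is called
print(pipe._execution_device)         # resolved from the accelerate hooks installed by the call above
```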
- """ - if not hasattr(self.unet, "_hf_hook"): - return self.device - for module in self.unet.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - return self.device - - def _encode_prompt(self, prompt): - # get prompt text embeddings - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - - if text_input_ids.shape[-1] > self.tokenizer.model_max_length: - removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] - prompt_embeds = self.clip.get_text_features(text_input_ids.to(self.device)) - prompt_embeds = prompt_embeds / torch.linalg.norm(prompt_embeds, dim=-1, keepdim=True) - prompt_embeds = prompt_embeds[:, None, :] - return prompt_embeds - - def _encode_image(self, retrieved_images, batch_size): - if len(retrieved_images[0]) == 0: - return None - for i in range(len(retrieved_images)): - retrieved_images[i] = normalize_images(retrieved_images[i]) - retrieved_images[i] = preprocess_images(retrieved_images[i], self.feature_extractor).to( - self.clip.device, dtype=self.clip.dtype - ) - _, c, h, w = retrieved_images[0].shape - - retrieved_images = torch.reshape(torch.cat(retrieved_images, dim=0), (-1, c, h, w)) - image_embeddings = self.clip.get_image_features(retrieved_images) - image_embeddings = image_embeddings / torch.linalg.norm(image_embeddings, dim=-1, keepdim=True) - _, d = image_embeddings.shape - image_embeddings = torch.reshape(image_embeddings, (batch_size, -1, d)) - return image_embeddings - - def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - def retrieve_images(self, retrieved_images, prompt_embeds, knn=10): - if self.retriever is not None: - additional_images = self.retriever.retrieve_imgs_batch(prompt_embeds[:, 0].cpu(), knn).total_examples - for i in range(len(retrieved_images)): - retrieved_images[i] += additional_images[i][self.retriever.config.image_column] - return retrieved_images - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - retrieved_images: Optional[List[Image.Image]] = None, - height: int = 768, - width: int = 768, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[torch.Generator] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: Optional[int] = 1, - knn: Optional[int] = 10, - **kwargs, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - height (`int`, *optional*, defaults to 512): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to 512): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. 
Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - - Returns: - [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if - `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the - generated images. - """ - height = height or self.unet.config.sample_size * self.vae_scale_factor - width = width or self.unet.config.sample_size * self.vae_scale_factor - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - if retrieved_images is not None: - retrieved_images = [retrieved_images for _ in range(batch_size)] - else: - retrieved_images = [[] for _ in range(batch_size)] - device = self._execution_device - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - if prompt_embeds is None: - prompt_embeds = self._encode_prompt(prompt) - retrieved_images = self.retrieve_images(retrieved_images, prompt_embeds, knn=knn) - image_embeddings = self._encode_image(retrieved_images, batch_size) - if image_embeddings is not None: - prompt_embeds = torch.cat([prompt_embeds, image_embeddings], dim=1) - - # duplicate text embeddings for each generation per prompt, using mps friendly method - bs_embed, seq_len, _ = prompt_embeds.shape - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_embeddings = torch.zeros_like(prompt_embeds).to(prompt_embeds.device) - - # For classifier free guidance, we need to do two forward passes. 
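To make that update concrete, here is a tiny, self-contained numeric illustration of the classifier-free-guidance combination applied a few lines below (the tensors and the 7.5 scale are toy values, not taken from the pipeline):

```py
import torch

# toy noise predictions from the unconditional and text-conditioned passes
noise_pred_uncond = torch.tensor([0.10, -0.20])
noise_pred_text = torch.tensor([0.30, 0.10])
guidance_scale = 7.5

# same combination the pipeline applies after chunking the batched prediction
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # tensor([1.6000, 2.0500]) -- pushed toward the text-conditioned direction
```

With `guidance_scale = 1` the combination collapses back to the text-conditioned prediction, which is why the code treats a scale above 1 as the trigger for running the extra unconditional pass.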
- # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([uncond_embeddings, prompt_embeds]) - # get the initial random noise unless the user supplied it - num_channels_latents = self.unet.config.in_channels - latents = self.prepare_latents( - batch_size * num_images_per_prompt, - num_channels_latents, - height, - width, - prompt_embeds.dtype, - device, - generator, - latents, - ) - - # set timesteps - self.scheduler.set_timesteps(num_inference_steps) - - # Some schedulers like PNDM have timesteps as arrays - # It's more optimized to move all timesteps to correct device beforehand - timesteps_tensor = self.scheduler.timesteps.to(self.device) - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - - for i, t in enumerate(self.progress_bar(timesteps_tensor)): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - if not output_type == "latent": - image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] - else: - image = latents - - image = self.image_processor.postprocess( - image, output_type=output_type, do_denormalize=[True] * image.shape[0] - ) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py deleted file mode 100644 index cedf9de014753f90e04750a3f279e33344b4fb86..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +++ /dev/null @@ -1,745 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import List, Optional, Tuple, Union - -import torch -import torch.nn as nn -import torch.utils.checkpoint -from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer -from transformers.activations import ACT2FN -from transformers.modeling_outputs import BaseModelOutput -from transformers.utils import logging - -from ...models import AutoencoderKL, UNet2DConditionModel, UNet2DModel, VQModel -from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput - - -class LDMTextToImagePipeline(DiffusionPipeline): - r""" - Pipeline for text-to-image generation using latent diffusion. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). - - Parameters: - vqvae ([`VQModel`]): - Vector-quantized (VQ) model to encode and decode images to and from latent representations. - bert ([`LDMBertModel`]): - Text-encoder model based on [`~transformers.BERT`]. - tokenizer ([`~transformers.BertTokenizer`]): - A `BertTokenizer` to tokenize text. - unet ([`UNet2DConditionModel`]): - A `UNet2DConditionModel` to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - """ - model_cpu_offload_seq = "bert->unet->vqvae" - - def __init__( - self, - vqvae: Union[VQModel, AutoencoderKL], - bert: PreTrainedModel, - tokenizer: PreTrainedTokenizer, - unet: Union[UNet2DModel, UNet2DConditionModel], - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], - ): - super().__init__() - self.register_modules(vqvae=vqvae, bert=bert, tokenizer=tokenizer, unet=unet, scheduler=scheduler) - self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 1.0, - eta: Optional[float] = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - **kwargs, - ) -> Union[Tuple, ImagePipelineOutput]: - r""" - The call function to the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 1.0): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - generator (`torch.Generator`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. - - Example: - - ```py - >>> from diffusers import DiffusionPipeline - - >>> # load model and scheduler - >>> ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") - - >>> # run pipeline in inference (sample random noise and denoise) - >>> prompt = "A painting of a squirrel eating a burger" - >>> images = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6).images - - >>> # save images - >>> for idx, image in enumerate(images): - ... image.save(f"squirrel-{idx}.png") - ``` - - Returns: - [`~pipelines.ImagePipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is - returned where the first element is a list with the generated images. - """ - # 0. Default height and width to unet - height = height or self.unet.config.sample_size * self.vae_scale_factor - width = width or self.unet.config.sample_size * self.vae_scale_factor - - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - # get unconditional embeddings for classifier free guidance - if guidance_scale != 1.0: - uncond_input = self.tokenizer( - [""] * batch_size, padding="max_length", max_length=77, truncation=True, return_tensors="pt" - ) - negative_prompt_embeds = self.bert(uncond_input.input_ids.to(self._execution_device))[0] - - # get prompt text embeddings - text_input = self.tokenizer(prompt, padding="max_length", max_length=77, truncation=True, return_tensors="pt") - prompt_embeds = self.bert(text_input.input_ids.to(self._execution_device))[0] - - # get the initial random noise unless the user supplied it - latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - latents = randn_tensor( - latents_shape, generator=generator, device=self._execution_device, dtype=prompt_embeds.dtype - ) - else: - if latents.shape != latents_shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") - latents = latents.to(self._execution_device) - - self.scheduler.set_timesteps(num_inference_steps) - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - - extra_kwargs = {} - if accepts_eta: - extra_kwargs["eta"] = eta - - for t in self.progress_bar(self.scheduler.timesteps): - if guidance_scale == 1.0: - # guidance_scale of 1 means no guidance - latents_input = latents - context = prompt_embeds - else: - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - latents_input = torch.cat([latents] * 2) - context = torch.cat([negative_prompt_embeds, prompt_embeds]) - - # predict the noise residual - noise_pred = self.unet(latents_input, t, encoder_hidden_states=context).sample - # perform guidance - if guidance_scale != 1.0: - noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample - - # scale and decode the image latents with vae - latents = 1 / self.vqvae.config.scaling_factor * latents - image = self.vqvae.decode(latents).sample - - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).numpy() - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) - - -################################################################################ -# Code for the text transformer model -################################################################################ -""" PyTorch LDMBERT model.""" - - -logger = logging.get_logger(__name__) - -LDMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "ldm-bert", - # See all LDMBert models at https://huggingface.co/models?filter=ldmbert -] - - -LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "ldm-bert": "https://huggingface.co/valhalla/ldm-bert/blob/main/config.json", -} - - -""" LDMBERT model configuration""" - - -class LDMBertConfig(PretrainedConfig): - model_type = "ldmbert" - keys_to_ignore_at_inference = ["past_key_values"] - attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} - - def __init__( - self, - vocab_size=30522, - max_position_embeddings=77, - encoder_layers=32, - encoder_ffn_dim=5120, - encoder_attention_heads=8, - head_dim=64, - encoder_layerdrop=0.0, - activation_function="gelu", - d_model=1280, - dropout=0.1, - attention_dropout=0.0, - activation_dropout=0.0, - init_std=0.02, - classifier_dropout=0.0, - scale_embedding=False, - use_cache=True, - pad_token_id=0, - **kwargs, - ): - self.vocab_size = vocab_size - self.max_position_embeddings = max_position_embeddings - self.d_model = d_model - self.encoder_ffn_dim = encoder_ffn_dim - self.encoder_layers = encoder_layers - self.encoder_attention_heads = encoder_attention_heads - self.head_dim = head_dim - self.dropout = dropout - self.attention_dropout = 
attention_dropout - self.activation_dropout = activation_dropout - self.activation_function = activation_function - self.init_std = init_std - self.encoder_layerdrop = encoder_layerdrop - self.classifier_dropout = classifier_dropout - self.use_cache = use_cache - self.num_hidden_layers = encoder_layers - self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True - - super().__init__(pad_token_id=pad_token_id, **kwargs) - - -def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): - """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. - """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len - - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) - - inverted_mask = 1.0 - expanded_mask - - return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) - - -# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->LDMBert -class LDMBertAttention(nn.Module): - """Multi-headed attention from 'Attention Is All You Need' paper""" - - def __init__( - self, - embed_dim: int, - num_heads: int, - head_dim: int, - dropout: float = 0.0, - is_decoder: bool = False, - bias: bool = False, - ): - super().__init__() - self.embed_dim = embed_dim - self.num_heads = num_heads - self.dropout = dropout - self.head_dim = head_dim - self.inner_dim = head_dim * num_heads - - self.scaling = self.head_dim**-0.5 - self.is_decoder = is_decoder - - self.k_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) - self.v_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) - self.q_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) - self.out_proj = nn.Linear(self.inner_dim, embed_dim) - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def forward( - self, - hidden_states: torch.Tensor, - key_value_states: Optional[torch.Tensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - attention_mask: Optional[torch.Tensor] = None, - layer_head_mask: Optional[torch.Tensor] = None, - output_attentions: bool = False, - ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - """Input shape: Batch x Time x Channel""" - - # if key_value_states are provided this layer is used as a cross-attention layer - # for the decoder - is_cross_attention = key_value_states is not None - - bsz, tgt_len, _ = hidden_states.size() - - # get query proj - query_states = self.q_proj(hidden_states) * self.scaling - # get key, value proj - if is_cross_attention and past_key_value is not None: - # reuse k,v, cross_attentions - key_states = past_key_value[0] - value_states = past_key_value[1] - elif is_cross_attention: - # cross_attentions - key_states = self._shape(self.k_proj(key_value_states), -1, bsz) - value_states = self._shape(self.v_proj(key_value_states), -1, bsz) - elif past_key_value is not None: - # reuse k, v, self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - key_states = torch.cat([past_key_value[0], key_states], dim=2) - value_states = torch.cat([past_key_value[1], value_states], dim=2) - else: - # self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - - if self.is_decoder: - # if cross_attention 
save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. - # Further calls to cross_attention layer can then reuse all cross-attention - # key/value_states (first "if" case) - # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of - # all previous decoder key/value_states. Further calls to uni-directional self-attention - # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) - # if encoder bi-directional self-attention `past_key_value` is always `None` - past_key_value = (key_states, value_states) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.view(*proj_shape) - value_states = value_states.view(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {attn_weights.size()}" - ) - - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, tgt_len, src_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" - ) - attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - attn_weights = nn.functional.softmax(attn_weights, dim=-1) - - if layer_head_mask is not None: - if layer_head_mask.size() != (self.num_heads,): - raise ValueError( - f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" - f" {layer_head_mask.size()}" - ) - attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - if output_attentions: - # this operation is a bit awkward, but it's required to - # make sure that attn_weights keeps its gradient. - # In order to do so, attn_weights have to be reshaped - # twice and have to be reused in the following - attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) - else: - attn_weights_reshaped = None - - attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) - - attn_output = torch.bmm(attn_probs, value_states) - - if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) - - attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output = attn_output.transpose(1, 2) - - # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be - # partitioned across GPUs when using tensor-parallelism. 
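As a standalone, runnable illustration of the shape bookkeeping done by the surrounding view/transpose/reshape (toy sizes chosen here; this snippet is not part of LDMBertAttention):

```py
import torch

# toy sizes: 2 sequences, 8 heads of width 64, target length 5
bsz, num_heads, tgt_len, head_dim = 2, 8, 5, 64
attn_output = torch.randn(bsz * num_heads, tgt_len, head_dim)          # per-head outputs after torch.bmm
attn_output = attn_output.view(bsz, num_heads, tgt_len, head_dim)      # split the fused batch/head axis
attn_output = attn_output.transpose(1, 2)                              # -> (bsz, tgt_len, num_heads, head_dim)
attn_output = attn_output.reshape(bsz, tgt_len, num_heads * head_dim)  # -> (bsz, tgt_len, inner_dim)
assert attn_output.shape == (2, 5, 512)
```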
- attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim) - - attn_output = self.out_proj(attn_output) - - return attn_output, attn_weights_reshaped, past_key_value - - -class LDMBertEncoderLayer(nn.Module): - def __init__(self, config: LDMBertConfig): - super().__init__() - self.embed_dim = config.d_model - self.self_attn = LDMBertAttention( - embed_dim=self.embed_dim, - num_heads=config.encoder_attention_heads, - head_dim=config.head_dim, - dropout=config.attention_dropout, - ) - self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.dropout = config.dropout - self.activation_fn = ACT2FN[config.activation_function] - self.activation_dropout = config.activation_dropout - self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) - self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) - self.final_layer_norm = nn.LayerNorm(self.embed_dim) - - def forward( - self, - hidden_states: torch.FloatTensor, - attention_mask: torch.FloatTensor, - layer_head_mask: torch.FloatTensor, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` - attention_mask (`torch.FloatTensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size - `(encoder_attention_heads,)`. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ - residual = hidden_states - hidden_states = self.self_attn_layer_norm(hidden_states) - hidden_states, attn_weights, _ = self.self_attn( - hidden_states=hidden_states, - attention_mask=attention_mask, - layer_head_mask=layer_head_mask, - output_attentions=output_attentions, - ) - hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) - hidden_states = residual + hidden_states - - residual = hidden_states - hidden_states = self.final_layer_norm(hidden_states) - hidden_states = self.activation_fn(self.fc1(hidden_states)) - hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) - hidden_states = self.fc2(hidden_states) - hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) - hidden_states = residual + hidden_states - - if hidden_states.dtype == torch.float16 and ( - torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() - ): - clamp_value = torch.finfo(hidden_states.dtype).max - 1000 - hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) - - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs - - -# Copied from transformers.models.bart.modeling_bart.BartPretrainedModel with Bart->LDMBert -class LDMBertPreTrainedModel(PreTrainedModel): - config_class = LDMBertConfig - base_model_prefix = "model" - _supports_gradient_checkpointing = True - _keys_to_ignore_on_load_unexpected = [r"encoder\.version", r"decoder\.version"] - - def _init_weights(self, module): - std = self.config.init_std - if isinstance(module, nn.Linear): - module.weight.data.normal_(mean=0.0, std=std) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=std) - if 
module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, (LDMBertEncoder,)): - module.gradient_checkpointing = value - - @property - def dummy_inputs(self): - pad_token = self.config.pad_token_id - input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) - dummy_inputs = { - "attention_mask": input_ids.ne(pad_token), - "input_ids": input_ids, - } - return dummy_inputs - - -class LDMBertEncoder(LDMBertPreTrainedModel): - """ - Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a - [`LDMBertEncoderLayer`]. - - Args: - config: LDMBertConfig - embed_tokens (nn.Embedding): output embedding - """ - - def __init__(self, config: LDMBertConfig): - super().__init__(config) - - self.dropout = config.dropout - - embed_dim = config.d_model - self.padding_idx = config.pad_token_id - self.max_source_positions = config.max_position_embeddings - - self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim) - self.embed_positions = nn.Embedding(config.max_position_embeddings, embed_dim) - self.layers = nn.ModuleList([LDMBertEncoderLayer(config) for _ in range(config.encoder_layers)]) - self.layer_norm = nn.LayerNorm(embed_dim) - - self.gradient_checkpointing = False - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.embed_tokens - - def set_input_embeddings(self, value): - self.embed_tokens = value - - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutput]: - r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. - - Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. 
- output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.BaseModelOutput`] instead of a plain tuple. - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # retrieve input_ids and inputs_embeds - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) - - seq_len = input_shape[1] - if position_ids is None: - position_ids = torch.arange(seq_len, dtype=torch.long, device=inputs_embeds.device).expand((1, -1)) - embed_pos = self.embed_positions(position_ids) - - hidden_states = inputs_embeds + embed_pos - hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) - - # expand attention_mask - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - # check if head_mask has a correct number of layers specified if desired - if head_mask is not None: - if head_mask.size()[0] != (len(self.layers)): - raise ValueError( - f"The head_mask should be specified for {len(self.layers)} layers, but it is for" - f" {head_mask.size()[0]}." 
- ) - - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(encoder_layer), - hidden_states, - attention_mask, - (head_mask[idx] if head_mask is not None else None), - ) - else: - layer_outputs = encoder_layer( - hidden_states, - attention_mask, - layer_head_mask=(head_mask[idx] if head_mask is not None else None), - output_attentions=output_attentions, - ) - - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - hidden_states = self.layer_norm(hidden_states) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) - - -class LDMBertModel(LDMBertPreTrainedModel): - _no_split_modules = [] - - def __init__(self, config: LDMBertConfig): - super().__init__(config) - self.model = LDMBertEncoder(config) - self.to_logits = nn.Linear(config.hidden_size, config.vocab_size) - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): - outputs = self.model( - input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - return outputs diff --git a/spaces/pankajsthr/test-stable/app.py b/spaces/pankajsthr/test-stable/app.py deleted file mode 100644 index 1a26a41c1d1795b7cc719f3fddf7692876861d8c..0000000000000000000000000000000000000000 --- a/spaces/pankajsthr/test-stable/app.py +++ /dev/null @@ -1,360 +0,0 @@ -import gradio as gr - -from datasets import load_dataset -from PIL import Image -import torch -import re -import os -import requests - - -from share_btn import community_icon_html, loading_icon_html, share_js - -from diffusers import StableDiffusionPipeline - - -model_id = "runwayml/stable-diffusion-v1-5" -device = "cpu" - -auth_token = os.getenv("auth_token") - -pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=auth_token,revision="fp16",torch_dtype=torch.float32) -pipe = pipe.to(device) -#If you are running this code locally, you need to either do a 'huggingface-cli login` or paste your User Access Token from here https://huggingface.co/settings/tokens into the use_auth_token field below. -#pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True, revision="fp16", torch_dtype=torch.float16) -#pipe = pipe.to(device) -#torch.backends.cudnn.benchmark = True - - -is_gpu_busy = False -def infer(prompt): - global is_gpu_busy - samples = 4 - steps = 50 - scale = 7.5 - - seed = gr.Slider( - label="Seed", - minimum=0, - maximum=2147483647, - step=1, - randomize=True, - ) - - generator = torch.Generator(device=device).manual_seed(1024) - #print("Is GPU busy? 
", is_gpu_busy) - images = [] - if(not is_gpu_busy): - is_gpu_busy = True - images_list = pipe( - [prompt] * samples, - num_inference_steps=steps, - guidance_scale=scale, - generator=generator, - ) - is_gpu_busy = False - safe_image = Image.open(r"unsafe.png") - for i, image in enumerate(images_list["sample"]): - if(images_list["nsfw_content_detected"][i]): - images.append(safe_image) - else: - images.append(image) - #else: - #url = os.getenv('JAX_BACKEND_URL') - #payload = {'prompt': prompt} - #images_request = requests.post(url, json = payload) - #for image in images_request.json()["images"]: - # image_b64 = (f"data:image/jpeg;base64,{image}") - # images.append(image_b64) - - return None - - -css = """ - .gradio-container { - font-family: 'IBM Plex Sans', sans-serif; - } - .gr-button { - color: white; - border-color: black; - background: black; - } - input[type='range'] { - accent-color: black; - } - .dark input[type='range'] { - accent-color: #dfdfdf; - } - .container { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; - } - #gallery { - min-height: 22rem; - margin-bottom: 15px; - margin-left: auto; - margin-right: auto; - border-bottom-right-radius: .5rem !important; - border-bottom-left-radius: .5rem !important; - } - #gallery>div>.h-full { - min-height: 20rem; - } - .details:hover { - text-decoration: underline; - } - .gr-button { - white-space: nowrap; - } - .gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; - } - #advanced-btn { - font-size: .7rem !important; - line-height: 19px; - margin-top: 12px; - margin-bottom: 12px; - padding: 2px 8px; - border-radius: 14px !important; - } - #advanced-options { - display: none; - margin-bottom: 20px; - } - .footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } - .acknowledgments h4{ - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; - } - #container-advanced-btns{ - display: flex; - flex-wrap: wrap; - justify-content: space-between; - align-items: center; - } - .animate-spin { - animation: spin 1s linear infinite; - } - @keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } - } - #share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; - } - #share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; - } - #share-btn * { - all: unset; - } - .gr-form{ - flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0; - } - #prompt-container{ - gap: 0; - } -""" - -block = gr.Blocks(css=css) - 
-examples = [ - [ - 'A high tech solarpunk utopia in the Amazon rainforest', -# 4, -# 45, -# 7.5, -# 1024, - ], - [ - 'A pikachu fine dining with a view to the Eiffel Tower', -# 4, -# 45, -# 7, -# 1024, - ], - [ - 'A mecha robot in a favela in expressionist style', -# 4, -# 45, -# 7, -# 1024, - ], - [ - 'an insect robot preparing a delicious meal', -# 4, -# 45, -# 7, -# 1024, - ], - [ - "A small cabin on top of a snowy mountain in the style of Disney, artstation", -# 4, -# 45, -# 7, -# 1024, - ], -] - - -with block: - gr.HTML( - """ -
-        Stable Diffusion Demo
-        Stable Diffusion is a state of the art text-to-image model that generates images from text.
-        For faster generation and API access you can try DreamStudio Beta.
        - """ - ) - with gr.Group(): - with gr.Box(): - with gr.Row(elem_id="prompt-container").style(): - text = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=1, - placeholder="Enter your prompt", - elem_id="prompt-text-input", - ).style( - - container=False, - ) - btn = gr.Button("Generate image").style( - - full_width=False, - ) - - gallery = gr.Gallery( - label="Generated images", show_label=False, elem_id="gallery" - ).style(grid=[2], height="auto") - - with gr.Group(elem_id="container-advanced-btns"): - advanced_button = gr.Button("Advanced options", elem_id="advanced-btn") - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = gr.Button("Share to community", elem_id="share-btn") - - with gr.Row(elem_id="advanced-options"): - gr.Markdown("Advanced settings are temporarily unavailable") - samples = gr.Slider(label="Images", minimum=1, maximum=4, value=4, step=1) - steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=45, step=1) - scale = gr.Slider( - label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1 - ) - seed = gr.Slider( - label="Seed", - minimum=0, - maximum=2147483647, - step=1, - randomize=True, - ) - - ex = gr.Examples(examples=examples, fn=infer, inputs=text, outputs=[gallery, community_icon, loading_icon, share_button], cache_examples=False) - ex.dataset.headers = [""] - - text.submit(infer, inputs=text, outputs=[gallery], postprocess=False) - btn.click(infer, inputs=text, outputs=[gallery], postprocess=False) - - - gr.HTML( - """ - -
-        LICENSE
-        The model is licensed with a CreativeML Open RAIL-M license. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information meant for harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions please read the license.
-
-        Biases and content acknowledgment
-        Despite how impressive it is to turn text into images, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the LAION-5B dataset, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the model card.
        - """ - ) - -block.queue(concurrency_count=40, max_size=20).launch() \ No newline at end of file diff --git a/spaces/phyloforfun/VoucherVision/app.py b/spaces/phyloforfun/VoucherVision/app.py deleted file mode 100644 index 40c60cbff6665c04a59c814031a7af39d66899ff..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/app.py +++ /dev/null @@ -1,1292 +0,0 @@ -import streamlit as st -import yaml, os, json, random, time, shutil -import plotly.graph_objs as go -from itertools import chain -from PIL import Image -from io import BytesIO -from streamlit_extras.let_it_rain import rain - -from vouchervision.LeafMachine2_Config_Builder import write_config_file -from vouchervision.VoucherVision_Config_Builder import build_VV_config , TestOptionsGPT, TestOptionsPalm, check_if_usable -from vouchervision.vouchervision_main import voucher_vision -from vouchervision.general_utils import summarize_expense_report, validate_dir -from vouchervision.utils import upload_to_drive, image_to_base64, setup_streamlit_config, save_uploaded_file, check_prompt_yaml_filename - - - -######################################################################################################## -### Constants #### -######################################################################################################## -PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE = ["Version 1","Version 1 PaLM 2"] -# LLM_VERSIONS = ["GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"] -COLORS_EXPENSE_REPORT = { - 'GPT_4': '#8fff66', # Bright Green - 'GPT_3_5': '#006400', # Dark Green - 'PALM2': '#66a8ff' # blue - } -MAX_GALLERY_IMAGES = 50 -GALLERY_IMAGE_SIZE = 128 -N_OVERALL_STEPS = 6 - - - -######################################################################################################## -### Progress bar #### -######################################################################################################## -class ProgressReport: - def __init__(self, overall_bar, batch_bar, text_overall, text_batch): - self.overall_bar = overall_bar - self.batch_bar = batch_bar - self.text_overall = text_overall - self.text_batch = text_batch - self.current_overall_step = 0 - self.total_overall_steps = 20 # number of major steps in machine function - self.current_batch = 0 - self.total_batches = 20 - - def update_overall(self, step_name=""): - self.current_overall_step += 1 - self.overall_bar.progress(self.current_overall_step / self.total_overall_steps) - self.text_overall.text(step_name) - - def update_batch(self, step_name=""): - self.current_batch += 1 - self.batch_bar.progress(self.current_batch / self.total_batches) - self.text_batch.text(step_name) - - def set_n_batches(self, n_batches): - self.total_batches = n_batches - - def set_n_overall(self, total_overall_steps): - self.current_overall_step = 0 - self.overall_bar.progress(0) - self.total_overall_steps = total_overall_steps - - def reset_batch(self, step_name): - self.current_batch = 0 - self.batch_bar.progress(0) - self.text_batch.text(step_name) - def reset_overall(self, step_name): - self.current_overall_step = 0 - self.overall_bar.progress(0) - self.text_overall.text(step_name) - - def get_n_images(self): - return self.n_images - def get_n_overall(self): - return self.total_overall_steps - - - -######################################################################################################## -### Streamlit helper functions #### -######################################################################################################## -def 
display_scrollable_results(JSON_results, test_results, OPT2, OPT3): - """ - Display the results from JSON_results in a scrollable container. - """ - # Initialize the container - con_results = st.empty() - with con_results.container(): - - # Start the custom container for all the results - results_html = """
        """ - - for idx, (test_name, _) in enumerate(sorted(test_results.items())): - _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') - opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" - opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" - - if JSON_results[idx] is None: - results_html += f"

        None

        " - else: - formatted_json = json.dumps(JSON_results[idx], indent=4, sort_keys=False) - results_html += f"
        [{opt2_readable}] + [{opt3_readable}]
        {formatted_json}
        " - - # End the custom container - results_html += """
        """ - - # The CSS to make this container scrollable - css = """ - - """ - - # Apply the CSS and then the results - st.markdown(css, unsafe_allow_html=True) - st.markdown(results_html, unsafe_allow_html=True) - - - -def display_test_results(test_results, JSON_results, llm_version): - if llm_version == 'gpt': - OPT1, OPT2, OPT3 = TestOptionsGPT.get_options() - elif llm_version == 'palm': - OPT1, OPT2, OPT3 = TestOptionsPalm.get_options() - else: - raise - - widths = [1] * (len(OPT1) + 2) + [2] - columns = st.columns(widths) - - with columns[0]: - st.write("LeafMachine2") - with columns[1]: - st.write("Prompt") - with columns[len(OPT1) + 2]: - st.write("Scroll to See Last Transcription in Each Test") - - already_written = set() - - for test_name, result in sorted(test_results.items()): - _, ind_opt1, _, _ = test_name.split('__') - option_value = OPT1[int(ind_opt1.split('-')[1])] - - if option_value not in already_written: - with columns[int(ind_opt1.split('-')[1]) + 2]: - st.write(option_value) - already_written.add(option_value) - - printed_options = set() - - with columns[-1]: - display_scrollable_results(JSON_results, test_results, OPT2, OPT3) - - # Close the custom container - st.write('', unsafe_allow_html=True) - - for idx, (test_name, result) in enumerate(sorted(test_results.items())): - _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') - opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" - opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" - - if (opt2_readable, opt3_readable) not in printed_options: - with columns[0]: - st.info(f"{opt2_readable}") - st.write('---') - with columns[1]: - st.info(f"{opt3_readable}") - st.write('---') - printed_options.add((opt2_readable, opt3_readable)) - - with columns[int(ind_opt1.split('-')[1]) + 2]: - if result: - st.success(f"Test Passed") - else: - st.error(f"Test Failed") - st.write('---') - - # success_count = sum(1 for result in test_results.values() if result) - # failure_count = len(test_results) - success_count - # proportional_rain("🥇", success_count, "💔", failure_count, font_size=72, falling_speed=5, animation_length="infinite") - rain_emojis(test_results) - - - -def add_emoji_delay(): - time.sleep(0.3) - - - -def rain_emojis(test_results): - # test_results = { - # 'test1': True, # Test passed - # 'test2': True, # Test passed - # 'test3': True, # Test passed - # 'test4': False, # Test failed - # 'test5': False, # Test failed - # 'test6': False, # Test failed - # 'test7': False, # Test failed - # 'test8': False, # Test failed - # 'test9': False, # Test failed - # 'test10': False, # Test failed - # } - success_emojis = ["🥇", "🏆", "🍾", "🙌"] - failure_emojis = ["💔", "😭"] - - success_count = sum(1 for result in test_results.values() if result) - failure_count = len(test_results) - success_count - - chosen_emoji = random.choice(success_emojis) - for _ in range(success_count): - rain( - emoji=chosen_emoji, - font_size=72, - falling_speed=4, - animation_length=2, - ) - add_emoji_delay() - - chosen_emoji = random.choice(failure_emojis) - for _ in range(failure_count): - rain( - emoji=chosen_emoji, - font_size=72, - falling_speed=5, - animation_length=1, - ) - add_emoji_delay() - - - -def get_prompt_versions(LLM_version): - yaml_files = [f for f in os.listdir(os.path.join(st.session_state.dir_home, 'custom_prompts')) if f.endswith('.yaml')] - - if LLM_version in ["GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5"]: - versions = ["Version 1", "Version 1 No Domain Knowledge", 
"Version 2"] - return (versions + yaml_files, "Version 2") - elif LLM_version in ["PaLM 2",]: - versions = ["Version 1 PaLM 2", "Version 1 PaLM 2 No Domain Knowledge", "Version 2 PaLM 2"] - return (versions + yaml_files, "Version 2 PaLM 2") - else: - # Handle other cases or raise an error - return (yaml_files, None) - - - -def delete_directory(dir_path): - try: - shutil.rmtree(dir_path) - st.session_state['input_list'] = [] - st.session_state['input_list_small'] = [] - # st.success(f"Deleted previously uploaded images, making room for new images: {dir_path}") - except OSError as e: - st.error(f"Error: {dir_path} : {e.strerror}") - - - -# Function to load a YAML file and update session_state -def load_prompt_yaml(filename): - st.session_state['user_clicked_load_prompt_yaml'] = filename - with open(filename, 'r') as file: - st.session_state['prompt_info'] = yaml.safe_load(file) - st.session_state['prompt_author'] = st.session_state['prompt_info'].get('prompt_author', st.session_state['default_prompt_author']) - st.session_state['prompt_author_institution'] = st.session_state['prompt_info'].get('prompt_author_institution', st.session_state['default_prompt_author_institution']) - st.session_state['prompt_description'] = st.session_state['prompt_info'].get('prompt_description', st.session_state['default_prompt_description']) - st.session_state['LLM'] = st.session_state['prompt_info'].get('LLM', 'gpt') - st.session_state['instructions'] = st.session_state['prompt_info'].get('instructions', st.session_state['default_instructions']) - st.session_state['json_formatting_instructions'] = st.session_state['prompt_info'].get('json_formatting_instructions', st.session_state['default_json_formatting_instructions'] ) - st.session_state['rules'] = st.session_state['prompt_info'].get('rules', {}) - st.session_state['mapping'] = st.session_state['prompt_info'].get('mapping', {}) - - st.session_state['prompt_info'] = { - 'prompt_author': st.session_state['prompt_author'], - 'prompt_author_institution': st.session_state['prompt_author_institution'], - 'prompt_description': st.session_state['prompt_description'], - 'LLM': st.session_state['LLM'], - 'instructions': st.session_state['instructions'], - 'json_formatting_instructions': st.session_state['json_formatting_instructions'], - 'rules': st.session_state['rules'], - 'mapping': st.session_state['mapping'], - } - - # Placeholder: - st.session_state['assigned_columns'] = list(chain.from_iterable(st.session_state['mapping'].values())) - - - -def save_prompt_yaml(filename, col_right_save): - yaml_content = { - 'prompt_author': st.session_state['prompt_author'], - 'prompt_author_institution': st.session_state['prompt_author_institution'], - 'prompt_description': st.session_state['prompt_description'], - 'LLM': st.session_state['LLM'], - 'instructions': st.session_state['instructions'], - 'json_formatting_instructions': st.session_state['json_formatting_instructions'], - 'rules': st.session_state['rules'], - 'mapping': st.session_state['mapping'], - } - - dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') - filepath = os.path.join(dir_prompt, f"{filename}.yaml") - - with open(filepath, 'w') as file: - yaml.safe_dump(dict(yaml_content), file, sort_keys=False) - - st.success(f"Prompt saved as '{filename}.yaml'.") - - upload_to_drive(filepath, filename) - - with col_right_save: - create_download_button_yaml(filepath, filename) - - - -def check_unique_mapping_assignments(): - if len(st.session_state['assigned_columns']) != 
len(set(st.session_state['assigned_columns'])): - st.error("Each column name must be assigned to only one category.") - return False - else: - st.success("Mapping confirmed.") - return True - - - -def create_download_button(zip_filepath): - with open(zip_filepath, 'rb') as f: - bytes_io = BytesIO(f.read()) - st.download_button( - label=f"Download Results for{st.session_state['processing_add_on']}",type='primary', - data=bytes_io, - file_name=os.path.basename(zip_filepath), - mime='application/zip' - ) - - - -def btn_load_prompt(selected_yaml_file, dir_prompt): - if selected_yaml_file: - yaml_file_path = os.path.join(dir_prompt, selected_yaml_file) - load_prompt_yaml(yaml_file_path) - elif not selected_yaml_file: - # Directly assigning default values since no file is selected - st.session_state['prompt_info'] = {} - st.session_state['prompt_author'] = st.session_state['default_prompt_author'] - st.session_state['prompt_author_institution'] = st.session_state['default_prompt_author_institution'] - st.session_state['prompt_description'] = st.session_state['default_prompt_description'] - st.session_state['instructions'] = st.session_state['default_instructions'] - st.session_state['json_formatting_instructions'] = st.session_state['default_json_formatting_instructions'] - st.session_state['rules'] = {} - st.session_state['LLM'] = 'gpt' - - st.session_state['assigned_columns'] = [] - - st.session_state['prompt_info'] = { - 'prompt_author': st.session_state['prompt_author'], - 'prompt_author_institution': st.session_state['prompt_author_institution'], - 'prompt_description': st.session_state['prompt_description'], - 'LLM': st.session_state['LLM'], - 'instructions': st.session_state['instructions'], - 'json_formatting_instructions': st.session_state['json_formatting_instructions'], - 'rules': st.session_state['rules'], - 'mapping': st.session_state['mapping'], - } - - - -def refresh(): - st.session_state['uploader_idk'] += 1 - st.write('') - - - -def upload_local_prompt_to_server(dir_prompt): - uploaded_file = st.file_uploader("Upload a custom prompt file", type=['yaml']) - if uploaded_file is not None: - # Check the file extension - file_name = uploaded_file.name - if file_name.endswith('.yaml'): - file_path = os.path.join(dir_prompt, file_name) - - # Save the file - with open(file_path, 'wb') as f: - f.write(uploaded_file.getbuffer()) - st.success(f"Saved file {file_name} in {dir_prompt}") - else: - st.error("Please upload a .yaml file that you previously created using this Prompt Builder tool.") - - - -def create_download_button_yaml(file_path, selected_yaml_file): - file_label = f"Download {selected_yaml_file}" - with open(file_path, 'rb') as f: - st.download_button( - label=file_label, - data=f, - file_name=os.path.basename(file_path), - mime='application/x-yaml' - ) - - - -def clear_image_gallery(): - delete_directory(st.session_state['dir_uploaded_images']) - delete_directory(st.session_state['dir_uploaded_images_small']) - validate_dir(st.session_state['dir_uploaded_images']) - validate_dir(st.session_state['dir_uploaded_images_small']) - - - -def use_test_image(): - st.info(f"Processing images from {os.path.join(st.session_state.dir_home,'demo','demo_images')}") - st.session_state.config['leafmachine']['project']['dir_images_local'] = os.path.join(st.session_state.dir_home,'demo','demo_images') - n_images = len([f for f in os.listdir(st.session_state.config['leafmachine']['project']['dir_images_local']) if 
os.path.isfile(os.path.join(st.session_state.config['leafmachine']['project']['dir_images_local'], f))]) - st.session_state['processing_add_on'] = f" {n_images} Images" - clear_image_gallery() - st.session_state['uploader_idk'] += 1 - - - -######################################################################################################## -### Streamlit sections #### -######################################################################################################## -def create_space_saver(): - st.subheader("Space Saving Options") - col_ss_1, col_ss_2 = st.columns([2,2]) - with col_ss_1: - st.write("Several folders are created and populated with data during the VoucherVision transcription process.") - st.write("Below are several options that will allow you to automatically delete temporary files that you may not need for everyday operations.") - st.write("VoucherVision creates the following folders. Folders marked with a :star: are required if you want to use VoucherVisionEditor for quality control.") - st.write("`../[Run Name]/Archival_Components`") - st.write("`../[Run Name]/Config_File`") - st.write("`../[Run Name]/Cropped_Images` :star:") - st.write("`../[Run Name]/Logs`") - st.write("`../[Run Name]/Original_Images` :star:") - st.write("`../[Run Name]/Transcription` :star:") - with col_ss_2: - st.session_state.config['leafmachine']['project']['delete_temps_keep_VVE'] = st.checkbox("Delete Temporary Files (KEEP files required for VoucherVisionEditor)", st.session_state.config['leafmachine']['project'].get('delete_temps_keep_VVE', False)) - st.session_state.config['leafmachine']['project']['delete_all_temps'] = st.checkbox("Keep only the final transcription file", st.session_state.config['leafmachine']['project'].get('delete_all_temps', False),help="*WARNING:* This limits your ability to do quality assurance. This will delete all folders created by VoucherVision, leaving only the `transcription.xlsx` file.") - - - -def show_available_APIs(): - st.session_state['has_key_openai'] = (os.getenv('OPENAI_API_KEY') is not None) and (os.getenv('OPENAI_API_KEY') != '') - st.session_state['has_key_google_OCR'] = (os.getenv('GOOGLE_APPLICATION_CREDENTIALS') is not None) and (os.getenv('GOOGLE_APPLICATION_CREDENTIALS') != '') - st.session_state['has_key_palm2'] = (os.getenv('PALM_API_KEY') is not None) and (os.getenv('PALM_API_KEY') != '') - st.session_state['has_key_azure'] = (os.getenv('AZURE_API_KEY') is not None) and (os.getenv('AZURE_API_KEY') != '') - - emoji_good = ":heavy_check_mark:" - emoji_bad = ":x:" - - table = { - 'Google Vision OCR API (required!)': emoji_good if st.session_state['has_key_google_OCR'] else emoji_bad, - 'OpenAI API': emoji_good if st.session_state['has_key_openai'] else emoji_bad, - 'PaLM 2 API': emoji_good if st.session_state['has_key_palm2'] else emoji_bad, - 'OpenAI API (Azure)': emoji_good if st.session_state['has_key_azure'] else emoji_bad, - } - for api_name, status in table.items(): - st.markdown(f"* {status} {api_name}") - -def display_image_gallery(): - # Initialize the container - con_image = st.empty() - - # Start the div for the image grid - img_grid_html = """ -
        - """ - - # Loop through each image in the input list - # with con_image.container(): - for image_path in st.session_state['input_list']: - # Open the image and create a thumbnail - img = Image.open(image_path) - img.thumbnail((120, 120), Image.Resampling.LANCZOS) - - # Convert the image to base64 - base64_image = image_to_base64(img) - - # Append the image to the grid HTML - # img_html = f""" - #
        - # Image - #
        - # """ - img_html = f""" - Image - """ - img_grid_html += img_html - # st.markdown(img_html, unsafe_allow_html=True) - - - # Close the div for the image grid - img_grid_html += "
        " - - # Display the image grid in the container - with con_image.container(): - st.markdown(img_grid_html, unsafe_allow_html=True) - - # The CSS to make the images display inline and be responsive - css = """ - - """ - # Apply the CSS - st.markdown(css, unsafe_allow_html=True) - -def show_header_welcome(): - st.session_state.logo_path = os.path.join(st.session_state.dir_home, 'img','logo.png') - st.session_state.logo = Image.open(st.session_state.logo_path) - st.image(st.session_state.logo, width=250) - - - -######################################################################################################## -### Sidebar for Expense Report #### -######################################################################################################## -def render_expense_report_summary(): - cost_labels = [] - cost_values = [] - total_images = 0 - cost_per_image_dict = {} - st.header('Expense Report Summary') - - if st.session_state.expense_summary: - st.metric(label="Total Cost", value=f"${round(st.session_state.expense_summary['total_cost_sum'], 4):,}") - col1, col2 = st.columns(2) - - # Run count and total costs - with col1: - st.metric(label="Run Count", value=st.session_state.expense_summary['run_count']) - st.metric(label="Tokens In", value=f"{st.session_state.expense_summary['tokens_in_sum']:,}") - - # Token information - with col2: - st.metric(label="Total Images", value=st.session_state.expense_summary['n_images_sum']) - st.metric(label="Tokens Out", value=f"{st.session_state.expense_summary['tokens_out_sum']:,}") - - # Calculate cost proportion per image for each API version - st.subheader('Average Cost per Image by API Version') - - # Iterate through the expense report to accumulate costs and image counts - for index, row in st.session_state.expense_report.iterrows(): - api_version = row['api_version'] - total_cost = row['total_cost'] - n_images = row['n_images'] - total_images += n_images # Keep track of total images processed - if api_version not in cost_per_image_dict: - cost_per_image_dict[api_version] = {'total_cost': 0, 'n_images': 0} - cost_per_image_dict[api_version]['total_cost'] += total_cost - cost_per_image_dict[api_version]['n_images'] += n_images - - api_versions = list(cost_per_image_dict.keys()) - colors = [COLORS_EXPENSE_REPORT[version] if version in COLORS_EXPENSE_REPORT else '#DDDDDD' for version in api_versions] - - # Calculate the cost per image for each API version - for version, cost_data in cost_per_image_dict.items(): - total_cost = cost_data['total_cost'] - n_images = cost_data['n_images'] - - # Calculate the cost per image for this version - cost_per_image = total_cost / n_images if n_images > 0 else 0 - cost_labels.append(version) - cost_values.append(cost_per_image) - - # Generate the pie chart - cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_values, hole=.3)]) - - # Update traces for custom text in hoverinfo, displaying cost with a dollar sign and two decimal places - cost_pie_chart.update_traces( - marker=dict(colors=colors), - text=[f"${value:.2f}" for value in cost_values], - textinfo='percent+label', - hoverinfo='label+percent+text' - ) - st.plotly_chart(cost_pie_chart, use_container_width=True) - - st.subheader('Proportion of Total Cost by API Version') - cost_labels = [] - cost_proportions = [] - total_cost_by_version = {} - - # Sum the total cost for each API version - for index, row in st.session_state.expense_report.iterrows(): - api_version = row['api_version'] - total_cost = row['total_cost'] - if 
api_version not in total_cost_by_version: - total_cost_by_version[api_version] = 0 - total_cost_by_version[api_version] += total_cost - - # Calculate the combined total cost for all versions - combined_total_cost = sum(total_cost_by_version.values()) - - # Calculate the proportion of total cost for each API version - for version, total_cost in total_cost_by_version.items(): - proportion = (total_cost / combined_total_cost) * 100 if combined_total_cost > 0 else 0 - cost_labels.append(version) - cost_proportions.append(proportion) - - # Generate the pie chart - cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_proportions, hole=.3)]) - - # Update traces for custom text in hoverinfo - cost_pie_chart.update_traces( - marker=dict(colors=colors), - text=[f"${cost:.2f}" for cost in total_cost_by_version.values()], - textinfo='percent+label', - hoverinfo='label+percent+text' - ) - st.plotly_chart(cost_pie_chart, use_container_width=True) - - # API version usage percentages pie chart - st.subheader('Runs by API Version') - api_versions = list(st.session_state.expense_summary['api_version_percentages'].keys()) - percentages = [st.session_state.expense_summary['api_version_percentages'][version] for version in api_versions] - pie_chart = go.Figure(data=[go.Pie(labels=api_versions, values=percentages, hole=.3)]) - pie_chart.update_layout(margin=dict(t=0, b=0, l=0, r=0)) - pie_chart.update_traces(marker=dict(colors=colors),) - st.plotly_chart(pie_chart, use_container_width=True) - - else: - st.error('No expense report data available.') - - - -def sidebar_content(): - if not os.path.exists(os.path.join(st.session_state.dir_home,'expense_report')): - validate_dir(os.path.join(st.session_state.dir_home,'expense_report')) - expense_report_path = os.path.join(st.session_state.dir_home, 'expense_report', 'expense_report.csv') - - if os.path.exists(expense_report_path): - # File exists, proceed with summarization - st.session_state.expense_summary, st.session_state.expense_report = summarize_expense_report(expense_report_path) - render_expense_report_summary() - else: - st.session_state.expense_summary, st.session_state.expense_report = None, None - st.header('Expense Report Summary') - st.write('Available after first run...') - - - -######################################################################################################## -### Config Builder #### -######################################################################################################## -def build_LLM_prompt_config(): - st.session_state['assigned_columns'] = [] - st.session_state['default_prompt_author'] = 'unknown' - st.session_state['default_prompt_author_institution'] = 'unknown' - st.session_state['default_prompt_description'] = 'unknown' - st.session_state['default_instructions'] = """1. Refactor the unstructured OCR text into a dictionary based on the JSON structure outlined below. -2. You should map the unstructured OCR text to the appropriate JSON key and then populate the field based on its rules. -3. Some JSON key fields are permitted to remain empty if the corresponding information is not found in the unstructured OCR text. -4. Ignore any information in the OCR text that doesn't fit into the defined JSON structure. -5. Duplicate dictionary fields are not allowed. -6. Ensure that all JSON keys are in lowercase. -7. Ensure that new JSON field values follow sentence case capitalization. -8. 
Ensure all key-value pairs in the JSON dictionary strictly adhere to the format and data types specified in the template. -9. Ensure the output JSON string is valid JSON format. It should not have trailing commas or unquoted keys. -10. Only return a JSON dictionary represented as a string. You should not explain your answer.""" - st.session_state['default_json_formatting_instructions'] = """The next section of instructions outlines how to format the JSON dictionary. The keys are the same as those of the final formatted JSON object. -For each key there is a format requirement that specifies how to transcribe the information for that key. -The possible formatting options are: -1. "verbatim transcription" - field is populated with verbatim text from the unformatted OCR. -2. "spell check transcription" - field is populated with spelling corrected text from the unformatted OCR. -3. "boolean yes no" - field is populated with only yes or no. -4. "boolean 1 0" - field is populated with only 1 or 0. -5. "integer" - field is populated with only an integer. -6. "[list]" - field is populated from one of the values in the list. -7. "yyyy-mm-dd" - field is populated with a date in the format year-month-day. -The desired null value is also given. Populate the field with the null value if the information for that key is not present in the unformatted OCR text.""" - - # Start building the Streamlit app - col_prompt_main_left, ___, col_prompt_main_right = st.columns([6,1,3]) - - with col_prompt_main_left: - st.title("Custom LLM Prompt Builder") - st.subheader('About') - st.write("This form allows you to craft a prompt for your specific task.") - st.subheader('For Hugging Face Spaces') - st.write("If you create a prompt with the Hugging Face Spaces implementation of VoucherVision, make sure that you download the prompt immediately after you have 'Saved' the prompt. Default storage space on HF Spaces is not persistent, so if you refresh the page your prompt will probably disappear.") - st.write("You can submit your prompt using this link and we will add it to our library so it will always be available.") - - st.subheader('How it works') - st.write("1. Edit this page until you are happy with your instructions. We recommend looking at the basic structure, writing down your prompt information in a Word document so that it does not randomly disappear, and then copying and pasting that info into this form once your whole prompt structure is defined.") - st.write("2. After you enter all of your prompt instructions, click 'Save' and give your file a name.") - st.write("3. This file will be saved as a yaml configuration file in the `..VoucherVision/custom_prompts` folder.") - st.write("4. When you go back to the main VoucherVision page you will now see your custom prompt available in the 'Prompt Version' dropdown menu.") - st.write("5. Select your custom prompt. 
Note, your prompt will only be available for the LLM that you set when filling out the form below.") - - dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') - yaml_files = [f for f in os.listdir(dir_prompt) if f.endswith('.yaml')] - col_upload_yaml, col_upload_yaml_2 = st.columns([4,4]) - with col_upload_yaml: - # Upload a prompt from your computer - upload_local_prompt_to_server(dir_prompt) - - col_select_yaml, col_upload_btn, col_download_btn = st.columns([6,2,2]) - with col_select_yaml: - # Dropdown for selecting a YAML file - st.session_state['selected_yaml_file'] = st.selectbox('Select a prompt .YAML file to load:', [''] + yaml_files) - - with col_upload_btn: - st.write('##') - # Button to load the selected prompt - st.button('Load Selected Prompt into Builder', on_click=btn_load_prompt, args=[st.session_state['selected_yaml_file'] , dir_prompt]) - - with col_download_btn: - if st.session_state['selected_yaml_file']: - # Construct the full path to the file - download_file_path = os.path.join(dir_prompt, st.session_state['selected_yaml_file'] ) - # Create the download button - st.write('##') - create_download_button_yaml(download_file_path, st.session_state['selected_yaml_file'] ) - - # Prompt Author Information - st.header("Prompt Author Information") - st.write("We value community contributions! Please provide your name(s) (or pseudonym if you prefer) for credit. If you leave this field blank, it will say 'unknown'.") - st.session_state['prompt_author'] = st.text_input("Enter names of prompt author(s)", value=st.session_state['prompt_info'].get('prompt_author', st.session_state['default_prompt_author'])) - - st.write("Please provide your institution name. If you leave this field blank, it will say 'unknown'.") - st.session_state['prompt_author_institution'] = st.text_input("Enter name of institution", value=st.session_state['prompt_info'].get('prompt_author_institution', st.session_state['default_prompt_author_institution'])) - - st.write("Please provide a description of your prompt and its intended task. Is it designed for a specific collection? Taxa? Database structure?") - st.session_state['prompt_description'] = st.text_input("Enter description of prompt", value=st.session_state['prompt_info'].get('prompt_description', st.session_state['default_prompt_description'])) - - # Input for new file name - st.write('---') - st.header("Prompt Name") - st.write('Provide a name for your custom prompt. It can only conatin letters, numbers, and underscores. No spaces, dashes, or special characters.') - st.session_state['new_prompt_yaml_filename'] = st.text_input("Enter filename to save your prompt as a configuration YAML:", value=None, placeholder='my_prompt_name') - - # Define the options for the LLM Model Type dropdown - st.write('---') - st.header("Set LLM Model Type") - llm_options = ['gpt', 'palm'] - # Create the dropdown and set the value to session_state['LLM'] - st.write("Which LLM is this prompt designed for? 
This will not restrict its use to a specific LLM, but some prompts will behave in different ways across models.") - st.write("For example, VoucherVision will automatically add multiple JSON formatting blocks to all PaLM 2 prompts to coax PaLM 2 to return a valid JSON object.") - st.session_state['LLM'] = st.selectbox('Set LLM', llm_options, index=llm_options.index(st.session_state.get('LLM', 'gpt'))) - - st.write('---') - # Instructions Section - st.header("Instructions") - st.write("These are the general instructions that guide the LLM through the transcription task. We recommend using the default instructions unless you have a specific reason to change them.") - - st.session_state['instructions'] = st.text_area("Enter instructions:", value=st.session_state['default_instructions'].strip(), height=350, disabled=True) - - st.write('---') - - # Column Instructions Section - st.header("JSON Formatting Instructions") - st.write("The following section tells the LLM how we want to structure the JSON dictionary. We do not recommend changing this section because it would likely result in unstable and inconsistent behavior.") - st.session_state['json_formatting_instructions'] = st.text_area("Enter column instructions:", value=st.session_state['default_json_formatting_instructions'], height=350, disabled=True) - - st.write('---') - col_left, col_right = st.columns([6,4]) - with col_left: - st.subheader('Add/Edit Columns') - - # Initialize rules in session state if not already present - if 'rules' not in st.session_state or not st.session_state['rules']: - st.session_state['rules']['Dictionary'] = { - "catalog_number": { - "format": "verbatim transcription", - "null_value": "", - "description": "The barcode identifier, typically a number with at least 6 digits, but fewer than 30 digits." - } - } - st.session_state['rules']['SpeciesName'] = { - "taxonomy": ["Genus_species"] - } - - new_column_name = st.text_input("Enter a new column name:") - - if st.button("Add New Column") and new_column_name: - if new_column_name not in st.session_state['rules']['Dictionary']: - st.session_state['rules']['Dictionary'][new_column_name] = {"format": "", "null_value": "", "description": ""} - st.success(f"New column '{new_column_name}' added. Now you can edit its properties.") - else: - st.error("Column name already exists. 
Please enter a unique column name.") - - # Get columns excluding the protected "catalog_number" - st.write('#') - editable_columns = [col for col in st.session_state['rules']['Dictionary'] if col != "catalog_number"] - column_name = st.selectbox("Select a column to edit:", [""] + editable_columns) - - # Handle rules editing - current_rule = st.session_state['rules']['Dictionary'].get(column_name, { - "format": "", - "null_value": "", - "description": "" - }) - - if 'selected_column' not in st.session_state: - st.session_state['selected_column'] = column_name - - # Form for input fields - with st.form(key='rule_form', clear_on_submit=True): - format_options = ["verbatim transcription", "spell check transcription", "boolean yes no", "boolean 1 0", "integer", "[list]", "yyyy-mm-dd"] - current_rule["format"] = st.selectbox("Format:", format_options, index=format_options.index(current_rule["format"]) if current_rule["format"] else 0) - current_rule["null_value"] = st.text_input("Null value:", value=current_rule["null_value"]) - current_rule["description"] = st.text_area("Description:", value=current_rule["description"]) - commit_button = st.form_submit_button("Commit Column") - - default_rule = { - "format": format_options[0], # default format - "null_value": "", # default null value - "description": "", # default description - } - if st.session_state['selected_column'] != column_name: - # Column has changed. Update the session_state selected column. - st.session_state['selected_column'] = column_name - # Reset the current rule to the default for this new column, or a blank rule if not set. - current_rule = st.session_state['rules']['Dictionary'].get(column_name, default_rule.copy()) - - # Handle commit action - if commit_button and column_name: - # Commit the rules to the session state. 
- st.session_state['rules']['Dictionary'][column_name] = current_rule.copy() - st.success(f"Column '{column_name}' added/updated in rules.") - - # Force the form to reset by clearing the fields from the session state - st.session_state.pop('selected_column', None) # Clear the selected column to force reset - - # Layout for removing an existing column - delete_column_name = st.selectbox("Select a column to delete:", [""] + editable_columns, key='delete_column') - if st.button("Delete Column") and delete_column_name: - del st.session_state['rules'][delete_column_name] - st.success(f"Column '{delete_column_name}' removed from rules.") - - with col_right: - # Display the current state of the JSON rules - st.subheader('Formatted Columns') - st.json(st.session_state['rules']['Dictionary']) - - st.write('---') - col_left_mapping, col_right_mapping = st.columns([6,4]) - with col_left_mapping: - st.header("Mapping") - st.write("Assign each column name to a single category.") - st.session_state['refresh_mapping'] = False - - # Dynamically create a list of all column names that can be assigned - # This assumes that the column names are the keys in the dictionary under 'rules' - all_column_names = list(st.session_state['rules']['Dictionary'].keys()) - - categories = ['TAXONOMY', 'GEOGRAPHY', 'LOCALITY', 'COLLECTING', 'MISCELLANEOUS'] - if ('mapping' not in st.session_state) or (st.session_state['mapping'] == {}): - st.session_state['mapping'] = {category: [] for category in categories} - for category in categories: - # Filter out the already assigned columns - available_columns = [col for col in all_column_names if col not in st.session_state['assigned_columns'] or col in st.session_state['mapping'].get(category, [])] - - # Ensure the current mapping is a subset of the available options - current_mapping = [col for col in st.session_state['mapping'].get(category, []) if col in available_columns] - - # Provide a safe default if the current mapping is empty or contains invalid options - safe_default = current_mapping if all(col in available_columns for col in current_mapping) else [] - - # Create a multi-select widget for the category with a safe default - selected_columns = st.multiselect( - f"Select columns for {category}:", - available_columns, - default=safe_default, - key=f"mapping_{category}" - ) - # Update the assigned_columns based on the selections - for col in current_mapping: - if col not in selected_columns and col in st.session_state['assigned_columns']: - st.session_state['assigned_columns'].remove(col) - st.session_state['refresh_mapping'] = True - - for col in selected_columns: - if col not in st.session_state['assigned_columns']: - st.session_state['assigned_columns'].append(col) - st.session_state['refresh_mapping'] = True - - # Update the mapping in session state when there's a change - st.session_state['mapping'][category] = selected_columns - if st.session_state['refresh_mapping']: - st.session_state['refresh_mapping'] = False - - # Button to confirm and save the mapping configuration - if st.button('Confirm Mapping'): - if check_unique_mapping_assignments(): - # Proceed with further actions since the mapping is confirmed and unique - pass - - with col_right_mapping: - # Display the current state of the JSON rules - st.subheader('Formatted Column Maps') - st.json(st.session_state['mapping']) - - st.write('---') - st.header("Save and Download Custom Prompt") - st.write('Once you click save, validation checks will verify the formatting and then a download button will appear so that 
you can ***save a local copy of your custom prompt.***') - col_left_save, col_right_save, _ = st.columns([2,2,8]) - with col_left_save: - # Button to save the new YAML file - if st.button('Save YAML', type='primary'): - if st.session_state['new_prompt_yaml_filename']: - if check_unique_mapping_assignments(): - if check_prompt_yaml_filename(st.session_state['new_prompt_yaml_filename']): - save_prompt_yaml(st.session_state['new_prompt_yaml_filename'], col_right_save) - else: - st.error("File name can only contain letters, numbers, underscores, and dashes. Cannot contain spaces.") - else: - st.error("Mapping contains an error. Make sure that each column is assigned to only ***one*** category.") - else: - st.error("Please enter a filename.") - - st.write('---') - st.header("Return to VoucherVision") - if st.button('Exit'): - st.session_state.proceed_to_build_llm_prompt = False - st.session_state.proceed_to_main = True - st.rerun() - - with col_prompt_main_right: - if st.session_state['user_clicked_load_prompt_yaml'] is None: # see if user has loaded a yaml to edit - st.session_state['show_prompt_name_e'] = f"Prompt Status :arrow_forward: Building prompt from scratch" - if st.session_state['new_prompt_yaml_filename']: - st.session_state['show_prompt_name_w'] = f"New Prompt Name :arrow_forward: {st.session_state['new_prompt_yaml_filename']}.yaml" - else: - st.session_state['show_prompt_name_w'] = f"New Prompt Name :arrow_forward: [PLEASE SET NAME]" - else: - st.session_state['show_prompt_name_e'] = f"Prompt Status: Editing :arrow_forward: {st.session_state['selected_yaml_file']}" - if st.session_state['new_prompt_yaml_filename']: - st.session_state['show_prompt_name_w'] = f"New Prompt Name :arrow_forward: {st.session_state['new_prompt_yaml_filename']}.yaml" - else: - st.session_state['show_prompt_name_w'] = f"New Prompt Name :arrow_forward: [PLEASE SET NAME]" - - st.subheader(f'Full Prompt') - st.write(st.session_state['show_prompt_name_e']) - st.write(st.session_state['show_prompt_name_w']) - st.write("---") - st.session_state['prompt_info'] = { - 'prompt_author': st.session_state['prompt_author'], - 'prompt_author_institution': st.session_state['prompt_author_institution'], - 'prompt_description': st.session_state['prompt_description'], - 'LLM': st.session_state['LLM'], - 'instructions': st.session_state['instructions'], - 'json_formatting_instructions': st.session_state['json_formatting_instructions'], - 'rules': st.session_state['rules'], - 'mapping': st.session_state['mapping'], - } - st.json(st.session_state['prompt_info']) - -def content_header(): - # Header section, run, quick start, API report - col_run_1, col_run_2, col_run_3, col_run_4 = st.columns([2,2,2,2]) - - # Progress bar - col_run_info_1 = st.columns([1])[0] - - with col_run_info_1: - # Progress - st.subheader("Overall Progress") - overall_progress_bar = st.progress(0) - text_overall = st.empty() # Placeholder for current step name - - st.subheader('Transcription Progress') - batch_progress_bar = st.progress(0) - text_batch = st.empty() # Placeholder for current step name - - progress_report = ProgressReport(overall_progress_bar, batch_progress_bar, text_overall, text_batch) - - st.info("***Note:*** There is a known bug with tabs in Streamlit. If you update an input field it may take you back to the 'Project Settings' tab. Changes that you made are saved, it's just an annoying glitch. 
We are aware of this issue and will fix it as soon as we can.") - st.write("If you use VoucherVision frequently, you can change the default values that are auto-populated in the form below. In a text editor or IDE, edit the first few rows in the file `../VoucherVision/vouchervision/VoucherVision_Config_Builder.py`") - - with col_run_1: - show_header_welcome() - st.subheader('Run VoucherVision') - - if not check_if_usable(): - st.button("Start Processing", type='primary', disabled=True) - # st.error(":heavy_exclamation_mark: Required API keys not set. Please visit the 'API Keys' tab and set the Google Vision OCR API key and at least one LLM key.") - st.error(":heavy_exclamation_mark: Required API keys not set. Please set the API keys as 'Secrets' for your Hugging Face Space. Visit the 'Settings' tab at the top of the page.") - else: - if st.button(f"Start Processing{st.session_state['processing_add_on']}", type='primary'): - - # First, write the config file. - write_config_file(st.session_state.config, st.session_state.dir_home, filename="VoucherVision.yaml") - - path_custom_prompts = os.path.join(st.session_state.dir_home,'custom_prompts',st.session_state.config['leafmachine']['project']['prompt_version']) - - # Define number of overall steps - progress_report.set_n_overall(N_OVERALL_STEPS) - progress_report.update_overall(f"Starting VoucherVision...") - - # Call the machine function. - last_JSON_response, total_cost, st.session_state['zip_filepath'] = voucher_vision(None, st.session_state.dir_home, path_custom_prompts, None, progress_report,path_api_cost=os.path.join(st.session_state.dir_home,'api_cost','api_cost.yaml'), is_real_run=True) - - if total_cost: - st.success(f":money_with_wings: This run cost :heavy_dollar_sign:{total_cost:.4f}") - - # Format the JSON string for display. - if last_JSON_response is None: - st.markdown(f"Last JSON object in the batch: NONE") - else: - try: - formatted_json = json.dumps(json.loads(last_JSON_response), indent=4, sort_keys=False) - except: - formatted_json = json.dumps(last_JSON_response, indent=4, sort_keys=False) - st.markdown(f"Last JSON object in the batch:\n```\n{formatted_json}\n```") - st.balloons() - - if st.session_state['zip_filepath']: - create_download_button(st.session_state['zip_filepath']) - st.button("Refresh", on_click=refresh) - - with col_run_2: - st.subheader('Quick Start') - st.write('1. We include a single image for testing. Without uploading your own images, you can select options below and press "Start Processing" to try VoucherVision.') - st.write('2. Name your run --- If the same name already exist, VV will append the date to the run name.') - st.write('3. Choose a LLM version --- Only LLMs with valid keys will appear in the dropdown list.') - st.write('4. Select a prompt version --- Start with "Version 2". Custom Prompts will include ".yaml" in the name. You can build your own Custom Prompt in the Prompt Builder.') - st.markdown('5. Upload images --- Up to ~100 images can be uploaded in the Hugging Face Spaces implementation. If you want to process more images at once (and have more control in general) then use the [GitHub version](https://github.com/Gene-Weaver/VoucherVision). If you pay for persistent storage for your HF Space, then you may be able to process more too.') - with col_run_3: - st.subheader('') - st.write('6. LeafMachine2 collage --- If selected, LeafMachine2 will isolate all text from the image and create a label collage, which will be sent to the OCR algorithm instead of the full image. 
This improves OCR detection for small or finely written text.') - st.write('7. OCR overlay images --- If selected, VoucherVision will overlay the OCR detections onto the input image. This is useful for debugging transcription errors to see if the OCR failed or if the LLM failed.') - st.write('8. Start processing --- Wait for VoucherVision to finish.') - st.write('9. Download results --- Click the "Download Results" button to save the VoucherVision output to your computer. ***Output files will disappear if you start a new run or restart the Space.***') - st.write('10. Editing the LLM transcriptions --- Use the VoucherVisionEditor to revise and correct any mistakes or ommissions.') - - with col_run_4: - st.subheader('Available LLMs and APIs') - show_available_APIs() - st.info('Until the end of 2023, Azure OpenAI models will be available for anyone to use here. Then only PaLM 2 will be available. To use all services, duplicate this Space and provide your own API keys.') - - - - -######################################################################################################## -### Main Settings #### -######################################################################################################## -def content_tab_settings(): - st.write("---") - st.header("Configuration Settings") - col_project_1, col_project_2, col_project_3 = st.columns([2,2,2]) - - st.write("---") - st.header('Input Images') - st.write('Upload a batch of images using the uploader below. These images will be store temporarily on this server. Each time you upload new images the ***previously uploaded images will be deleted***. You can also clear these cached images by pressing the "Clear Staged Images" button.') - col_local_1, col_local_2 = st.columns([2,6]) - - st.write("---") - st.header('LeafMachine2 Label Collage') - col_cropped_1, col_cropped_2 = st.columns([4,4]) - - st.write("---") - st.header('OCR Overlay Image') - col_ocr_1, col_ocr_2 = st.columns([4,4]) - - ### Project - with col_project_1: - st.subheader('Run name') - st.session_state.config['leafmachine']['project']['run_name'] = st.text_input("Run name", st.session_state.config['leafmachine']['project'].get('run_name', ''), - label_visibility='collapsed') - st.write("Run name will be the name of the final zipped folder.") - - ### LLM Version - with col_project_2: - # Determine the available versions based on the API keys present - available_versions = [] - for api_name, versions in st.session_state['LLM_VERSIONS'].items(): - key_state = st.session_state['api_name_to_key_state'][api_name] - if st.session_state.get(key_state, False): - available_versions.extend(versions) - - # Show available LLM versions in a select box if there are any - st.subheader('LLM Version') - if available_versions: - # Get current selection from session_state, defaulting to the first available version - current_selection = st.session_state.config['leafmachine'].get('LLM_version', available_versions[0]) - # Update the selection with a selectbox - st.session_state.config['leafmachine']['LLM_version'] = st.selectbox( - "LLM version", available_versions, - index=available_versions.index(current_selection), - label_visibility='collapsed' - ) - st.markdown("""***Note:*** GPT-4 is significantly more expensive than GPT-3.5""") - else: - st.error("No LLM versions are available due to missing API keys.") - - ### Prompt Version - with col_project_3: - st.subheader('Prompt Version') - versions, default_version = get_prompt_versions(st.session_state.config['leafmachine']['LLM_version']) - 
- if versions: - selected_version = st.session_state.config['leafmachine']['project'].get('prompt_version', default_version) - if selected_version not in versions: - selected_version = default_version - st.session_state.config['leafmachine']['project']['prompt_version'] = st.selectbox("Prompt Version", versions, index=versions.index(selected_version),label_visibility='collapsed') - st.markdown("Several prompts are provided. Visit the 'Prompt Builder' tab to upload your own prompt. If you would like to make your prompt available to others or have the prompt in the dropdown by default, [please submit the yaml through this form.](https://forms.gle/d1sHV5Y7Y5NxMQzM9)") - - if st.button("Build Custom LLM Prompt",help="It may take a moment for the page to refresh."): - st.session_state.proceed_to_build_llm_prompt = True - st.rerun() - - ### Input Images Local - with col_local_1: - st.session_state['dir_uploaded_images'] = os.path.join(st.session_state.dir_home,'uploads') - st.session_state['dir_uploaded_images_small'] = os.path.join(st.session_state.dir_home,'uploads_small') - uploaded_files = st.file_uploader("Upload Images", type=['jpg', 'jpeg'], accept_multiple_files=True, key=st.session_state['uploader_idk']) - if uploaded_files: - # Clear input image gallery and input list - clear_image_gallery() - - # Process the new iamges - for uploaded_file in uploaded_files: - file_path = save_uploaded_file(st.session_state['dir_uploaded_images'], uploaded_file) - st.session_state['input_list'].append(file_path) - - img = Image.open(file_path) - img.thumbnail((GALLERY_IMAGE_SIZE, GALLERY_IMAGE_SIZE), Image.Resampling.LANCZOS) - file_path_small = save_uploaded_file(st.session_state['dir_uploaded_images_small'], uploaded_file, img) - st.session_state['input_list_small'].append(file_path_small) - print(uploaded_file.name) - - # Set the local images to the uploaded images - st.session_state.config['leafmachine']['project']['dir_images_local'] = st.session_state['dir_uploaded_images'] - - n_images = len([f for f in os.listdir(st.session_state.config['leafmachine']['project']['dir_images_local']) if os.path.isfile(os.path.join(st.session_state.config['leafmachine']['project']['dir_images_local'], f))]) - st.session_state['processing_add_on'] = f" {n_images} Images" - uploaded_files = None - st.session_state['uploader_idk'] += 1 - st.info(f"Processing **{n_images}** images from {st.session_state.config['leafmachine']['project']['dir_images_local']}") - - - st.button("Use Test Image",help="This will clear any uploaded images and load the 1 provided test image.",on_click=use_test_image) - - # Show uploaded images gallery (thumbnails only) - with col_local_2: - if st.session_state['input_list_small']: - st.subheader('Image Gallery') - if len(st.session_state['input_list_small']) > MAX_GALLERY_IMAGES: - # Only take the first 100 images from the list - images_to_display = st.session_state['input_list_small'][:MAX_GALLERY_IMAGES] - else: - # If there are less than 100 images, take them all - images_to_display = st.session_state['input_list_small'] - st.image(images_to_display) - - with col_cropped_1: - default_crops = st.session_state.config['leafmachine']['cropped_components'].get('save_cropped_annotations', ['leaf_whole']) - st.write("Prior to transcription, use LeafMachine2 to crop all labels from input images to create label collages for each specimen image. 
(Requires GPU)") - st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox("Use LeafMachine2 label collage for transcriptions", st.session_state.config['leafmachine'].get('use_RGB_label_images', False)) - - st.session_state.config['leafmachine']['cropped_components']['save_cropped_annotations'] = st.multiselect("Components to crop", - ['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights', - 'leaf_whole', 'leaf_partial', 'leaflet', 'seed_fruit_one', 'seed_fruit_many', 'flower_one', 'flower_many', 'bud','specimen','roots','wood'],default=default_crops) - - with col_cropped_2: - ba = os.path.join(st.session_state.dir_home,'demo', 'ba','ba2.png') - image = Image.open(ba) - st.image(image, caption='LeafMachine2 Collage', output_format = "PNG") - - with col_ocr_1: - st.write('This will plot bounding boxes around all text that Google Vision was able to detect. If there are no boxes around text, then the OCR failed, so that missing text will not be seen by the LLM when it is creating the JSON object. The created image will be viewable in the VoucherVisionEditor.') - st.session_state.config['leafmachine']['do_create_OCR_helper_image'] = st.checkbox("Create image showing an overlay of the OCR detections", st.session_state.config['leafmachine'].get('do_create_OCR_helper_image', False)) - - with col_ocr_2: - ocr = os.path.join(st.session_state.dir_home,'demo', 'ba','ocr.png') - image_ocr = Image.open(ocr) - st.image(image_ocr, caption='OCR Overlay Images', output_format = "PNG") - - - -######################################################################################################## -### Main #### -######################################################################################################## -def main(): - with st.sidebar: - sidebar_content() - # Main App - content_header() - - tab_settings = st.container() - - with tab_settings: - content_tab_settings() - - - -######################################################################################################## -### STREAMLIT APP START #### -######################################################################################################## -st.set_page_config(layout="wide", page_icon='img/icon.ico', page_title='VoucherVision') - - - -######################################################################################################## -### STREAMLIT INIT STATES #### -######################################################################################################## -if 'config' not in st.session_state: - st.session_state.config, st.session_state.dir_home = build_VV_config() - setup_streamlit_config(st.session_state.dir_home) - -if 'proceed_to_main' not in st.session_state: - st.session_state.proceed_to_main = True - -if 'proceed_to_build_llm_prompt' not in st.session_state: - st.session_state.proceed_to_build_llm_prompt = False - -if 'proceed_to_private' not in st.session_state: - st.session_state.proceed_to_private = False - -if 'dir_uploaded_images' not in st.session_state: - st.session_state['dir_uploaded_images'] = os.path.join(st.session_state.dir_home,'uploads') - validate_dir(os.path.join(st.session_state.dir_home,'uploads')) - -if 'dir_uploaded_images_small' not in st.session_state: - st.session_state['dir_uploaded_images_small'] = os.path.join(st.session_state.dir_home,'uploads_small') - validate_dir(os.path.join(st.session_state.dir_home,'uploads_small')) - -if 'prompt_info' not in st.session_state: - st.session_state['prompt_info'] = {} - -if 'rules' 
not in st.session_state: - st.session_state['rules'] = {} - -if 'zip_filepath' not in st.session_state: - st.session_state['zip_filepath'] = None - -if 'input_list' not in st.session_state: - st.session_state['input_list'] = [] - -if 'input_list_small' not in st.session_state: - st.session_state['input_list_small'] = [] - -if 'selected_yaml_file' not in st.session_state: - st.session_state['selected_yaml_file'] = None - -if 'new_prompt_yaml_filename' not in st.session_state: - st.session_state['new_prompt_yaml_filename'] = None - -if 'show_prompt_name_e' not in st.session_state: - st.session_state['show_prompt_name_e'] = None - -if 'show_prompt_name_w' not in st.session_state: - st.session_state['show_prompt_name_w'] = None - -if 'user_clicked_load_prompt_yaml' not in st.session_state: - st.session_state['user_clicked_load_prompt_yaml'] = None - -if 'processing_add_on' not in st.session_state: - st.session_state['processing_add_on'] = ' 1 Image' - -if 'uploader_idk' not in st.session_state: - st.session_state['uploader_idk'] = 1 - -if 'LLM_VERSIONS' not in st.session_state: - st.session_state['LLM_VERSIONS'] = { - 'OpenAI API': ["GPT 4", "GPT 3.5"], - 'Azure API': ["Azure GPT 4", "Azure GPT 3.5"], - 'Palm API': ["PaLM 2"] - } -if 'api_name_to_key_state ' not in st.session_state: - st.session_state['api_name_to_key_state'] = { - 'OpenAI API': 'has_key_openai', - 'Google OCR API': 'has_key_google_OCR', - 'Palm API': 'has_key_palm2', - 'Azure API': 'has_key_azure' - } - - # Initialize API key states if not already in session_state -for api_name, key_state in st.session_state['api_name_to_key_state'].items(): - if key_state not in st.session_state: - st.session_state[key_state] = False - - - -######################################################################################################## -### STREAMLIT SESSION GUIDE #### -######################################################################################################## -if st.session_state.proceed_to_build_llm_prompt: - build_LLM_prompt_config() -elif st.session_state.proceed_to_main: - main() \ No newline at end of file diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/augmentations.py b/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/augmentations.py deleted file mode 100644 index 3f764c06ae3b366496230bcba63c5e8621ce1c95..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/augmentations.py +++ /dev/null @@ -1,284 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Image augmentation functions -""" - -import math -import random - -import cv2 -import numpy as np - -from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box -from utils.metrics import bbox_ioa - - -class Albumentations: - # YOLOv5 Albumentations class (optional, only used if package is installed) - def __init__(self): - self.transform = None - try: - import albumentations as A - check_version(A.__version__, '1.0.3', hard=True) # version requirement - - T = [ - A.Blur(p=0.01), - A.MedianBlur(p=0.01), - A.ToGray(p=0.01), - A.CLAHE(p=0.01), - A.RandomBrightnessContrast(p=0.0), - A.RandomGamma(p=0.0), - A.ImageCompression(quality_lower=75, p=0.0)] # transforms - self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) - - LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) - except ImportError: # package not 
installed, skip - pass - except Exception as e: - LOGGER.info(colorstr('albumentations: ') + f'{e}') - - def __call__(self, im, labels, p=1.0): - if self.transform and random.random() < p: - new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed - im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) - return im, labels - - -def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): - # HSV color-space augmentation - if hgain or sgain or vgain: - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - dtype = im.dtype # uint8 - - x = np.arange(0, 256, dtype=r.dtype) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed - - -def hist_equalize(im, clahe=True, bgr=False): - # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - if clahe: - c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - else: - yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - - -def replicate(im, labels): - # Replicate labels - h, w = im.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] - labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return im, labels - - -def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = im.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better val mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return im, ratio, 
(dw, dh) - - -def random_perspective(im, - targets=(), - segments=(), - degrees=10, - translate=.1, - scale=.1, - shear=10, - perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = im.shape[0] + border[0] * 2 # shape(h,w,c) - width = im.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -im.shape[1] / 2 # x translation (pixels) - C[1, 2] = -im.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) - else: # affine - im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - use_segments = any(x.any() for x in segments) - new = np.zeros((n, 4)) - if use_segments: # warp segments - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - - else: # warp boxes - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) - targets = targets[i] - targets[:, 1:5] = new[i] - - return im, targets - - -def copy_paste(im, labels, segments, p=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - if p and n: - h, w, c = im.shape # height, width, channels - im_new = 
np.zeros(im.shape, np.uint8) - for j in random.sample(range(n), k=round(p * n)): - l, s = labels[j], segments[j] - box = w - l[3], l[2], w - l[1], l[4] - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - labels = np.concatenate((labels, [[l[0], *box]]), 0) - segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=im, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug - - return im, labels, segments - - -def cutout(im, labels, p=0.5): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - if random.random() < p: - h, w = im.shape[:2] - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) # create random masks - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - -def mixup(im, labels, im2, labels2): - # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - im = (im * r + im2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - return im, labels - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/utils/packaging.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/utils/packaging.py deleted file mode 100644 index b9f6af4d17410ce7e1d573c41a1f04dd18ae275e..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/utils/packaging.py +++ /dev/null @@ -1,57 +0,0 @@ -import functools -import logging -import re -from typing import NewType, Optional, Tuple, cast - -from pip._vendor.packaging import specifiers, version -from pip._vendor.packaging.requirements import Requirement - -NormalizedExtra = NewType("NormalizedExtra", str) - -logger = logging.getLogger(__name__) - - -def check_requires_python( - requires_python: Optional[str], version_info: Tuple[int, ...] -) -> bool: - """ - Check if the given Python version matches a "Requires-Python" specifier. 
- - :param version_info: A 3-tuple of ints representing a Python - major-minor-micro version to check (e.g. `sys.version_info[:3]`). - - :return: `True` if the given Python version satisfies the requirement. - Otherwise, return `False`. - - :raises InvalidSpecifier: If `requires_python` has an invalid format. - """ - if requires_python is None: - # The package provides no information - return True - requires_python_specifier = specifiers.SpecifierSet(requires_python) - - python_version = version.parse(".".join(map(str, version_info))) - return python_version in requires_python_specifier - - -@functools.lru_cache(maxsize=512) -def get_requirement(req_string: str) -> Requirement: - """Construct a packaging.Requirement object with caching""" - # Parsing requirement strings is expensive, and is also expected to happen - # with a low diversity of different arguments (at least relative the number - # constructed). This method adds a cache to requirement object creation to - # minimize repeated parsing of the same string to construct equivalent - # Requirement objects. - return Requirement(req_string) - - -def safe_extra(extra: str) -> NormalizedExtra: - """Convert an arbitrary string to a standard 'extra' name - - Any runs of non-alphanumeric characters are replaced with a single '_', - and the result is always lowercased. - - This function is duplicated from ``pkg_resources``. Note that this is not - the same to either ``canonicalize_name`` or ``_egg_link_name``. - """ - return cast(NormalizedExtra, re.sub("[^A-Za-z0-9.-]+", "_", extra).lower()) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/more_itertools/more.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/more_itertools/more.py deleted file mode 100644 index e6fca4d47f661ff16fdc8c2bb7ae5b86c7f347b2..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/more_itertools/more.py +++ /dev/null @@ -1,3824 +0,0 @@ -import warnings - -from collections import Counter, defaultdict, deque, abc -from collections.abc import Sequence -from functools import partial, reduce, wraps -from heapq import merge, heapify, heapreplace, heappop -from itertools import ( - chain, - compress, - count, - cycle, - dropwhile, - groupby, - islice, - repeat, - starmap, - takewhile, - tee, - zip_longest, -) -from math import exp, factorial, floor, log -from queue import Empty, Queue -from random import random, randrange, uniform -from operator import itemgetter, mul, sub, gt, lt -from sys import hexversion, maxsize -from time import monotonic - -from .recipes import ( - consume, - flatten, - pairwise, - powerset, - take, - unique_everseen, -) - -__all__ = [ - 'AbortThread', - 'adjacent', - 'always_iterable', - 'always_reversible', - 'bucket', - 'callback_iter', - 'chunked', - 'circular_shifts', - 'collapse', - 'collate', - 'consecutive_groups', - 'consumer', - 'countable', - 'count_cycle', - 'mark_ends', - 'difference', - 'distinct_combinations', - 'distinct_permutations', - 'distribute', - 'divide', - 'exactly_n', - 'filter_except', - 'first', - 'groupby_transform', - 'ilen', - 'interleave_longest', - 'interleave', - 'intersperse', - 'islice_extended', - 'iterate', - 'ichunked', - 'is_sorted', - 'last', - 'locate', - 'lstrip', - 'make_decorator', - 'map_except', - 'map_reduce', - 'nth_or_last', - 'nth_permutation', - 'nth_product', - 'numeric_range', - 'one', - 'only', - 'padded', - 'partitions', - 
'set_partitions', - 'peekable', - 'repeat_last', - 'replace', - 'rlocate', - 'rstrip', - 'run_length', - 'sample', - 'seekable', - 'SequenceView', - 'side_effect', - 'sliced', - 'sort_together', - 'split_at', - 'split_after', - 'split_before', - 'split_when', - 'split_into', - 'spy', - 'stagger', - 'strip', - 'substrings', - 'substrings_indexes', - 'time_limited', - 'unique_to_each', - 'unzip', - 'windowed', - 'with_iter', - 'UnequalIterablesError', - 'zip_equal', - 'zip_offset', - 'windowed_complete', - 'all_unique', - 'value_chain', - 'product_index', - 'combination_index', - 'permutation_index', -] - -_marker = object() - - -def chunked(iterable, n, strict=False): - """Break *iterable* into lists of length *n*: - - >>> list(chunked([1, 2, 3, 4, 5, 6], 3)) - [[1, 2, 3], [4, 5, 6]] - - By the default, the last yielded list will have fewer than *n* elements - if the length of *iterable* is not divisible by *n*: - - >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3)) - [[1, 2, 3], [4, 5, 6], [7, 8]] - - To use a fill-in value instead, see the :func:`grouper` recipe. - - If the length of *iterable* is not divisible by *n* and *strict* is - ``True``, then ``ValueError`` will be raised before the last - list is yielded. - - """ - iterator = iter(partial(take, n, iter(iterable)), []) - if strict: - - def ret(): - for chunk in iterator: - if len(chunk) != n: - raise ValueError('iterable is not divisible by n.') - yield chunk - - return iter(ret()) - else: - return iterator - - -def first(iterable, default=_marker): - """Return the first item of *iterable*, or *default* if *iterable* is - empty. - - >>> first([0, 1, 2, 3]) - 0 - >>> first([], 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - - :func:`first` is useful when you have a generator of expensive-to-retrieve - values and want any arbitrary one. It is marginally shorter than - ``next(iter(iterable), default)``. - - """ - try: - return next(iter(iterable)) - except StopIteration as e: - if default is _marker: - raise ValueError( - 'first() was called on an empty iterable, and no ' - 'default value was provided.' - ) from e - return default - - -def last(iterable, default=_marker): - """Return the last item of *iterable*, or *default* if *iterable* is - empty. - - >>> last([0, 1, 2, 3]) - 3 - >>> last([], 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - """ - try: - if isinstance(iterable, Sequence): - return iterable[-1] - # Work around https://bugs.python.org/issue38525 - elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0): - return next(reversed(iterable)) - else: - return deque(iterable, maxlen=1)[-1] - except (IndexError, TypeError, StopIteration): - if default is _marker: - raise ValueError( - 'last() was called on an empty iterable, and no default was ' - 'provided.' - ) - return default - - -def nth_or_last(iterable, n, default=_marker): - """Return the nth or the last item of *iterable*, - or *default* if *iterable* is empty. - - >>> nth_or_last([0, 1, 2, 3], 2) - 2 - >>> nth_or_last([0, 1], 2) - 1 - >>> nth_or_last([], 0, 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - """ - return last(islice(iterable, n + 1), default=default) - - -class peekable: - """Wrap an iterator to allow lookahead and prepending elements. 
- - Call :meth:`peek` on the result to get the value that will be returned - by :func:`next`. This won't advance the iterator: - - >>> p = peekable(['a', 'b']) - >>> p.peek() - 'a' - >>> next(p) - 'a' - - Pass :meth:`peek` a default value to return that instead of raising - ``StopIteration`` when the iterator is exhausted. - - >>> p = peekable([]) - >>> p.peek('hi') - 'hi' - - peekables also offer a :meth:`prepend` method, which "inserts" items - at the head of the iterable: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> p.peek() - 11 - >>> list(p) - [11, 12, 1, 2, 3] - - peekables can be indexed. Index 0 is the item that will be returned by - :func:`next`, index 1 is the item after that, and so on: - The values up to the given index will be cached. - - >>> p = peekable(['a', 'b', 'c', 'd']) - >>> p[0] - 'a' - >>> p[1] - 'b' - >>> next(p) - 'a' - - Negative indexes are supported, but be aware that they will cache the - remaining items in the source iterator, which may require significant - storage. - - To check whether a peekable is exhausted, check its truth value: - - >>> p = peekable(['a', 'b']) - >>> if p: # peekable has items - ... list(p) - ['a', 'b'] - >>> if not p: # peekable is exhausted - ... list(p) - [] - - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self._cache = deque() - - def __iter__(self): - return self - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - """Return the item that will be next returned from ``next()``. - - Return ``default`` if there are no items left. If ``default`` is not - provided, raise ``StopIteration``. - - """ - if not self._cache: - try: - self._cache.append(next(self._it)) - except StopIteration: - if default is _marker: - raise - return default - return self._cache[0] - - def prepend(self, *items): - """Stack up items to be the next ones returned from ``next()`` or - ``self.peek()``. The items will be returned in - first in, first out order:: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> list(p) - [11, 12, 1, 2, 3] - - It is possible, by prepending items, to "resurrect" a peekable that - previously raised ``StopIteration``. - - >>> p = peekable([]) - >>> next(p) - Traceback (most recent call last): - ... - StopIteration - >>> p.prepend(1) - >>> next(p) - 1 - >>> next(p) - Traceback (most recent call last): - ... - StopIteration - - """ - self._cache.extendleft(reversed(items)) - - def __next__(self): - if self._cache: - return self._cache.popleft() - - return next(self._it) - - def _get_slice(self, index): - # Normalize the slice's arguments - step = 1 if (index.step is None) else index.step - if step > 0: - start = 0 if (index.start is None) else index.start - stop = maxsize if (index.stop is None) else index.stop - elif step < 0: - start = -1 if (index.start is None) else index.start - stop = (-maxsize - 1) if (index.stop is None) else index.stop - else: - raise ValueError('slice step cannot be zero') - - # If either the start or stop index is negative, we'll need to cache - # the rest of the iterable in order to slice from the right side. - if (start < 0) or (stop < 0): - self._cache.extend(self._it) - # Otherwise we'll need to find the rightmost index and cache to that - # point. 
- else: - n = min(max(start, stop) + 1, maxsize) - cache_len = len(self._cache) - if n >= cache_len: - self._cache.extend(islice(self._it, n - cache_len)) - - return list(self._cache)[index] - - def __getitem__(self, index): - if isinstance(index, slice): - return self._get_slice(index) - - cache_len = len(self._cache) - if index < 0: - self._cache.extend(self._it) - elif index >= cache_len: - self._cache.extend(islice(self._it, index + 1 - cache_len)) - - return self._cache[index] - - -def collate(*iterables, **kwargs): - """Return a sorted merge of the items from each of several already-sorted - *iterables*. - - >>> list(collate('ACDZ', 'AZ', 'JKL')) - ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z'] - - Works lazily, keeping only the next value from each iterable in memory. Use - :func:`collate` to, for example, perform a n-way mergesort of items that - don't fit in memory. - - If a *key* function is specified, the iterables will be sorted according - to its result: - - >>> key = lambda s: int(s) # Sort by numeric value, not by string - >>> list(collate(['1', '10'], ['2', '11'], key=key)) - ['1', '2', '10', '11'] - - - If the *iterables* are sorted in descending order, set *reverse* to - ``True``: - - >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True)) - [5, 4, 3, 2, 1, 0] - - If the elements of the passed-in iterables are out of order, you might get - unexpected results. - - On Python 3.5+, this function is an alias for :func:`heapq.merge`. - - """ - warnings.warn( - "collate is no longer part of more_itertools, use heapq.merge", - DeprecationWarning, - ) - return merge(*iterables, **kwargs) - - -def consumer(func): - """Decorator that automatically advances a PEP-342-style "reverse iterator" - to its first yield point so you don't have to call ``next()`` on it - manually. - - >>> @consumer - ... def tally(): - ... i = 0 - ... while True: - ... print('Thing number %s is %s.' % (i, (yield))) - ... i += 1 - ... - >>> t = tally() - >>> t.send('red') - Thing number 0 is red. - >>> t.send('fish') - Thing number 1 is fish. - - Without the decorator, you would have to call ``next(t)`` before - ``t.send()`` could be used. - - """ - - @wraps(func) - def wrapper(*args, **kwargs): - gen = func(*args, **kwargs) - next(gen) - return gen - - return wrapper - - -def ilen(iterable): - """Return the number of items in *iterable*. - - >>> ilen(x for x in range(1000000) if x % 3 == 0) - 333334 - - This consumes the iterable, so handle with care. - - """ - # This approach was selected because benchmarks showed it's likely the - # fastest of the known implementations at the time of writing. - # See GitHub tracker: #236, #230. - counter = count() - deque(zip(iterable, counter), maxlen=0) - return next(counter) - - -def iterate(func, start): - """Return ``start``, ``func(start)``, ``func(func(start))``, ... - - >>> from itertools import islice - >>> list(islice(iterate(lambda x: 2*x, 1), 10)) - [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] - - """ - while True: - yield start - start = func(start) - - -def with_iter(context_manager): - """Wrap an iterable in a ``with`` statement, so it closes once exhausted. - - For example, this will close the file when the iterator is exhausted:: - - upper_lines = (line.upper() for line in with_iter(open('foo'))) - - Any context manager which returns an iterable is a candidate for - ``with_iter``. 
- - """ - with context_manager as iterable: - yield from iterable - - -def one(iterable, too_short=None, too_long=None): - """Return the first item from *iterable*, which is expected to contain only - that item. Raise an exception if *iterable* is empty or has more than one - item. - - :func:`one` is useful for ensuring that an iterable contains only one item. - For example, it can be used to retrieve the result of a database query - that is expected to return a single row. - - If *iterable* is empty, ``ValueError`` will be raised. You may specify a - different exception with the *too_short* keyword: - - >>> it = [] - >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: too many items in iterable (expected 1)' - >>> too_short = IndexError('too few items') - >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - IndexError: too few items - - Similarly, if *iterable* contains more than one item, ``ValueError`` will - be raised. You may specify a different exception with the *too_long* - keyword: - - >>> it = ['too', 'many'] - >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 'too', - 'many', and perhaps more. - >>> too_long = RuntimeError - >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - RuntimeError - - Note that :func:`one` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check iterable - contents less destructively. - - """ - it = iter(iterable) - - try: - first_value = next(it) - except StopIteration as e: - raise ( - too_short or ValueError('too few items in iterable (expected 1)') - ) from e - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value - - -def distinct_permutations(iterable, r=None): - """Yield successive distinct permutations of the elements in *iterable*. - - >>> sorted(distinct_permutations([1, 0, 1])) - [(0, 1, 1), (1, 0, 1), (1, 1, 0)] - - Equivalent to ``set(permutations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. - - Duplicate permutations arise when there are duplicated elements in the - input iterable. The number of items returned is - `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of - items input, and each `x_i` is the count of a distinct item in the input - sequence. - - If *r* is given, only the *r*-length permutations are yielded. 
- - >>> sorted(distinct_permutations([1, 0, 1], r=2)) - [(0, 1), (1, 0), (1, 1)] - >>> sorted(distinct_permutations(range(3), r=2)) - [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] - - """ - # Algorithm: https://w.wiki/Qai - def _full(A): - while True: - # Yield the permutation we have - yield tuple(A) - - # Find the largest index i such that A[i] < A[i + 1] - for i in range(size - 2, -1, -1): - if A[i] < A[i + 1]: - break - # If no such index exists, this permutation is the last one - else: - return - - # Find the largest index j greater than j such that A[i] < A[j] - for j in range(size - 1, i, -1): - if A[i] < A[j]: - break - - # Swap the value of A[i] with that of A[j], then reverse the - # sequence from A[i + 1] to form the new permutation - A[i], A[j] = A[j], A[i] - A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1] - - # Algorithm: modified from the above - def _partial(A, r): - # Split A into the first r items and the last r items - head, tail = A[:r], A[r:] - right_head_indexes = range(r - 1, -1, -1) - left_tail_indexes = range(len(tail)) - - while True: - # Yield the permutation we have - yield tuple(head) - - # Starting from the right, find the first index of the head with - # value smaller than the maximum value of the tail - call it i. - pivot = tail[-1] - for i in right_head_indexes: - if head[i] < pivot: - break - pivot = head[i] - else: - return - - # Starting from the left, find the first value of the tail - # with a value greater than head[i] and swap. - for j in left_tail_indexes: - if tail[j] > head[i]: - head[i], tail[j] = tail[j], head[i] - break - # If we didn't find one, start from the right and find the first - # index of the head with a value greater than head[i] and swap. - else: - for j in right_head_indexes: - if head[j] > head[i]: - head[i], head[j] = head[j], head[i] - break - - # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)] - tail += head[: i - r : -1] # head[i + 1:][::-1] - i += 1 - head[i:], tail[:] = tail[: r - i], tail[r - i :] - - items = sorted(iterable) - - size = len(items) - if r is None: - r = size - - if 0 < r <= size: - return _full(items) if (r == size) else _partial(items, r) - - return iter(() if r else ((),)) - - -def intersperse(e, iterable, n=1): - """Intersperse filler element *e* among the items in *iterable*, leaving - *n* items between each filler element. - - >>> list(intersperse('!', [1, 2, 3, 4, 5])) - [1, '!', 2, '!', 3, '!', 4, '!', 5] - - >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2)) - [1, 2, None, 3, 4, None, 5] - - """ - if n == 0: - raise ValueError('n must be > 0') - elif n == 1: - # interleave(repeat(e), iterable) -> e, x_0, e, e, x_1, e, x_2... - # islice(..., 1, None) -> x_0, e, e, x_1, e, x_2... - return islice(interleave(repeat(e), iterable), 1, None) - else: - # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]... - # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]... - # flatten(...) -> x_0, x_1, e, x_2, x_3... - filler = repeat([e]) - chunks = chunked(iterable, n) - return flatten(islice(interleave(filler, chunks), 1, None)) - - -def unique_to_each(*iterables): - """Return the elements from each of the input iterables that aren't in the - other input iterables. - - For example, suppose you have a set of packages, each with a set of - dependencies:: - - {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} - - If you remove one package, which dependencies can also be removed? 
- - If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not - associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for - ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: - - >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) - [['A'], ['C'], ['D']] - - If there are duplicates in one input iterable that aren't in the others - they will be duplicated in the output. Input order is preserved:: - - >>> unique_to_each("mississippi", "missouri") - [['p', 'p'], ['o', 'u', 'r']] - - It is assumed that the elements of each iterable are hashable. - - """ - pool = [list(it) for it in iterables] - counts = Counter(chain.from_iterable(map(set, pool))) - uniques = {element for element in counts if counts[element] == 1} - return [list(filter(uniques.__contains__, it)) for it in pool] - - -def windowed(seq, n, fillvalue=None, step=1): - """Return a sliding window of width *n* over the given iterable. - - >>> all_windows = windowed([1, 2, 3, 4, 5], 3) - >>> list(all_windows) - [(1, 2, 3), (2, 3, 4), (3, 4, 5)] - - When the window is larger than the iterable, *fillvalue* is used in place - of missing values: - - >>> list(windowed([1, 2, 3], 4)) - [(1, 2, 3, None)] - - Each window will advance in increments of *step*: - - >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2)) - [(1, 2, 3), (3, 4, 5), (5, 6, '!')] - - To slide into the iterable's items, use :func:`chain` to add filler items - to the left: - - >>> iterable = [1, 2, 3, 4] - >>> n = 3 - >>> padding = [None] * (n - 1) - >>> list(windowed(chain(padding, iterable), 3)) - [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)] - """ - if n < 0: - raise ValueError('n must be >= 0') - if n == 0: - yield tuple() - return - if step < 1: - raise ValueError('step must be >= 1') - - window = deque(maxlen=n) - i = n - for _ in map(window.append, seq): - i -= 1 - if not i: - i = step - yield tuple(window) - - size = len(window) - if size < n: - yield tuple(chain(window, repeat(fillvalue, n - size))) - elif 0 < i < min(step, n): - window += (fillvalue,) * i - yield tuple(window) - - -def substrings(iterable): - """Yield all of the substrings of *iterable*. - - >>> [''.join(s) for s in substrings('more')] - ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more'] - - Note that non-string iterables can also be subdivided. - - >>> list(substrings([0, 1, 2])) - [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)] - - """ - # The length-1 substrings - seq = [] - for item in iter(iterable): - seq.append(item) - yield (item,) - seq = tuple(seq) - item_count = len(seq) - - # And the rest - for n in range(2, item_count + 1): - for i in range(item_count - n + 1): - yield seq[i : i + n] - - -def substrings_indexes(seq, reverse=False): - """Yield all substrings and their positions in *seq* - - The items yielded will be a tuple of the form ``(substr, i, j)``, where - ``substr == seq[i:j]``. - - This function only works for iterables that support slicing, such as - ``str`` objects. - - >>> for item in substrings_indexes('more'): - ... print(item) - ('m', 0, 1) - ('o', 1, 2) - ('r', 2, 3) - ('e', 3, 4) - ('mo', 0, 2) - ('or', 1, 3) - ('re', 2, 4) - ('mor', 0, 3) - ('ore', 1, 4) - ('more', 0, 4) - - Set *reverse* to ``True`` to yield the same items in the opposite order. 
- - - """ - r = range(1, len(seq) + 1) - if reverse: - r = reversed(r) - return ( - (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1) - ) - - -class bucket: - """Wrap *iterable* and return an object that buckets it iterable into - child iterables based on a *key* function. - - >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] - >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character - >>> sorted(list(s)) # Get the keys - ['a', 'b', 'c'] - >>> a_iterable = s['a'] - >>> next(a_iterable) - 'a1' - >>> next(a_iterable) - 'a2' - >>> list(s['b']) - ['b1', 'b2', 'b3'] - - The original iterable will be advanced and its items will be cached until - they are used by the child iterables. This may require significant storage. - - By default, attempting to select a bucket to which no items belong will - exhaust the iterable and cache all values. - If you specify a *validator* function, selected buckets will instead be - checked against it. - - >>> from itertools import count - >>> it = count(1, 2) # Infinite sequence of odd numbers - >>> key = lambda x: x % 10 # Bucket by last digit - >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only - >>> s = bucket(it, key=key, validator=validator) - >>> 2 in s - False - >>> list(s[2]) - [] - - """ - - def __init__(self, iterable, key, validator=None): - self._it = iter(iterable) - self._key = key - self._cache = defaultdict(deque) - self._validator = validator or (lambda x: True) - - def __contains__(self, value): - if not self._validator(value): - return False - - try: - item = next(self[value]) - except StopIteration: - return False - else: - self._cache[value].appendleft(item) - - return True - - def _get_values(self, value): - """ - Helper to yield items from the parent iterator that match *value*. - Items that don't match are stored in the local cache as they - are encountered. - """ - while True: - # If we've cached some items that match the target value, emit - # the first one and evict it from the cache. - if self._cache[value]: - yield self._cache[value].popleft() - # Otherwise we need to advance the parent iterator to search for - # a matching item, caching the rest. - else: - while True: - try: - item = next(self._it) - except StopIteration: - return - item_value = self._key(item) - if item_value == value: - yield item - break - elif self._validator(item_value): - self._cache[item_value].append(item) - - def __iter__(self): - for item in self._it: - item_value = self._key(item) - if self._validator(item_value): - self._cache[item_value].append(item) - - yield from self._cache.keys() - - def __getitem__(self, value): - if not self._validator(value): - return iter(()) - - return self._get_values(value) - - -def spy(iterable, n=1): - """Return a 2-tuple with a list containing the first *n* elements of - *iterable*, and an iterator with the same items as *iterable*. - This allows you to "look ahead" at the items in the iterable without - advancing it. 
- - There is one item in the list by default: - - >>> iterable = 'abcdefg' - >>> head, iterable = spy(iterable) - >>> head - ['a'] - >>> list(iterable) - ['a', 'b', 'c', 'd', 'e', 'f', 'g'] - - You may use unpacking to retrieve items instead of lists: - - >>> (head,), iterable = spy('abcdefg') - >>> head - 'a' - >>> (first, second), iterable = spy('abcdefg', 2) - >>> first - 'a' - >>> second - 'b' - - The number of items requested can be larger than the number of items in - the iterable: - - >>> iterable = [1, 2, 3, 4, 5] - >>> head, iterable = spy(iterable, 10) - >>> head - [1, 2, 3, 4, 5] - >>> list(iterable) - [1, 2, 3, 4, 5] - - """ - it = iter(iterable) - head = take(n, it) - - return head.copy(), chain(head, it) - - -def interleave(*iterables): - """Return a new iterable yielding from each iterable in turn, - until the shortest is exhausted. - - >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7] - - For a version that doesn't terminate after the shortest iterable is - exhausted, see :func:`interleave_longest`. - - """ - return chain.from_iterable(zip(*iterables)) - - -def interleave_longest(*iterables): - """Return a new iterable yielding from each iterable in turn, - skipping any that are exhausted. - - >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7, 3, 8] - - This function produces the same output as :func:`roundrobin`, but may - perform better for some inputs (in particular when the number of iterables - is large). - - """ - i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) - return (x for x in i if x is not _marker) - - -def collapse(iterable, base_type=None, levels=None): - """Flatten an iterable with multiple levels of nesting (e.g., a list of - lists of tuples) into non-iterable types. - - >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] - >>> list(collapse(iterable)) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and - will not be collapsed. - - To avoid collapsing other types, specify *base_type*: - - >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']] - >>> list(collapse(iterable, base_type=tuple)) - ['ab', ('cd', 'ef'), 'gh', 'ij'] - - Specify *levels* to stop flattening after a certain level: - - >>> iterable = [('a', ['b']), ('c', ['d'])] - >>> list(collapse(iterable)) # Fully flattened - ['a', 'b', 'c', 'd'] - >>> list(collapse(iterable, levels=1)) # Only one level flattened - ['a', ['b'], 'c', ['d']] - - """ - - def walk(node, level): - if ( - ((levels is not None) and (level > levels)) - or isinstance(node, (str, bytes)) - or ((base_type is not None) and isinstance(node, base_type)) - ): - yield node - return - - try: - tree = iter(node) - except TypeError: - yield node - return - else: - for child in tree: - yield from walk(child, level + 1) - - yield from walk(iterable, 0) - - -def side_effect(func, iterable, chunk_size=None, before=None, after=None): - """Invoke *func* on each item in *iterable* (or on each *chunk_size* group - of items) before yielding the item. - - `func` must be a function that takes a single argument. Its return value - will be discarded. - - *before* and *after* are optional functions that take no arguments. They - will be executed before iteration starts and after it ends, respectively. - - `side_effect` can be used for logging, updating progress bars, or anything - that is not functionally "pure." 
- - Emitting a status message: - - >>> from more_itertools import consume - >>> func = lambda item: print('Received {}'.format(item)) - >>> consume(side_effect(func, range(2))) - Received 0 - Received 1 - - Operating on chunks of items: - - >>> pair_sums = [] - >>> func = lambda chunk: pair_sums.append(sum(chunk)) - >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2)) - [0, 1, 2, 3, 4, 5] - >>> list(pair_sums) - [1, 5, 9] - - Writing to a file-like object: - - >>> from io import StringIO - >>> from more_itertools import consume - >>> f = StringIO() - >>> func = lambda x: print(x, file=f) - >>> before = lambda: print(u'HEADER', file=f) - >>> after = f.close - >>> it = [u'a', u'b', u'c'] - >>> consume(side_effect(func, it, before=before, after=after)) - >>> f.closed - True - - """ - try: - if before is not None: - before() - - if chunk_size is None: - for item in iterable: - func(item) - yield item - else: - for chunk in chunked(iterable, chunk_size): - func(chunk) - yield from chunk - finally: - if after is not None: - after() - - -def sliced(seq, n, strict=False): - """Yield slices of length *n* from the sequence *seq*. - - >>> list(sliced((1, 2, 3, 4, 5, 6), 3)) - [(1, 2, 3), (4, 5, 6)] - - By the default, the last yielded slice will have fewer than *n* elements - if the length of *seq* is not divisible by *n*: - - >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3)) - [(1, 2, 3), (4, 5, 6), (7, 8)] - - If the length of *seq* is not divisible by *n* and *strict* is - ``True``, then ``ValueError`` will be raised before the last - slice is yielded. - - This function will only work for iterables that support slicing. - For non-sliceable iterables, see :func:`chunked`. - - """ - iterator = takewhile(len, (seq[i : i + n] for i in count(0, n))) - if strict: - - def ret(): - for _slice in iterator: - if len(_slice) != n: - raise ValueError("seq is not divisible by n.") - yield _slice - - return iter(ret()) - else: - return iterator - - -def split_at(iterable, pred, maxsplit=-1, keep_separator=False): - """Yield lists of items from *iterable*, where each list is delimited by - an item where callable *pred* returns ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b')) - [['a'], ['c', 'd', 'c'], ['a']] - - >>> list(split_at(range(10), lambda n: n % 2 == 1)) - [[0], [2], [4], [6], [8], []] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2)) - [[0], [2], [4, 5, 6, 7, 8, 9]] - - By default, the delimiting items are not included in the output. - The include them, set *keep_separator* to ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True)) - [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']] - - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - if pred(item): - yield buf - if keep_separator: - yield [item] - if maxsplit == 1: - yield list(it) - return - buf = [] - maxsplit -= 1 - else: - buf.append(item) - yield buf - - -def split_before(iterable, pred, maxsplit=-1): - """Yield lists of items from *iterable*, where each list ends just before - an item for which callable *pred* returns ``True``: - - >>> list(split_before('OneTwo', lambda s: s.isupper())) - [['O', 'n', 'e'], ['T', 'w', 'o']] - - >>> list(split_before(range(10), lambda n: n % 3 == 0)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] - - At most *maxsplit* splits are done. 
If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - if pred(item) and buf: - yield buf - if maxsplit == 1: - yield [item] + list(it) - return - buf = [] - maxsplit -= 1 - buf.append(item) - if buf: - yield buf - - -def split_after(iterable, pred, maxsplit=-1): - """Yield lists of items from *iterable*, where each list ends with an - item where callable *pred* returns ``True``: - - >>> list(split_after('one1two2', lambda s: s.isdigit())) - [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']] - - >>> list(split_after(range(10), lambda n: n % 3 == 0)) - [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2)) - [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]] - - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - buf.append(item) - if pred(item) and buf: - yield buf - if maxsplit == 1: - yield list(it) - return - buf = [] - maxsplit -= 1 - if buf: - yield buf - - -def split_when(iterable, pred, maxsplit=-1): - """Split *iterable* into pieces based on the output of *pred*. - *pred* should be a function that takes successive pairs of items and - returns ``True`` if the iterable should be split in between them. - - For example, to find runs of increasing numbers, split the iterable when - element ``i`` is larger than element ``i + 1``: - - >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y)) - [[1, 2, 3, 3], [2, 5], [2, 4], [2]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], - ... lambda x, y: x > y, maxsplit=2)) - [[1, 2, 3, 3], [2, 5], [2, 4, 2]] - - """ - if maxsplit == 0: - yield list(iterable) - return - - it = iter(iterable) - try: - cur_item = next(it) - except StopIteration: - return - - buf = [cur_item] - for next_item in it: - if pred(cur_item, next_item): - yield buf - if maxsplit == 1: - yield [next_item] + list(it) - return - buf = [] - maxsplit -= 1 - - buf.append(next_item) - cur_item = next_item - - yield buf - - -def split_into(iterable, sizes): - """Yield a list of sequential items from *iterable* of length 'n' for each - integer 'n' in *sizes*. - - >>> list(split_into([1,2,3,4,5,6], [1,2,3])) - [[1], [2, 3], [4, 5, 6]] - - If the sum of *sizes* is smaller than the length of *iterable*, then the - remaining items of *iterable* will not be returned. - - >>> list(split_into([1,2,3,4,5,6], [2,3])) - [[1, 2], [3, 4, 5]] - - If the sum of *sizes* is larger than the length of *iterable*, fewer items - will be returned in the iteration that overruns *iterable* and further - lists will be empty: - - >>> list(split_into([1,2,3,4], [1,2,3,4])) - [[1], [2, 3], [4], []] - - When a ``None`` object is encountered in *sizes*, the returned list will - contain items up to the end of *iterable* the same way that itertools.slice - does: - - >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None])) - [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]] - - :func:`split_into` can be useful for grouping a series of items where the - sizes of the groups are not uniform. 
An example would be where in a row - from a table, multiple columns represent elements of the same feature - (e.g. a point represented by x,y,z) but, the format is not the same for - all columns. - """ - # convert the iterable argument into an iterator so its contents can - # be consumed by islice in case it is a generator - it = iter(iterable) - - for size in sizes: - if size is None: - yield list(it) - return - else: - yield list(islice(it, size)) - - -def padded(iterable, fillvalue=None, n=None, next_multiple=False): - """Yield the elements from *iterable*, followed by *fillvalue*, such that - at least *n* items are emitted. - - >>> list(padded([1, 2, 3], '?', 5)) - [1, 2, 3, '?', '?'] - - If *next_multiple* is ``True``, *fillvalue* will be emitted until the - number of items emitted is a multiple of *n*:: - - >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True)) - [1, 2, 3, 4, None, None] - - If *n* is ``None``, *fillvalue* will be emitted indefinitely. - - """ - it = iter(iterable) - if n is None: - yield from chain(it, repeat(fillvalue)) - elif n < 1: - raise ValueError('n must be at least 1') - else: - item_count = 0 - for item in it: - yield item - item_count += 1 - - remaining = (n - item_count) % n if next_multiple else n - item_count - for _ in range(remaining): - yield fillvalue - - -def repeat_last(iterable, default=None): - """After the *iterable* is exhausted, keep yielding its last element. - - >>> list(islice(repeat_last(range(3)), 5)) - [0, 1, 2, 2, 2] - - If the iterable is empty, yield *default* forever:: - - >>> list(islice(repeat_last(range(0), 42), 5)) - [42, 42, 42, 42, 42] - - """ - item = _marker - for item in iterable: - yield item - final = default if item is _marker else item - yield from repeat(final) - - -def distribute(n, iterable): - """Distribute the items from *iterable* among *n* smaller iterables. - - >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6]) - >>> list(group_1) - [1, 3, 5] - >>> list(group_2) - [2, 4, 6] - - If the length of *iterable* is not evenly divisible by *n*, then the - length of the returned iterables will not be identical: - - >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7]) - >>> [list(c) for c in children] - [[1, 4, 7], [2, 5], [3, 6]] - - If the length of *iterable* is smaller than *n*, then the last returned - iterables will be empty: - - >>> children = distribute(5, [1, 2, 3]) - >>> [list(c) for c in children] - [[1], [2], [3], [], []] - - This function uses :func:`itertools.tee` and may require significant - storage. If you need the order items in the smaller iterables to match the - original iterable, see :func:`divide`. - - """ - if n < 1: - raise ValueError('n must be at least 1') - - children = tee(iterable, n) - return [islice(it, index, None, n) for index, it in enumerate(children)] - - -def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None): - """Yield tuples whose elements are offset from *iterable*. - The amount by which the `i`-th item in each tuple is offset is given by - the `i`-th item in *offsets*. - - >>> list(stagger([0, 1, 2, 3])) - [(None, 0, 1), (0, 1, 2), (1, 2, 3)] - >>> list(stagger(range(8), offsets=(0, 2, 4))) - [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)] - - By default, the sequence will end when the final element of a tuple is the - last item in the iterable. 
To continue until the first element of a tuple - is the last item in the iterable, set *longest* to ``True``:: - - >>> list(stagger([0, 1, 2, 3], longest=True)) - [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. - - """ - children = tee(iterable, len(offsets)) - - return zip_offset( - *children, offsets=offsets, longest=longest, fillvalue=fillvalue - ) - - -class UnequalIterablesError(ValueError): - def __init__(self, details=None): - msg = 'Iterables have different lengths' - if details is not None: - msg += (': index 0 has length {}; index {} has length {}').format( - *details - ) - - super().__init__(msg) - - -def _zip_equal_generator(iterables): - for combo in zip_longest(*iterables, fillvalue=_marker): - for val in combo: - if val is _marker: - raise UnequalIterablesError() - yield combo - - -def zip_equal(*iterables): - """``zip`` the input *iterables* together, but raise - ``UnequalIterablesError`` if they aren't all the same length. - - >>> it_1 = range(3) - >>> it_2 = iter('abc') - >>> list(zip_equal(it_1, it_2)) - [(0, 'a'), (1, 'b'), (2, 'c')] - - >>> it_1 = range(3) - >>> it_2 = iter('abcd') - >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - more_itertools.more.UnequalIterablesError: Iterables have different - lengths - - """ - if hexversion >= 0x30A00A6: - warnings.warn( - ( - 'zip_equal will be removed in a future version of ' - 'more-itertools. Use the builtin zip function with ' - 'strict=True instead.' - ), - DeprecationWarning, - ) - # Check whether the iterables are all the same size. - try: - first_size = len(iterables[0]) - for i, it in enumerate(iterables[1:], 1): - size = len(it) - if size != first_size: - break - else: - # If we didn't break out, we can use the built-in zip. - return zip(*iterables) - - # If we did break out, there was a mismatch. - raise UnequalIterablesError(details=(first_size, i, size)) - # If any one of the iterables didn't have a length, start reading - # them until one runs out. - except TypeError: - return _zip_equal_generator(iterables) - - -def zip_offset(*iterables, offsets, longest=False, fillvalue=None): - """``zip`` the input *iterables* together, but offset the `i`-th iterable - by the `i`-th item in *offsets*. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1))) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')] - - This can be used as a lightweight alternative to SciPy or pandas to analyze - data sets in which some series have a lead or lag relationship. - - By default, the sequence will end when the shortest iterable is exhausted. - To continue until the longest iterable is exhausted, set *longest* to - ``True``. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True)) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. 
- - """ - if len(iterables) != len(offsets): - raise ValueError("Number of iterables and offsets didn't match") - - staggered = [] - for it, n in zip(iterables, offsets): - if n < 0: - staggered.append(chain(repeat(fillvalue, -n), it)) - elif n > 0: - staggered.append(islice(it, n, None)) - else: - staggered.append(it) - - if longest: - return zip_longest(*staggered, fillvalue=fillvalue) - - return zip(*staggered) - - -def sort_together(iterables, key_list=(0,), key=None, reverse=False): - """Return the input iterables sorted together, with *key_list* as the - priority for sorting. All iterables are trimmed to the length of the - shortest one. - - This can be used like the sorting function in a spreadsheet. If each - iterable represents a column of data, the key list determines which - columns are used for sorting. - - By default, all iterables are sorted using the ``0``-th iterable:: - - >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')] - >>> sort_together(iterables) - [(1, 2, 3, 4), ('d', 'c', 'b', 'a')] - - Set a different key list to sort according to another iterable. - Specifying multiple keys dictates how ties are broken:: - - >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')] - >>> sort_together(iterables, key_list=(1, 2)) - [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')] - - To sort by a function of the elements of the iterable, pass a *key* - function. Its arguments are the elements of the iterables corresponding to - the key list:: - - >>> names = ('a', 'b', 'c') - >>> lengths = (1, 2, 3) - >>> widths = (5, 2, 1) - >>> def area(length, width): - ... return length * width - >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area) - [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)] - - Set *reverse* to ``True`` to sort in descending order. - - >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True) - [(3, 2, 1), ('a', 'b', 'c')] - - """ - if key is None: - # if there is no key function, the key argument to sorted is an - # itemgetter - key_argument = itemgetter(*key_list) - else: - # if there is a key function, call it with the items at the offsets - # specified by the key function as arguments - key_list = list(key_list) - if len(key_list) == 1: - # if key_list contains a single item, pass the item at that offset - # as the only argument to the key function - key_offset = key_list[0] - key_argument = lambda zipped_items: key(zipped_items[key_offset]) - else: - # if key_list contains multiple items, use itemgetter to return a - # tuple of items, which we pass as *args to the key function - get_key_items = itemgetter(*key_list) - key_argument = lambda zipped_items: key( - *get_key_items(zipped_items) - ) - - return list( - zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse)) - ) - - -def unzip(iterable): - """The inverse of :func:`zip`, this function disaggregates the elements - of the zipped *iterable*. - - The ``i``-th iterable contains the ``i``-th element from each element - of the zipped iterable. The first element is used to to determine the - length of the remaining elements. - - >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> letters, numbers = unzip(iterable) - >>> list(letters) - ['a', 'b', 'c', 'd'] - >>> list(numbers) - [1, 2, 3, 4] - - This is similar to using ``zip(*iterable)``, but it avoids reading - *iterable* into memory. Note, however, that this function uses - :func:`itertools.tee` and thus may require significant storage. - - """ - head, iterable = spy(iter(iterable)) - if not head: - # empty iterable, e.g. 
zip([], [], []) - return () - # spy returns a one-length iterable as head - head = head[0] - iterables = tee(iterable, len(head)) - - def itemgetter(i): - def getter(obj): - try: - return obj[i] - except IndexError: - # basically if we have an iterable like - # iter([(1, 2, 3), (4, 5), (6,)]) - # the second unzipped iterable would fail at the third tuple - # since it would try to access tup[1] - # same with the third unzipped iterable and the second tuple - # to support these "improperly zipped" iterables, - # we create a custom itemgetter - # which just stops the unzipped iterables - # at first length mismatch - raise StopIteration - - return getter - - return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) - - -def divide(n, iterable): - """Divide the elements from *iterable* into *n* parts, maintaining - order. - - >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6]) - >>> list(group_1) - [1, 2, 3] - >>> list(group_2) - [4, 5, 6] - - If the length of *iterable* is not evenly divisible by *n*, then the - length of the returned iterables will not be identical: - - >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7]) - >>> [list(c) for c in children] - [[1, 2, 3], [4, 5], [6, 7]] - - If the length of the iterable is smaller than n, then the last returned - iterables will be empty: - - >>> children = divide(5, [1, 2, 3]) - >>> [list(c) for c in children] - [[1], [2], [3], [], []] - - This function will exhaust the iterable before returning and may require - significant storage. If order is not important, see :func:`distribute`, - which does not first pull the iterable into memory. - - """ - if n < 1: - raise ValueError('n must be at least 1') - - try: - iterable[:0] - except TypeError: - seq = tuple(iterable) - else: - seq = iterable - - q, r = divmod(len(seq), n) - - ret = [] - stop = 0 - for i in range(1, n + 1): - start = stop - stop += q + 1 if i <= r else q - ret.append(iter(seq[start:stop])) - - return ret - - -def always_iterable(obj, base_type=(str, bytes)): - """If *obj* is iterable, return an iterator over its items:: - - >>> obj = (1, 2, 3) - >>> list(always_iterable(obj)) - [1, 2, 3] - - If *obj* is not iterable, return a one-item iterable containing *obj*:: - - >>> obj = 1 - >>> list(always_iterable(obj)) - [1] - - If *obj* is ``None``, return an empty iterable: - - >>> obj = None - >>> list(always_iterable(None)) - [] - - By default, binary and text strings are not considered iterable:: - - >>> obj = 'foo' - >>> list(always_iterable(obj)) - ['foo'] - - If *base_type* is set, objects for which ``isinstance(obj, base_type)`` - returns ``True`` won't be considered iterable. - - >>> obj = {'a': 1} - >>> list(always_iterable(obj)) # Iterate over the dict's keys - ['a'] - >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit - [{'a': 1}] - - Set *base_type* to ``None`` to avoid any special handling and treat objects - Python considers iterable as iterable: - - >>> obj = 'foo' - >>> list(always_iterable(obj, base_type=None)) - ['f', 'o', 'o'] - """ - if obj is None: - return iter(()) - - if (base_type is not None) and isinstance(obj, base_type): - return iter((obj,)) - - try: - return iter(obj) - except TypeError: - return iter((obj,)) - - -def adjacent(predicate, iterable, distance=1): - """Return an iterable over `(bool, item)` tuples where the `item` is - drawn from *iterable* and the `bool` indicates whether - that item satisfies the *predicate* or is adjacent to an item that does. 
- - For example, to find whether items are adjacent to a ``3``:: - - >>> list(adjacent(lambda x: x == 3, range(6))) - [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] - - Set *distance* to change what counts as adjacent. For example, to find - whether items are two places away from a ``3``: - - >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) - [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] - - This is useful for contextualizing the results of a search function. - For example, a code comparison tool might want to identify lines that - have changed, but also surrounding lines to give the viewer of the diff - context. - - The predicate function will only be called once for each item in the - iterable. - - See also :func:`groupby_transform`, which can be used with this function - to group ranges of items with the same `bool` value. - - """ - # Allow distance=0 mainly for testing that it reproduces results with map() - if distance < 0: - raise ValueError('distance must be at least 0') - - i1, i2 = tee(iterable) - padding = [False] * distance - selected = chain(padding, map(predicate, i1), padding) - adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) - return zip(adjacent_to_selected, i2) - - -def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None): - """An extension of :func:`itertools.groupby` that can apply transformations - to the grouped data. - - * *keyfunc* is a function computing a key value for each item in *iterable* - * *valuefunc* is a function that transforms the individual items from - *iterable* after grouping - * *reducefunc* is a function that transforms each group of items - - >>> iterable = 'aAAbBBcCC' - >>> keyfunc = lambda k: k.upper() - >>> valuefunc = lambda v: v.lower() - >>> reducefunc = lambda g: ''.join(g) - >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc)) - [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')] - - Each optional argument defaults to an identity function if not specified. - - :func:`groupby_transform` is useful when grouping elements of an iterable - using a separate iterable as the key. To do this, :func:`zip` the iterables - and pass a *keyfunc* that extracts the first element and a *valuefunc* - that extracts the second element:: - - >>> from operator import itemgetter - >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] - >>> values = 'abcdefghi' - >>> iterable = zip(keys, values) - >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) - >>> [(k, ''.join(g)) for k, g in grouper] - [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] - - Note that the order of items in the iterable is significant. - Only adjacent items are grouped together, so if you don't want any - duplicate groups, you should sort the iterable by the key function. - - """ - ret = groupby(iterable, keyfunc) - if valuefunc: - ret = ((k, map(valuefunc, g)) for k, g in ret) - if reducefunc: - ret = ((k, reducefunc(g)) for k, g in ret) - - return ret - - -class numeric_range(abc.Sequence, abc.Hashable): - """An extension of the built-in ``range()`` function whose arguments can - be any orderable numeric type. - - With only *stop* specified, *start* defaults to ``0`` and *step* - defaults to ``1``. The output items will match the type of *stop*: - - >>> list(numeric_range(3.5)) - [0.0, 1.0, 2.0, 3.0] - - With only *start* and *stop* specified, *step* defaults to ``1``. 
The - output items will match the type of *start*: - - >>> from decimal import Decimal - >>> start = Decimal('2.1') - >>> stop = Decimal('5.1') - >>> list(numeric_range(start, stop)) - [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] - - With *start*, *stop*, and *step* specified the output items will match - the type of ``start + step``: - - >>> from fractions import Fraction - >>> start = Fraction(1, 2) # Start at 1/2 - >>> stop = Fraction(5, 2) # End at 5/2 - >>> step = Fraction(1, 2) # Count by 1/2 - >>> list(numeric_range(start, stop, step)) - [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] - - If *step* is zero, ``ValueError`` is raised. Negative steps are supported: - - >>> list(numeric_range(3, -1, -1.0)) - [3.0, 2.0, 1.0, 0.0] - - Be aware of the limitations of floating point numbers; the representation - of the yielded numbers may be surprising. - - ``datetime.datetime`` objects can be used for *start* and *stop*, if *step* - is a ``datetime.timedelta`` object: - - >>> import datetime - >>> start = datetime.datetime(2019, 1, 1) - >>> stop = datetime.datetime(2019, 1, 3) - >>> step = datetime.timedelta(days=1) - >>> items = iter(numeric_range(start, stop, step)) - >>> next(items) - datetime.datetime(2019, 1, 1, 0, 0) - >>> next(items) - datetime.datetime(2019, 1, 2, 0, 0) - - """ - - _EMPTY_HASH = hash(range(0, 0)) - - def __init__(self, *args): - argc = len(args) - if argc == 1: - (self._stop,) = args - self._start = type(self._stop)(0) - self._step = type(self._stop - self._start)(1) - elif argc == 2: - self._start, self._stop = args - self._step = type(self._stop - self._start)(1) - elif argc == 3: - self._start, self._stop, self._step = args - elif argc == 0: - raise TypeError( - 'numeric_range expected at least ' - '1 argument, got {}'.format(argc) - ) - else: - raise TypeError( - 'numeric_range expected at most ' - '3 arguments, got {}'.format(argc) - ) - - self._zero = type(self._step)(0) - if self._step == self._zero: - raise ValueError('numeric_range() arg 3 must not be zero') - self._growing = self._step > self._zero - self._init_len() - - def __bool__(self): - if self._growing: - return self._start < self._stop - else: - return self._start > self._stop - - def __contains__(self, elem): - if self._growing: - if self._start <= elem < self._stop: - return (elem - self._start) % self._step == self._zero - else: - if self._start >= elem > self._stop: - return (self._start - elem) % (-self._step) == self._zero - - return False - - def __eq__(self, other): - if isinstance(other, numeric_range): - empty_self = not bool(self) - empty_other = not bool(other) - if empty_self or empty_other: - return empty_self and empty_other # True if both empty - else: - return ( - self._start == other._start - and self._step == other._step - and self._get_by_index(-1) == other._get_by_index(-1) - ) - else: - return False - - def __getitem__(self, key): - if isinstance(key, int): - return self._get_by_index(key) - elif isinstance(key, slice): - step = self._step if key.step is None else key.step * self._step - - if key.start is None or key.start <= -self._len: - start = self._start - elif key.start >= self._len: - start = self._stop - else: # -self._len < key.start < self._len - start = self._get_by_index(key.start) - - if key.stop is None or key.stop >= self._len: - stop = self._stop - elif key.stop <= -self._len: - stop = self._start - else: # -self._len < key.stop < self._len - stop = self._get_by_index(key.stop) - - return numeric_range(start, stop, step) - else: - raise 
TypeError( - 'numeric range indices must be ' - 'integers or slices, not {}'.format(type(key).__name__) - ) - - def __hash__(self): - if self: - return hash((self._start, self._get_by_index(-1), self._step)) - else: - return self._EMPTY_HASH - - def __iter__(self): - values = (self._start + (n * self._step) for n in count()) - if self._growing: - return takewhile(partial(gt, self._stop), values) - else: - return takewhile(partial(lt, self._stop), values) - - def __len__(self): - return self._len - - def _init_len(self): - if self._growing: - start = self._start - stop = self._stop - step = self._step - else: - start = self._stop - stop = self._start - step = -self._step - distance = stop - start - if distance <= self._zero: - self._len = 0 - else: # distance > 0 and step > 0: regular euclidean division - q, r = divmod(distance, step) - self._len = int(q) + int(r != self._zero) - - def __reduce__(self): - return numeric_range, (self._start, self._stop, self._step) - - def __repr__(self): - if self._step == 1: - return "numeric_range({}, {})".format( - repr(self._start), repr(self._stop) - ) - else: - return "numeric_range({}, {}, {})".format( - repr(self._start), repr(self._stop), repr(self._step) - ) - - def __reversed__(self): - return iter( - numeric_range( - self._get_by_index(-1), self._start - self._step, -self._step - ) - ) - - def count(self, value): - return int(value in self) - - def index(self, value): - if self._growing: - if self._start <= value < self._stop: - q, r = divmod(value - self._start, self._step) - if r == self._zero: - return int(q) - else: - if self._start >= value > self._stop: - q, r = divmod(self._start - value, -self._step) - if r == self._zero: - return int(q) - - raise ValueError("{} is not in numeric range".format(value)) - - def _get_by_index(self, i): - if i < 0: - i += self._len - if i < 0 or i >= self._len: - raise IndexError("numeric range object index out of range") - return self._start + i * self._step - - -def count_cycle(iterable, n=None): - """Cycle through the items from *iterable* up to *n* times, yielding - the number of completed cycles along with each item. If *n* is omitted the - process repeats indefinitely. - - >>> list(count_cycle('AB', 3)) - [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] - - """ - iterable = tuple(iterable) - if not iterable: - return iter(()) - counter = count() if n is None else range(n) - return ((i, item) for i in counter for item in iterable) - - -def mark_ends(iterable): - """Yield 3-tuples of the form ``(is_first, is_last, item)``. - - >>> list(mark_ends('ABC')) - [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')] - - Use this when looping over an iterable to take special action on its first - and/or last items: - - >>> iterable = ['Header', 100, 200, 'Footer'] - >>> total = 0 - >>> for is_first, is_last, item in mark_ends(iterable): - ... if is_first: - ... continue # Skip the header - ... if is_last: - ... continue # Skip the footer - ... total += item - >>> print(total) - 300 - """ - it = iter(iterable) - - try: - b = next(it) - except StopIteration: - return - - try: - for i in count(): - a = b - b = next(it) - yield i == 0, False, a - - except StopIteration: - yield i == 0, True, a - - -def locate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``. 
- - *pred* defaults to :func:`bool`, which will select truthy items: - - >>> list(locate([0, 1, 1, 0, 1, 0, 0])) - [1, 2, 4] - - Set *pred* to a custom function to, e.g., find the indexes for a particular - item. - - >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b')) - [1, 3] - - If *window_size* is given, then the *pred* function will be called with - that many items. This enables searching for sub-sequences: - - >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] - >>> pred = lambda *args: args == (1, 2, 3) - >>> list(locate(iterable, pred=pred, window_size=3)) - [1, 5, 9] - - Use with :func:`seekable` to find indexes and then retrieve the associated - items: - - >>> from itertools import count - >>> from more_itertools import seekable - >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count()) - >>> it = seekable(source) - >>> pred = lambda x: x > 100 - >>> indexes = locate(it, pred=pred) - >>> i = next(indexes) - >>> it.seek(i) - >>> next(it) - 106 - - """ - if window_size is None: - return compress(count(), map(pred, iterable)) - - if window_size < 1: - raise ValueError('window size must be at least 1') - - it = windowed(iterable, window_size, fillvalue=_marker) - return compress(count(), starmap(pred, it)) - - -def lstrip(iterable, pred): - """Yield the items from *iterable*, but strip any from the beginning - for which *pred* returns ``True``. - - For example, to remove a set of items from the start of an iterable: - - >>> iterable = (None, False, None, 1, 2, None, 3, False, None) - >>> pred = lambda x: x in {None, False, ''} - >>> list(lstrip(iterable, pred)) - [1, 2, None, 3, False, None] - - This function is analogous to to :func:`str.lstrip`, and is essentially - an wrapper for :func:`itertools.dropwhile`. - - """ - return dropwhile(pred, iterable) - - -def rstrip(iterable, pred): - """Yield the items from *iterable*, but strip any from the end - for which *pred* returns ``True``. - - For example, to remove a set of items from the end of an iterable: - - >>> iterable = (None, False, None, 1, 2, None, 3, False, None) - >>> pred = lambda x: x in {None, False, ''} - >>> list(rstrip(iterable, pred)) - [None, False, None, 1, 2, None, 3] - - This function is analogous to :func:`str.rstrip`. - - """ - cache = [] - cache_append = cache.append - cache_clear = cache.clear - for x in iterable: - if pred(x): - cache_append(x) - else: - yield from cache - cache_clear() - yield x - - -def strip(iterable, pred): - """Yield the items from *iterable*, but strip any from the - beginning and end for which *pred* returns ``True``. - - For example, to remove a set of items from both ends of an iterable: - - >>> iterable = (None, False, None, 1, 2, None, 3, False, None) - >>> pred = lambda x: x in {None, False, ''} - >>> list(strip(iterable, pred)) - [1, 2, None, 3] - - This function is analogous to :func:`str.strip`. - - """ - return rstrip(lstrip(iterable, pred), pred) - - -class islice_extended: - """An extension of :func:`itertools.islice` that supports negative values - for *stop*, *start*, and *step*. - - >>> iterable = iter('abcdefgh') - >>> list(islice_extended(iterable, -4, -1)) - ['e', 'f', 'g'] - - Slices with negative values require some caching of *iterable*, but this - function takes care to minimize the amount of memory required. 
- - For example, you can use a negative step with an infinite iterator: - - >>> from itertools import count - >>> list(islice_extended(count(), 110, 99, -2)) - [110, 108, 106, 104, 102, 100] - - You can also use slice notation directly: - - >>> iterable = map(str, count()) - >>> it = islice_extended(iterable)[10:20:2] - >>> list(it) - ['10', '12', '14', '16', '18'] - - """ - - def __init__(self, iterable, *args): - it = iter(iterable) - if args: - self._iterable = _islice_helper(it, slice(*args)) - else: - self._iterable = it - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterable) - - def __getitem__(self, key): - if isinstance(key, slice): - return islice_extended(_islice_helper(self._iterable, key)) - - raise TypeError('islice_extended.__getitem__ argument must be a slice') - - -def _islice_helper(it, s): - start = s.start - stop = s.stop - if s.step == 0: - raise ValueError('step argument must be a non-zero integer or None.') - step = s.step or 1 - - if step > 0: - start = 0 if (start is None) else start - - if start < 0: - # Consume all but the last -start items - cache = deque(enumerate(it, 1), maxlen=-start) - len_iter = cache[-1][0] if cache else 0 - - # Adjust start to be positive - i = max(len_iter + start, 0) - - # Adjust stop to be positive - if stop is None: - j = len_iter - elif stop >= 0: - j = min(stop, len_iter) - else: - j = max(len_iter + stop, 0) - - # Slice the cache - n = j - i - if n <= 0: - return - - for index, item in islice(cache, 0, n, step): - yield item - elif (stop is not None) and (stop < 0): - # Advance to the start position - next(islice(it, start, start), None) - - # When stop is negative, we have to carry -stop items while - # iterating - cache = deque(islice(it, -stop), maxlen=-stop) - - for index, item in enumerate(it): - cached_item = cache.popleft() - if index % step == 0: - yield cached_item - cache.append(item) - else: - # When both start and stop are positive we have the normal case - yield from islice(it, start, stop, step) - else: - start = -1 if (start is None) else start - - if (stop is not None) and (stop < 0): - # Consume all but the last items - n = -stop - 1 - cache = deque(enumerate(it, 1), maxlen=n) - len_iter = cache[-1][0] if cache else 0 - - # If start and stop are both negative they are comparable and - # we can just slice. Otherwise we can adjust start to be negative - # and then slice. - if start < 0: - i, j = start, stop - else: - i, j = min(start - len_iter, -1), None - - for index, item in list(cache)[i:j:step]: - yield item - else: - # Advance to the stop position - if stop is not None: - m = stop + 1 - next(islice(it, m, m), None) - - # stop is positive, so if start is negative they are not comparable - # and we need the rest of the items. - if start < 0: - i = start - n = None - # stop is None and start is positive, so we just need items up to - # the start index. - elif stop is None: - i = None - n = start + 1 - # Both stop and start are positive, so they are comparable. - else: - i = None - n = start - stop - if n <= 0: - return - - cache = list(islice(it, n)) - - yield from cache[i::step] - - -def always_reversible(iterable): - """An extension of :func:`reversed` that supports all iterables, not - just those which implement the ``Reversible`` or ``Sequence`` protocols. - - >>> print(*always_reversible(x for x in range(3))) - 2 1 0 - - If the iterable is already reversible, this function returns the - result of :func:`reversed()`. 
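# Illustrative sketch, not part of the module shown in this diff: the core
# trick _islice_helper uses above for negative slice values is a bounded
# collections.deque, which keeps only the last few items of an arbitrarily
# long iterator. The helper name below is made up for illustration; it uses
# only the standard library.
from collections import deque

def tail(iterable, n):
    # A deque with maxlen=n retains the most recent n items; older items
    # fall off the left end as new ones arrive, so memory stays bounded.
    return list(deque(iterable, maxlen=n))

# tail(iter(range(10)), 3) should evaluate to [7, 8, 9], the same result as
# slicing the last three items with islice_extended above.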
If the iterable is not reversible, - this function will cache the remaining items in the iterable and - yield them in reverse order, which may require significant storage. - """ - try: - return reversed(iterable) - except TypeError: - return reversed(list(iterable)) - - -def consecutive_groups(iterable, ordering=lambda x: x): - """Yield groups of consecutive items using :func:`itertools.groupby`. - The *ordering* function determines whether two items are adjacent by - returning their position. - - By default, the ordering function is the identity function. This is - suitable for finding runs of numbers: - - >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40] - >>> for group in consecutive_groups(iterable): - ... print(list(group)) - [1] - [10, 11, 12] - [20] - [30, 31, 32, 33] - [40] - - For finding runs of adjacent letters, try using the :meth:`index` method - of a string of letters: - - >>> from string import ascii_lowercase - >>> iterable = 'abcdfgilmnop' - >>> ordering = ascii_lowercase.index - >>> for group in consecutive_groups(iterable, ordering): - ... print(list(group)) - ['a', 'b', 'c', 'd'] - ['f', 'g'] - ['i'] - ['l', 'm', 'n', 'o', 'p'] - - Each group of consecutive items is an iterator that shares it source with - *iterable*. When an an output group is advanced, the previous group is - no longer available unless its elements are copied (e.g., into a ``list``). - - >>> iterable = [1, 2, 11, 12, 21, 22] - >>> saved_groups = [] - >>> for group in consecutive_groups(iterable): - ... saved_groups.append(list(group)) # Copy group elements - >>> saved_groups - [[1, 2], [11, 12], [21, 22]] - - """ - for k, g in groupby( - enumerate(iterable), key=lambda x: x[0] - ordering(x[1]) - ): - yield map(itemgetter(1), g) - - -def difference(iterable, func=sub, *, initial=None): - """This function is the inverse of :func:`itertools.accumulate`. By default - it will compute the first difference of *iterable* using - :func:`operator.sub`: - - >>> from itertools import accumulate - >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10 - >>> list(difference(iterable)) - [0, 1, 2, 3, 4] - - *func* defaults to :func:`operator.sub`, but other functions can be - specified. They will be applied as follows:: - - A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ... - - For example, to do progressive division: - - >>> iterable = [1, 2, 6, 24, 120] - >>> func = lambda x, y: x // y - >>> list(difference(iterable, func)) - [1, 2, 3, 4, 5] - - If the *initial* keyword is set, the first element will be skipped when - computing successive differences. - - >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10) - >>> list(difference(it, initial=10)) - [1, 2, 3] - - """ - a, b = tee(iterable) - try: - first = [next(b)] - except StopIteration: - return iter([]) - - if initial is not None: - first = [] - - return chain(first, starmap(func, zip(b, a))) - - -class SequenceView(Sequence): - """Return a read-only view of the sequence object *target*. - - :class:`SequenceView` objects are analogous to Python's built-in - "dictionary view" types. They provide a dynamic view of a sequence's items, - meaning that when the sequence updates, so does the view. - - >>> seq = ['0', '1', '2'] - >>> view = SequenceView(seq) - >>> view - SequenceView(['0', '1', '2']) - >>> seq.append('3') - >>> view - SequenceView(['0', '1', '2', '3']) - - Sequence views support indexing, slicing, and length queries. 
They act - like the underlying sequence, except they don't allow assignment: - - >>> view[1] - '1' - >>> view[1:-1] - ['1', '2'] - >>> len(view) - 4 - - Sequence views are useful as an alternative to copying, as they don't - require (much) extra storage. - - """ - - def __init__(self, target): - if not isinstance(target, Sequence): - raise TypeError - self._target = target - - def __getitem__(self, index): - return self._target[index] - - def __len__(self): - return len(self._target) - - def __repr__(self): - return '{}({})'.format(self.__class__.__name__, repr(self._target)) - - -class seekable: - """Wrap an iterator to allow for seeking backward and forward. This - progressively caches the items in the source iterable so they can be - re-visited. - - Call :meth:`seek` with an index to seek to that position in the source - iterable. - - To "reset" an iterator, seek to ``0``: - - >>> from itertools import count - >>> it = seekable((str(n) for n in count())) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> it.seek(0) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> next(it) - '3' - - You can also seek forward: - - >>> it = seekable((str(n) for n in range(20))) - >>> it.seek(10) - >>> next(it) - '10' - >>> it.seek(20) # Seeking past the end of the source isn't a problem - >>> list(it) - [] - >>> it.seek(0) # Resetting works even after hitting the end - >>> next(it), next(it), next(it) - ('0', '1', '2') - - Call :meth:`peek` to look ahead one item without advancing the iterator: - - >>> it = seekable('1234') - >>> it.peek() - '1' - >>> list(it) - ['1', '2', '3', '4'] - >>> it.peek(default='empty') - 'empty' - - Before the iterator is at its end, calling :func:`bool` on it will return - ``True``. After it will return ``False``: - - >>> it = seekable('5678') - >>> bool(it) - True - >>> list(it) - ['5', '6', '7', '8'] - >>> bool(it) - False - - You may view the contents of the cache with the :meth:`elements` method. - That returns a :class:`SequenceView`, a view that updates automatically: - - >>> it = seekable((str(n) for n in range(10))) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> elements = it.elements() - >>> elements - SequenceView(['0', '1', '2']) - >>> next(it) - '3' - >>> elements - SequenceView(['0', '1', '2', '3']) - - By default, the cache grows as the source iterable progresses, so beware of - wrapping very large or infinite iterables. Supply *maxlen* to limit the - size of the cache (this of course limits how far back you can seek). 
- - >>> from itertools import count - >>> it = seekable((str(n) for n in count()), maxlen=2) - >>> next(it), next(it), next(it), next(it) - ('0', '1', '2', '3') - >>> list(it.elements()) - ['2', '3'] - >>> it.seek(0) - >>> next(it), next(it), next(it), next(it) - ('2', '3', '4', '5') - >>> next(it) - '6' - - """ - - def __init__(self, iterable, maxlen=None): - self._source = iter(iterable) - if maxlen is None: - self._cache = [] - else: - self._cache = deque([], maxlen) - self._index = None - - def __iter__(self): - return self - - def __next__(self): - if self._index is not None: - try: - item = self._cache[self._index] - except IndexError: - self._index = None - else: - self._index += 1 - return item - - item = next(self._source) - self._cache.append(item) - return item - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - try: - peeked = next(self) - except StopIteration: - if default is _marker: - raise - return default - if self._index is None: - self._index = len(self._cache) - self._index -= 1 - return peeked - - def elements(self): - return SequenceView(self._cache) - - def seek(self, index): - self._index = index - remainder = index - len(self._cache) - if remainder > 0: - consume(self, remainder) - - -class run_length: - """ - :func:`run_length.encode` compresses an iterable with run-length encoding. - It yields groups of repeated items with the count of how many times they - were repeated: - - >>> uncompressed = 'abbcccdddd' - >>> list(run_length.encode(uncompressed)) - [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - - :func:`run_length.decode` decompresses an iterable that was previously - compressed with run-length encoding. It yields the items of the - decompressed iterable: - - >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> list(run_length.decode(compressed)) - ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] - - """ - - @staticmethod - def encode(iterable): - return ((k, ilen(g)) for k, g in groupby(iterable)) - - @staticmethod - def decode(iterable): - return chain.from_iterable(repeat(k, n) for k, n in iterable) - - -def exactly_n(iterable, n, predicate=bool): - """Return ``True`` if exactly ``n`` items in the iterable are ``True`` - according to the *predicate* function. - - >>> exactly_n([True, True, False], 2) - True - >>> exactly_n([True, True, False], 1) - False - >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) - True - - The iterable will be advanced until ``n + 1`` truthy items are encountered, - so avoid calling it on infinite iterables. - - """ - return len(take(n + 1, filter(predicate, iterable))) == n - - -def circular_shifts(iterable): - """Return a list of circular shifts of *iterable*. - - >>> circular_shifts(range(4)) - [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] - """ - lst = list(iterable) - return take(len(lst), windowed(cycle(lst), len(lst))) - - -def make_decorator(wrapping_func, result_index=0): - """Return a decorator version of *wrapping_func*, which is a function that - modifies an iterable. *result_index* is the position in that function's - signature where the iterable goes. - - This lets you use itertools on the "production end," i.e. at function - definition. This can augment what the function returns without changing the - function's code. - - For example, to produce a decorator version of :func:`chunked`: - - >>> from more_itertools import chunked - >>> chunker = make_decorator(chunked, result_index=0) - >>> @chunker(3) - ... 
def iter_range(n): - ... return iter(range(n)) - ... - >>> list(iter_range(9)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - - To only allow truthy items to be returned: - - >>> truth_serum = make_decorator(filter, result_index=1) - >>> @truth_serum(bool) - ... def boolean_test(): - ... return [0, 1, '', ' ', False, True] - ... - >>> list(boolean_test()) - [1, ' ', True] - - The :func:`peekable` and :func:`seekable` wrappers make for practical - decorators: - - >>> from more_itertools import peekable - >>> peekable_function = make_decorator(peekable) - >>> @peekable_function() - ... def str_range(*args): - ... return (str(x) for x in range(*args)) - ... - >>> it = str_range(1, 20, 2) - >>> next(it), next(it), next(it) - ('1', '3', '5') - >>> it.peek() - '7' - >>> next(it) - '7' - - """ - # See https://sites.google.com/site/bbayles/index/decorator_factory for - # notes on how this works. - def decorator(*wrapping_args, **wrapping_kwargs): - def outer_wrapper(f): - def inner_wrapper(*args, **kwargs): - result = f(*args, **kwargs) - wrapping_args_ = list(wrapping_args) - wrapping_args_.insert(result_index, result) - return wrapping_func(*wrapping_args_, **wrapping_kwargs) - - return inner_wrapper - - return outer_wrapper - - return decorator - - -def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): - """Return a dictionary that maps the items in *iterable* to categories - defined by *keyfunc*, transforms them with *valuefunc*, and - then summarizes them by category with *reducefunc*. - - *valuefunc* defaults to the identity function if it is unspecified. - If *reducefunc* is unspecified, no summarization takes place: - - >>> keyfunc = lambda x: x.upper() - >>> result = map_reduce('abbccc', keyfunc) - >>> sorted(result.items()) - [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] - - Specifying *valuefunc* transforms the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> result = map_reduce('abbccc', keyfunc, valuefunc) - >>> sorted(result.items()) - [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] - - Specifying *reducefunc* summarizes the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> reducefunc = sum - >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) - >>> sorted(result.items()) - [('A', 1), ('B', 2), ('C', 3)] - - You may want to filter the input iterable before applying the map/reduce - procedure: - - >>> all_items = range(30) - >>> items = [x for x in all_items if 10 <= x <= 20] # Filter - >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 - >>> categories = map_reduce(items, keyfunc=keyfunc) - >>> sorted(categories.items()) - [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] - >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) - >>> sorted(summaries.items()) - [(0, 90), (1, 75)] - - Note that all items in the iterable are gathered into a list before the - summarization step, which may require significant storage. - - The returned object is a :obj:`collections.defaultdict` with the - ``default_factory`` set to ``None``, such that it behaves like a normal - dictionary. 
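# Illustrative sketch, not part of the module shown in this diff: why the
# implementation below clears default_factory on the defaultdict it returns.
# While the factory is set, merely looking up a missing key inserts an empty
# list; once it is cleared, the result behaves like a plain dict and raises
# KeyError instead. Standard library only.
from collections import defaultdict

categories = defaultdict(list)
categories['A'].append('a')        # building the mapping: missing keys auto-create lists
categories.default_factory = None  # what map_reduce does before returning
try:
    categories['MISSING']
except KeyError:
    print('missing keys now raise KeyError, like a normal dict')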
- - """ - valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc - - ret = defaultdict(list) - for item in iterable: - key = keyfunc(item) - value = valuefunc(item) - ret[key].append(value) - - if reducefunc is not None: - for key, value_list in ret.items(): - ret[key] = reducefunc(value_list) - - ret.default_factory = None - return ret - - -def rlocate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``, starting from the right and moving left. - - *pred* defaults to :func:`bool`, which will select truthy items: - - >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4 - [4, 2, 1] - - Set *pred* to a custom function to, e.g., find the indexes for a particular - item: - - >>> iterable = iter('abcb') - >>> pred = lambda x: x == 'b' - >>> list(rlocate(iterable, pred)) - [3, 1] - - If *window_size* is given, then the *pred* function will be called with - that many items. This enables searching for sub-sequences: - - >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] - >>> pred = lambda *args: args == (1, 2, 3) - >>> list(rlocate(iterable, pred=pred, window_size=3)) - [9, 5, 1] - - Beware, this function won't return anything for infinite iterables. - If *iterable* is reversible, ``rlocate`` will reverse it and search from - the right. Otherwise, it will search from the left and return the results - in reverse order. - - See :func:`locate` to for other example applications. - - """ - if window_size is None: - try: - len_iter = len(iterable) - return (len_iter - i - 1 for i in locate(reversed(iterable), pred)) - except TypeError: - pass - - return reversed(list(locate(iterable, pred, window_size))) - - -def replace(iterable, pred, substitutes, count=None, window_size=1): - """Yield the items from *iterable*, replacing the items for which *pred* - returns ``True`` with the items from the iterable *substitutes*. - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1] - >>> pred = lambda x: x == 0 - >>> substitutes = (2, 3) - >>> list(replace(iterable, pred, substitutes)) - [1, 1, 2, 3, 1, 1, 2, 3, 1, 1] - - If *count* is given, the number of replacements will be limited: - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0] - >>> pred = lambda x: x == 0 - >>> substitutes = [None] - >>> list(replace(iterable, pred, substitutes, count=2)) - [1, 1, None, 1, 1, None, 1, 1, 0] - - Use *window_size* to control the number of items passed as arguments to - *pred*. This allows for locating and replacing subsequences. - - >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5] - >>> window_size = 3 - >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred - >>> substitutes = [3, 4] # Splice in these items - >>> list(replace(iterable, pred, substitutes, window_size=window_size)) - [3, 4, 5, 3, 4, 5] - - """ - if window_size < 1: - raise ValueError('window_size must be at least 1') - - # Save the substitutes iterable, since it's used more than once - substitutes = tuple(substitutes) - - # Add padding such that the number of windows matches the length of the - # iterable - it = chain(iterable, [_marker] * (window_size - 1)) - windows = windowed(it, window_size) - - n = 0 - for w in windows: - # If the current window matches our predicate (and we haven't hit - # our maximum number of replacements), splice in the substitutes - # and then consume the following windows that overlap with this one. - # For example, if the iterable is (0, 1, 2, 3, 4...) - # and the window size is 2, we have (0, 1), (1, 2), (2, 3)... 
- # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2) - if pred(*w): - if (count is None) or (n < count): - n += 1 - yield from substitutes - consume(windows, window_size - 1) - continue - - # If there was no match (or we've reached the replacement limit), - # yield the first item from the window. - if w and (w[0] is not _marker): - yield w[0] - - -def partitions(iterable): - """Yield all possible order-preserving partitions of *iterable*. - - >>> iterable = 'abc' - >>> for part in partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['a', 'b', 'c'] - - This is unrelated to :func:`partition`. - - """ - sequence = list(iterable) - n = len(sequence) - for i in powerset(range(1, n)): - yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))] - - -def set_partitions(iterable, k=None): - """ - Yield the set partitions of *iterable* into *k* parts. Set partitions are - not order-preserving. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable, 2): - ... print([''.join(p) for p in part]) - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - - - If *k* is not given, every set partition is generated. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - ['a', 'b', 'c'] - - """ - L = list(iterable) - n = len(L) - if k is not None: - if k < 1: - raise ValueError( - "Can't partition in a negative or zero number of groups" - ) - elif k > n: - return - - def set_partitions_helper(L, k): - n = len(L) - if k == 1: - yield [L] - elif n == k: - yield [[s] for s in L] - else: - e, *M = L - for p in set_partitions_helper(M, k - 1): - yield [[e], *p] - for p in set_partitions_helper(M, k): - for i in range(len(p)): - yield p[:i] + [[e] + p[i]] + p[i + 1 :] - - if k is None: - for k in range(1, n + 1): - yield from set_partitions_helper(L, k) - else: - yield from set_partitions_helper(L, k) - - -class time_limited: - """ - Yield items from *iterable* until *limit_seconds* have passed. - If the time limit expires before all items have been yielded, the - ``timed_out`` parameter will be set to ``True``. - - >>> from time import sleep - >>> def generator(): - ... yield 1 - ... yield 2 - ... sleep(0.2) - ... yield 3 - >>> iterable = time_limited(0.1, generator()) - >>> list(iterable) - [1, 2] - >>> iterable.timed_out - True - - Note that the time is checked before each item is yielded, and iteration - stops if the time elapsed is greater than *limit_seconds*. If your time - limit is 1 second, but it takes 2 seconds to generate the first item from - the iterable, the function will run for 2 seconds and not yield anything. - - """ - - def __init__(self, limit_seconds, iterable): - if limit_seconds < 0: - raise ValueError('limit_seconds must be positive') - self.limit_seconds = limit_seconds - self._iterable = iter(iterable) - self._start_time = monotonic() - self.timed_out = False - - def __iter__(self): - return self - - def __next__(self): - item = next(self._iterable) - if monotonic() - self._start_time > self.limit_seconds: - self.timed_out = True - raise StopIteration - - return item - - -def only(iterable, default=None, too_long=None): - """If *iterable* has only one item, return it. - If it has zero items, return *default*. - If it has more than one item, raise the exception given by *too_long*, - which is ``ValueError`` by default. 
- - >>> only([], default='missing') - 'missing' - >>> only([1]) - 1 - >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 1, 2, - and perhaps more.' - >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - TypeError - - Note that :func:`only` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check - iterable contents less destructively. - """ - it = iter(iterable) - first_value = next(it, default) - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value - - -def ichunked(iterable, n): - """Break *iterable* into sub-iterables with *n* elements each. - :func:`ichunked` is like :func:`chunked`, but it yields iterables - instead of lists. - - If the sub-iterables are read in order, the elements of *iterable* - won't be stored in memory. - If they are read out of order, :func:`itertools.tee` is used to cache - elements as necessary. - - >>> from itertools import count - >>> all_chunks = ichunked(count(), 4) - >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks) - >>> list(c_2) # c_1's elements have been cached; c_3's haven't been - [4, 5, 6, 7] - >>> list(c_1) - [0, 1, 2, 3] - >>> list(c_3) - [8, 9, 10, 11] - - """ - source = iter(iterable) - - while True: - # Check to see whether we're at the end of the source iterable - item = next(source, _marker) - if item is _marker: - return - - # Clone the source and yield an n-length slice - source, it = tee(chain([item], source)) - yield islice(it, n) - - # Advance the source iterable - consume(source, n) - - -def distinct_combinations(iterable, r): - """Yield the distinct combinations of *r* items taken from *iterable*. - - >>> list(distinct_combinations([0, 0, 1], 2)) - [(0, 0), (0, 1)] - - Equivalent to ``set(combinations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. - - """ - if r < 0: - raise ValueError('r must be non-negative') - elif r == 0: - yield () - return - pool = tuple(iterable) - generators = [unique_everseen(enumerate(pool), key=itemgetter(1))] - current_combo = [None] * r - level = 0 - while generators: - try: - cur_idx, p = next(generators[-1]) - except StopIteration: - generators.pop() - level -= 1 - continue - current_combo[level] = p - if level + 1 == r: - yield tuple(current_combo) - else: - generators.append( - unique_everseen( - enumerate(pool[cur_idx + 1 :], cur_idx + 1), - key=itemgetter(1), - ) - ) - level += 1 - - -def filter_except(validator, iterable, *exceptions): - """Yield the items from *iterable* for which the *validator* function does - not raise one of the specified *exceptions*. - - *validator* is called for each item in *iterable*. - It should be a function that accepts one argument and raises an exception - if that item is not valid. - - >>> iterable = ['1', '2', 'three', '4', None] - >>> list(filter_except(int, iterable, ValueError, TypeError)) - ['1', '2', '4'] - - If an exception other than one given by *exceptions* is raised by - *validator*, it is raised like normal. 
- """ - for item in iterable: - try: - validator(item) - except exceptions: - pass - else: - yield item - - -def map_except(function, iterable, *exceptions): - """Transform each item from *iterable* with *function* and yield the - result, unless *function* raises one of the specified *exceptions*. - - *function* is called to transform each item in *iterable*. - It should be a accept one argument. - - >>> iterable = ['1', '2', 'three', '4', None] - >>> list(map_except(int, iterable, ValueError, TypeError)) - [1, 2, 4] - - If an exception other than one given by *exceptions* is raised by - *function*, it is raised like normal. - """ - for item in iterable: - try: - yield function(item) - except exceptions: - pass - - -def _sample_unweighted(iterable, k): - # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li: - # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))". - - # Fill up the reservoir (collection of samples) with the first `k` samples - reservoir = take(k, iterable) - - # Generate random number that's the largest in a sample of k U(0,1) numbers - # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic - W = exp(log(random()) / k) - - # The number of elements to skip before changing the reservoir is a random - # number with a geometric distribution. Sample it using random() and logs. - next_index = k + floor(log(random()) / log(1 - W)) - - for index, element in enumerate(iterable, k): - - if index == next_index: - reservoir[randrange(k)] = element - # The new W is the largest in a sample of k U(0, `old_W`) numbers - W *= exp(log(random()) / k) - next_index += floor(log(random()) / log(1 - W)) + 1 - - return reservoir - - -def _sample_weighted(iterable, k, weights): - # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. : - # "Weighted random sampling with a reservoir". - - # Log-transform for numerical stability for weights that are small/large - weight_keys = (log(random()) / weight for weight in weights) - - # Fill up the reservoir (collection of samples) with the first `k` - # weight-keys and elements, then heapify the list. - reservoir = take(k, zip(weight_keys, iterable)) - heapify(reservoir) - - # The number of jumps before changing the reservoir is a random variable - # with an exponential distribution. Sample it using random() and logs. - smallest_weight_key, _ = reservoir[0] - weights_to_skip = log(random()) / smallest_weight_key - - for weight, element in zip(weights, iterable): - if weight >= weights_to_skip: - # The notation here is consistent with the paper, but we store - # the weight-keys in log-space for better numerical stability. - smallest_weight_key, _ = reservoir[0] - t_w = exp(weight * smallest_weight_key) - r_2 = uniform(t_w, 1) # generate U(t_w, 1) - weight_key = log(r_2) / weight - heapreplace(reservoir, (weight_key, element)) - smallest_weight_key, _ = reservoir[0] - weights_to_skip = log(random()) / smallest_weight_key - else: - weights_to_skip -= weight - - # Equivalent to [element for weight_key, element in sorted(reservoir)] - return [heappop(reservoir)[1] for _ in range(k)] - - -def sample(iterable, k, weights=None): - """Return a *k*-length list of elements chosen (without replacement) - from the *iterable*. Like :func:`random.sample`, but works on iterables - of unknown length. 
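# Illustrative sketch, not part of the module shown in this diff: classic
# reservoir sampling ("Algorithm R"), shown for comparison with
# _sample_unweighted above. Algorithm L produces the same uniform
# distribution but skips ahead by a geometrically distributed count instead
# of drawing a random number for every element. The function name below is
# made up for illustration; it uses only the standard library.
import random

def reservoir_sample_r(iterable, k):
    # Uniformly sample k items from an iterable of unknown length.
    iterator = iter(iterable)
    reservoir = [item for _, item in zip(range(k), iterator)]  # first k items
    for index, item in enumerate(iterator, k):
        # The item at 0-based position `index` replaces a random slot with
        # probability k / (index + 1), which keeps every item equally likely.
        j = random.randrange(index + 1)
        if j < k:
            reservoir[j] = item
    return reservoir

# reservoir_sample_r(range(1000), 5) returns 5 items drawn uniformly without
# materializing the full range.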
- - >>> iterable = range(100) - >>> sample(iterable, 5) # doctest: +SKIP - [81, 60, 96, 16, 4] - - An iterable with *weights* may also be given: - - >>> iterable = range(100) - >>> weights = (i * i + 1 for i in range(100)) - >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP - [79, 67, 74, 66, 78] - - The algorithm can also be used to generate weighted random permutations. - The relative weight of each item determines the probability that it - appears late in the permutation. - - >>> data = "abcdefgh" - >>> weights = range(1, len(data) + 1) - >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP - ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f'] - """ - if k == 0: - return [] - - iterable = iter(iterable) - if weights is None: - return _sample_unweighted(iterable, k) - else: - weights = iter(weights) - return _sample_weighted(iterable, k, weights) - - -def is_sorted(iterable, key=None, reverse=False): - """Returns ``True`` if the items of iterable are in sorted order, and - ``False`` otherwise. *key* and *reverse* have the same meaning that they do - in the built-in :func:`sorted` function. - - >>> is_sorted(['1', '2', '3', '4', '5'], key=int) - True - >>> is_sorted([5, 4, 3, 1, 2], reverse=True) - False - - The function returns ``False`` after encountering the first out-of-order - item. If there are no out-of-order items, the iterable is exhausted. - """ - - compare = lt if reverse else gt - it = iterable if (key is None) else map(key, iterable) - return not any(starmap(compare, pairwise(it))) - - -class AbortThread(BaseException): - pass - - -class callback_iter: - """Convert a function that uses callbacks to an iterator. - - Let *func* be a function that takes a `callback` keyword argument. - For example: - - >>> def func(callback=None): - ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]: - ... if callback: - ... callback(i, c) - ... return 4 - - - Use ``with callback_iter(func)`` to get an iterator over the parameters - that are delivered to the callback. - - >>> with callback_iter(func) as it: - ... for args, kwargs in it: - ... print(args) - (1, 'a') - (2, 'b') - (3, 'c') - - The function will be called in a background thread. The ``done`` property - indicates whether it has completed execution. - - >>> it.done - True - - If it completes successfully, its return value will be available - in the ``result`` property. - - >>> it.result - 4 - - Notes: - - * If the function uses some keyword argument besides ``callback``, supply - *callback_kwd*. - * If it finished executing, but raised an exception, accessing the - ``result`` property will raise the same exception. - * If it hasn't finished executing, accessing the ``result`` - property from within the ``with`` block will raise ``RuntimeError``. - * If it hasn't finished executing, accessing the ``result`` property from - outside the ``with`` block will raise a - ``more_itertools.AbortThread`` exception. - * Provide *wait_seconds* to adjust how frequently the it is polled for - output. 
- - """ - - def __init__(self, func, callback_kwd='callback', wait_seconds=0.1): - self._func = func - self._callback_kwd = callback_kwd - self._aborted = False - self._future = None - self._wait_seconds = wait_seconds - self._executor = __import__("concurrent.futures").futures.ThreadPoolExecutor(max_workers=1) - self._iterator = self._reader() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self._aborted = True - self._executor.shutdown() - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterator) - - @property - def done(self): - if self._future is None: - return False - return self._future.done() - - @property - def result(self): - if not self.done: - raise RuntimeError('Function has not yet completed') - - return self._future.result() - - def _reader(self): - q = Queue() - - def callback(*args, **kwargs): - if self._aborted: - raise AbortThread('canceled by user') - - q.put((args, kwargs)) - - self._future = self._executor.submit( - self._func, **{self._callback_kwd: callback} - ) - - while True: - try: - item = q.get(timeout=self._wait_seconds) - except Empty: - pass - else: - q.task_done() - yield item - - if self._future.done(): - break - - remaining = [] - while True: - try: - item = q.get_nowait() - except Empty: - break - else: - q.task_done() - remaining.append(item) - q.join() - yield from remaining - - -def windowed_complete(iterable, n): - """ - Yield ``(beginning, middle, end)`` tuples, where: - - * Each ``middle`` has *n* items from *iterable* - * Each ``beginning`` has the items before the ones in ``middle`` - * Each ``end`` has the items after the ones in ``middle`` - - >>> iterable = range(7) - >>> n = 3 - >>> for beginning, middle, end in windowed_complete(iterable, n): - ... print(beginning, middle, end) - () (0, 1, 2) (3, 4, 5, 6) - (0,) (1, 2, 3) (4, 5, 6) - (0, 1) (2, 3, 4) (5, 6) - (0, 1, 2) (3, 4, 5) (6,) - (0, 1, 2, 3) (4, 5, 6) () - - Note that *n* must be at least 0 and most equal to the length of - *iterable*. - - This function will exhaust the iterable and may require significant - storage. - """ - if n < 0: - raise ValueError('n must be >= 0') - - seq = tuple(iterable) - size = len(seq) - - if n > size: - raise ValueError('n must be <= len(seq)') - - for i in range(size - n + 1): - beginning = seq[:i] - middle = seq[i : i + n] - end = seq[i + n :] - yield beginning, middle, end - - -def all_unique(iterable, key=None): - """ - Returns ``True`` if all the elements of *iterable* are unique (no two - elements are equal). - - >>> all_unique('ABCB') - False - - If a *key* function is specified, it will be used to make comparisons. - - >>> all_unique('ABCb') - True - >>> all_unique('ABCb', str.lower) - False - - The function returns as soon as the first non-unique element is - encountered. Iterables with a mix of hashable and unhashable items can - be used, but the function will be slower for unhashable items. - """ - seenset = set() - seenset_add = seenset.add - seenlist = [] - seenlist_add = seenlist.append - for element in map(key, iterable) if key else iterable: - try: - if element in seenset: - return False - seenset_add(element) - except TypeError: - if element in seenlist: - return False - seenlist_add(element) - return True - - -def nth_product(index, *args): - """Equivalent to ``list(product(*args))[index]``. - - The products of *args* can be ordered lexicographically. 
- :func:`nth_product` computes the product at sort position *index* without - computing the previous products. - - >>> nth_product(8, range(2), range(2), range(2), range(2)) - (1, 0, 0, 0) - - ``IndexError`` will be raised if the given *index* is invalid. - """ - pools = list(map(tuple, reversed(args))) - ns = list(map(len, pools)) - - c = reduce(mul, ns) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - result = [] - for pool, n in zip(pools, ns): - result.append(pool[index % n]) - index //= n - - return tuple(reversed(result)) - - -def nth_permutation(iterable, r, index): - """Equivalent to ``list(permutations(iterable, r))[index]``` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`nth_permutation` - computes the subsequence at sort position *index* directly, without - computing the previous subsequences. - - >>> nth_permutation('ghijk', 2, 5) - ('h', 'i') - - ``ValueError`` will be raised If *r* is negative or greater than the length - of *iterable*. - ``IndexError`` will be raised if the given *index* is invalid. - """ - pool = list(iterable) - n = len(pool) - - if r is None or r == n: - r, c = n, factorial(n) - elif not 0 <= r < n: - raise ValueError - else: - c = factorial(n) // factorial(n - r) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - if c == 0: - return tuple() - - result = [0] * r - q = index * factorial(n) // c if r < n else index - for d in range(1, n + 1): - q, i = divmod(q, d) - if 0 <= n - d < r: - result[n - d] = i - if q == 0: - break - - return tuple(map(pool.pop, result)) - - -def value_chain(*args): - """Yield all arguments passed to the function in the same order in which - they were passed. If an argument itself is iterable then iterate over its - values. - - >>> list(value_chain(1, 2, 3, [4, 5, 6])) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and are emitted - as-is: - - >>> list(value_chain('12', '34', ['56', '78'])) - ['12', '34', '56', '78'] - - - Multiple levels of nesting are not flattened. - - """ - for value in args: - if isinstance(value, (str, bytes)): - yield value - continue - try: - yield from value - except TypeError: - yield value - - -def product_index(element, *args): - """Equivalent to ``list(product(*args)).index(element)`` - - The products of *args* can be ordered lexicographically. - :func:`product_index` computes the first index of *element* without - computing the previous products. - - >>> product_index([8, 2], range(10), range(5)) - 42 - - ``ValueError`` will be raised if the given *element* isn't in the product - of *args*. - """ - index = 0 - - for x, pool in zip_longest(element, args, fillvalue=_marker): - if x is _marker or pool is _marker: - raise ValueError('element is not a product of args') - - pool = tuple(pool) - index = index * len(pool) + pool.index(x) - - return index - - -def combination_index(element, iterable): - """Equivalent to ``list(combinations(iterable, r)).index(element)`` - - The subsequences of *iterable* that are of length *r* can be ordered - lexicographically. :func:`combination_index` computes the index of the - first *element*, without computing the previous combinations. - - >>> combination_index('adf', 'abcdefg') - 10 - - ``ValueError`` will be raised if the given *element* isn't one of the - combinations of *iterable*. 
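# Illustrative sketch, not part of the module shown in this diff: a
# brute-force reference for the combinatorial-number-system arithmetic used
# by combination_index below. The helper name is made up for illustration,
# uses only the standard library, and enumerates all C(n, r) combinations,
# so it is only practical for small inputs.
from itertools import combinations

def brute_force_combination_index(element, iterable):
    element = tuple(element)
    pool = tuple(iterable)
    return list(combinations(pool, len(element))).index(element)

# Matches the docstring example above: both expressions evaluate to 10.
#   combination_index('adf', 'abcdefg')
#   brute_force_combination_index('adf', 'abcdefg')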
- """ - element = enumerate(element) - k, y = next(element, (None, None)) - if k is None: - return 0 - - indexes = [] - pool = enumerate(iterable) - for n, x in pool: - if x == y: - indexes.append(n) - tmp, y = next(element, (None, None)) - if tmp is None: - break - else: - k = tmp - else: - raise ValueError('element is not a combination of iterable') - - n, _ = last(pool, default=(n, None)) - - # Python versiosn below 3.8 don't have math.comb - index = 1 - for i, j in enumerate(reversed(indexes), start=1): - j = n - j - if i <= j: - index += factorial(j) // (factorial(i) * factorial(j - i)) - - return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index - - -def permutation_index(element, iterable): - """Equivalent to ``list(permutations(iterable, r)).index(element)``` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`permutation_index` - computes the index of the first *element* directly, without computing - the previous permutations. - - >>> permutation_index([1, 3, 2], range(5)) - 19 - - ``ValueError`` will be raised if the given *element* isn't one of the - permutations of *iterable*. - """ - index = 0 - pool = list(iterable) - for i, x in zip(range(len(pool), -1, -1), element): - r = pool.index(x) - index = index * i + r - del pool[r] - - return index - - -class countable: - """Wrap *iterable* and keep a count of how many items have been consumed. - - The ``items_seen`` attribute starts at ``0`` and increments as the iterable - is consumed: - - >>> iterable = map(str, range(10)) - >>> it = countable(iterable) - >>> it.items_seen - 0 - >>> next(it), next(it) - ('0', '1') - >>> list(it) - ['2', '3', '4', '5', '6', '7', '8', '9'] - >>> it.items_seen - 10 - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self.items_seen = 0 - - def __iter__(self): - return self - - def __next__(self): - item = next(self._it) - self.items_seen += 1 - - return item diff --git a/spaces/plzdontcry/dakubettergpt/src/components/PromptLibraryMenu/ExportPrompt.tsx b/spaces/plzdontcry/dakubettergpt/src/components/PromptLibraryMenu/ExportPrompt.tsx deleted file mode 100644 index f4856e9d5d1823062a900c5dacd95c1abf08a127..0000000000000000000000000000000000000000 --- a/spaces/plzdontcry/dakubettergpt/src/components/PromptLibraryMenu/ExportPrompt.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react'; -import { useTranslation } from 'react-i18next'; -import useStore from '@store/store'; -import { exportPrompts } from '@utils/prompt'; - -const ExportPrompt = () => { - const { t } = useTranslation(); - const prompts = useStore.getState().prompts; - - return ( -
-      {/* JSX markup stripped during extraction; rendered an export control labeled {t('export')} (CSV) */}
        - ); -}; - -export default ExportPrompt; diff --git a/spaces/portal/Control-Net-Video/style.css b/spaces/portal/Control-Net-Video/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/portal/Control-Net-Video/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/power2/JoJoGan-powerhow2/e4e/datasets/gt_res_dataset.py b/spaces/power2/JoJoGan-powerhow2/e4e/datasets/gt_res_dataset.py deleted file mode 100644 index c0beacfee5335aa10aa7e8b7cabe206d7f9a56f7..0000000000000000000000000000000000000000 --- a/spaces/power2/JoJoGan-powerhow2/e4e/datasets/gt_res_dataset.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/python -# encoding: utf-8 -import os -from torch.utils.data import Dataset -from PIL import Image -import torch - -class GTResDataset(Dataset): - - def __init__(self, root_path, gt_dir=None, transform=None, transform_train=None): - self.pairs = [] - for f in os.listdir(root_path): - image_path = os.path.join(root_path, f) - gt_path = os.path.join(gt_dir, f) - if f.endswith(".jpg") or f.endswith(".png"): - self.pairs.append([image_path, gt_path.replace('.png', '.jpg'), None]) - self.transform = transform - self.transform_train = transform_train - - def __len__(self): - return len(self.pairs) - - def __getitem__(self, index): - from_path, to_path, _ = self.pairs[index] - from_im = Image.open(from_path).convert('RGB') - to_im = Image.open(to_path).convert('RGB') - - if self.transform: - to_im = self.transform(to_im) - from_im = self.transform(from_im) - - return from_im, to_im diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/colorama/tests/initialise_test.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/colorama/tests/initialise_test.py deleted file mode 100644 index 89f9b07511c8fee74686d9cc434bf66345a46d6d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/colorama/tests/initialise_test.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
-import sys -from unittest import TestCase, main, skipUnless - -try: - from unittest.mock import patch, Mock -except ImportError: - from mock import patch, Mock - -from ..ansitowin32 import StreamWrapper -from ..initialise import init, just_fix_windows_console, _wipe_internal_state_for_tests -from .utils import osname, replace_by - -orig_stdout = sys.stdout -orig_stderr = sys.stderr - - -class InitTest(TestCase): - - @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty") - def setUp(self): - # sanity check - self.assertNotWrapped() - - def tearDown(self): - _wipe_internal_state_for_tests() - sys.stdout = orig_stdout - sys.stderr = orig_stderr - - def assertWrapped(self): - self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped') - self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped') - self.assertTrue(isinstance(sys.stdout, StreamWrapper), - 'bad stdout wrapper') - self.assertTrue(isinstance(sys.stderr, StreamWrapper), - 'bad stderr wrapper') - - def assertNotWrapped(self): - self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped') - self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped') - - @patch('colorama.initialise.reset_all') - @patch('colorama.ansitowin32.winapi_test', lambda *_: True) - @patch('colorama.ansitowin32.enable_vt_processing', lambda *_: False) - def testInitWrapsOnWindows(self, _): - with osname("nt"): - init() - self.assertWrapped() - - @patch('colorama.initialise.reset_all') - @patch('colorama.ansitowin32.winapi_test', lambda *_: False) - def testInitDoesntWrapOnEmulatedWindows(self, _): - with osname("nt"): - init() - self.assertNotWrapped() - - def testInitDoesntWrapOnNonWindows(self): - with osname("posix"): - init() - self.assertNotWrapped() - - def testInitDoesntWrapIfNone(self): - with replace_by(None): - init() - # We can't use assertNotWrapped here because replace_by(None) - # changes stdout/stderr already. 
- self.assertIsNone(sys.stdout) - self.assertIsNone(sys.stderr) - - def testInitAutoresetOnWrapsOnAllPlatforms(self): - with osname("posix"): - init(autoreset=True) - self.assertWrapped() - - def testInitWrapOffDoesntWrapOnWindows(self): - with osname("nt"): - init(wrap=False) - self.assertNotWrapped() - - def testInitWrapOffIncompatibleWithAutoresetOn(self): - self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False)) - - @patch('colorama.win32.SetConsoleTextAttribute') - @patch('colorama.initialise.AnsiToWin32') - def testAutoResetPassedOn(self, mockATW32, _): - with osname("nt"): - init(autoreset=True) - self.assertEqual(len(mockATW32.call_args_list), 2) - self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True) - self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True) - - @patch('colorama.initialise.AnsiToWin32') - def testAutoResetChangeable(self, mockATW32): - with osname("nt"): - init() - - init(autoreset=True) - self.assertEqual(len(mockATW32.call_args_list), 4) - self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True) - self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True) - - init() - self.assertEqual(len(mockATW32.call_args_list), 6) - self.assertEqual( - mockATW32.call_args_list[4][1]['autoreset'], False) - self.assertEqual( - mockATW32.call_args_list[5][1]['autoreset'], False) - - - @patch('colorama.initialise.atexit.register') - def testAtexitRegisteredOnlyOnce(self, mockRegister): - init() - self.assertTrue(mockRegister.called) - mockRegister.reset_mock() - init() - self.assertFalse(mockRegister.called) - - -class JustFixWindowsConsoleTest(TestCase): - def _reset(self): - _wipe_internal_state_for_tests() - sys.stdout = orig_stdout - sys.stderr = orig_stderr - - def tearDown(self): - self._reset() - - @patch("colorama.ansitowin32.winapi_test", lambda: True) - def testJustFixWindowsConsole(self): - if sys.platform != "win32": - # just_fix_windows_console should be a no-op - just_fix_windows_console() - self.assertIs(sys.stdout, orig_stdout) - self.assertIs(sys.stderr, orig_stderr) - else: - def fake_std(): - # Emulate stdout=not a tty, stderr=tty - # to check that we handle both cases correctly - stdout = Mock() - stdout.closed = False - stdout.isatty.return_value = False - stdout.fileno.return_value = 1 - sys.stdout = stdout - - stderr = Mock() - stderr.closed = False - stderr.isatty.return_value = True - stderr.fileno.return_value = 2 - sys.stderr = stderr - - for native_ansi in [False, True]: - with patch( - 'colorama.ansitowin32.enable_vt_processing', - lambda *_: native_ansi - ): - self._reset() - fake_std() - - # Regular single-call test - prev_stdout = sys.stdout - prev_stderr = sys.stderr - just_fix_windows_console() - self.assertIs(sys.stdout, prev_stdout) - if native_ansi: - self.assertIs(sys.stderr, prev_stderr) - else: - self.assertIsNot(sys.stderr, prev_stderr) - - # second call without resetting is always a no-op - prev_stdout = sys.stdout - prev_stderr = sys.stderr - just_fix_windows_console() - self.assertIs(sys.stdout, prev_stdout) - self.assertIs(sys.stderr, prev_stderr) - - self._reset() - fake_std() - - # If init() runs first, just_fix_windows_console should be a no-op - init() - prev_stdout = sys.stdout - prev_stderr = sys.stderr - just_fix_windows_console() - self.assertIs(prev_stdout, sys.stdout) - self.assertIs(prev_stderr, sys.stderr) - - -if __name__ == '__main__': - main() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/treeTools.py 
b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/treeTools.py deleted file mode 100644 index 24e10ba5b19ef41d56a552527680a4c73503cc3c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/treeTools.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Generic tools for working with trees.""" - -from math import ceil, log - - -def build_n_ary_tree(leaves, n): - """Build N-ary tree from sequence of leaf nodes. - - Return a list of lists where each non-leaf node is a list containing - max n nodes. - """ - if not leaves: - return [] - - assert n > 1 - - depth = ceil(log(len(leaves), n)) - - if depth <= 1: - return list(leaves) - - # Fully populate complete subtrees of root until we have enough leaves left - root = [] - unassigned = None - full_step = n ** (depth - 1) - for i in range(0, len(leaves), full_step): - subtree = leaves[i : i + full_step] - if len(subtree) < full_step: - unassigned = subtree - break - while len(subtree) > n: - subtree = [subtree[k : k + n] for k in range(0, len(subtree), n)] - root.append(subtree) - - if unassigned: - # Recurse to fill the last subtree, which is the only partially populated one - subtree = build_n_ary_tree(unassigned, n) - if len(subtree) <= n - len(root): - # replace last subtree with its children if they can still fit - root.extend(subtree) - else: - root.append(subtree) - assert len(root) <= n - - return root diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/voltLib/ast.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/voltLib/ast.py deleted file mode 100644 index 82c2cca8b7f350bbf2ee579b0978937c22331a2f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/voltLib/ast.py +++ /dev/null @@ -1,448 +0,0 @@ -from fontTools.voltLib.error import VoltLibError -from typing import NamedTuple - - -class Pos(NamedTuple): - adv: int - dx: int - dy: int - adv_adjust_by: dict - dx_adjust_by: dict - dy_adjust_by: dict - - def __str__(self): - res = " POS" - for attr in ("adv", "dx", "dy"): - value = getattr(self, attr) - if value is not None: - res += f" {attr.upper()} {value}" - adjust_by = getattr(self, f"{attr}_adjust_by", {}) - for size, adjustment in adjust_by.items(): - res += f" ADJUST_BY {adjustment} AT {size}" - res += " END_POS" - return res - - -class Element(object): - def __init__(self, location=None): - self.location = location - - def build(self, builder): - pass - - def __str__(self): - raise NotImplementedError - - -class Statement(Element): - pass - - -class Expression(Element): - pass - - -class VoltFile(Statement): - def __init__(self): - Statement.__init__(self, location=None) - self.statements = [] - - def build(self, builder): - for s in self.statements: - s.build(builder) - - def __str__(self): - return "\n" + "\n".join(str(s) for s in self.statements) + " END\n" - - -class GlyphDefinition(Statement): - def __init__(self, name, gid, gunicode, gtype, components, location=None): - Statement.__init__(self, location) - self.name = name - self.id = gid - self.unicode = gunicode - self.type = gtype - self.components = components - - def __str__(self): - res = f'DEF_GLYPH "{self.name}" ID {self.id}' - if self.unicode is not None: - if len(self.unicode) > 1: - unicodes = ",".join(f"U+{u:04X}" for u in self.unicode) - res += f' UNICODEVALUES "{unicodes}"' - else: - res += f" UNICODE {self.unicode[0]}" - if self.type is not None: - 
res += f" TYPE {self.type}" - if self.components is not None: - res += f" COMPONENTS {self.components}" - res += " END_GLYPH" - return res - - -class GroupDefinition(Statement): - def __init__(self, name, enum, location=None): - Statement.__init__(self, location) - self.name = name - self.enum = enum - self.glyphs_ = None - - def glyphSet(self, groups=None): - if groups is not None and self.name in groups: - raise VoltLibError( - 'Group "%s" contains itself.' % (self.name), self.location - ) - if self.glyphs_ is None: - if groups is None: - groups = set({self.name}) - else: - groups.add(self.name) - self.glyphs_ = self.enum.glyphSet(groups) - return self.glyphs_ - - def __str__(self): - enum = self.enum and str(self.enum) or "" - return f'DEF_GROUP "{self.name}"\n{enum}\nEND_GROUP' - - -class GlyphName(Expression): - """A single glyph name, such as cedilla.""" - - def __init__(self, glyph, location=None): - Expression.__init__(self, location) - self.glyph = glyph - - def glyphSet(self): - return (self.glyph,) - - def __str__(self): - return f' GLYPH "{self.glyph}"' - - -class Enum(Expression): - """An enum""" - - def __init__(self, enum, location=None): - Expression.__init__(self, location) - self.enum = enum - - def __iter__(self): - for e in self.glyphSet(): - yield e - - def glyphSet(self, groups=None): - glyphs = [] - for element in self.enum: - if isinstance(element, (GroupName, Enum)): - glyphs.extend(element.glyphSet(groups)) - else: - glyphs.extend(element.glyphSet()) - return tuple(glyphs) - - def __str__(self): - enum = "".join(str(e) for e in self.enum) - return f" ENUM{enum} END_ENUM" - - -class GroupName(Expression): - """A glyph group""" - - def __init__(self, group, parser, location=None): - Expression.__init__(self, location) - self.group = group - self.parser_ = parser - - def glyphSet(self, groups=None): - group = self.parser_.resolve_group(self.group) - if group is not None: - self.glyphs_ = group.glyphSet(groups) - return self.glyphs_ - else: - raise VoltLibError( - 'Group "%s" is used but undefined.' 
% (self.group), self.location - ) - - def __str__(self): - return f' GROUP "{self.group}"' - - -class Range(Expression): - """A glyph range""" - - def __init__(self, start, end, parser, location=None): - Expression.__init__(self, location) - self.start = start - self.end = end - self.parser = parser - - def glyphSet(self): - return tuple(self.parser.glyph_range(self.start, self.end)) - - def __str__(self): - return f' RANGE "{self.start}" TO "{self.end}"' - - -class ScriptDefinition(Statement): - def __init__(self, name, tag, langs, location=None): - Statement.__init__(self, location) - self.name = name - self.tag = tag - self.langs = langs - - def __str__(self): - res = "DEF_SCRIPT" - if self.name is not None: - res += f' NAME "{self.name}"' - res += f' TAG "{self.tag}"\n\n' - for lang in self.langs: - res += f"{lang}" - res += "END_SCRIPT" - return res - - -class LangSysDefinition(Statement): - def __init__(self, name, tag, features, location=None): - Statement.__init__(self, location) - self.name = name - self.tag = tag - self.features = features - - def __str__(self): - res = "DEF_LANGSYS" - if self.name is not None: - res += f' NAME "{self.name}"' - res += f' TAG "{self.tag}"\n\n' - for feature in self.features: - res += f"{feature}" - res += "END_LANGSYS\n" - return res - - -class FeatureDefinition(Statement): - def __init__(self, name, tag, lookups, location=None): - Statement.__init__(self, location) - self.name = name - self.tag = tag - self.lookups = lookups - - def __str__(self): - res = f'DEF_FEATURE NAME "{self.name}" TAG "{self.tag}"\n' - res += " " + " ".join(f'LOOKUP "{l}"' for l in self.lookups) + "\n" - res += "END_FEATURE\n" - return res - - -class LookupDefinition(Statement): - def __init__( - self, - name, - process_base, - process_marks, - mark_glyph_set, - direction, - reversal, - comments, - context, - sub, - pos, - location=None, - ): - Statement.__init__(self, location) - self.name = name - self.process_base = process_base - self.process_marks = process_marks - self.mark_glyph_set = mark_glyph_set - self.direction = direction - self.reversal = reversal - self.comments = comments - self.context = context - self.sub = sub - self.pos = pos - - def __str__(self): - res = f'DEF_LOOKUP "{self.name}"' - res += f' {self.process_base and "PROCESS_BASE" or "SKIP_BASE"}' - if self.process_marks: - res += " PROCESS_MARKS " - if self.mark_glyph_set: - res += f'MARK_GLYPH_SET "{self.mark_glyph_set}"' - elif isinstance(self.process_marks, str): - res += f'"{self.process_marks}"' - else: - res += "ALL" - else: - res += " SKIP_MARKS" - if self.direction is not None: - res += f" DIRECTION {self.direction}" - if self.reversal: - res += " REVERSAL" - if self.comments is not None: - comments = self.comments.replace("\n", r"\n") - res += f'\nCOMMENTS "{comments}"' - if self.context: - res += "\n" + "\n".join(str(c) for c in self.context) - else: - res += "\nIN_CONTEXT\nEND_CONTEXT" - if self.sub: - res += f"\n{self.sub}" - if self.pos: - res += f"\n{self.pos}" - return res - - -class SubstitutionDefinition(Statement): - def __init__(self, mapping, location=None): - Statement.__init__(self, location) - self.mapping = mapping - - def __str__(self): - res = "AS_SUBSTITUTION\n" - for src, dst in self.mapping.items(): - src = "".join(str(s) for s in src) - dst = "".join(str(d) for d in dst) - res += f"SUB{src}\nWITH{dst}\nEND_SUB\n" - res += "END_SUBSTITUTION" - return res - - -class SubstitutionSingleDefinition(SubstitutionDefinition): - pass - - -class 
SubstitutionMultipleDefinition(SubstitutionDefinition): - pass - - -class SubstitutionLigatureDefinition(SubstitutionDefinition): - pass - - -class SubstitutionReverseChainingSingleDefinition(SubstitutionDefinition): - pass - - -class PositionAttachDefinition(Statement): - def __init__(self, coverage, coverage_to, location=None): - Statement.__init__(self, location) - self.coverage = coverage - self.coverage_to = coverage_to - - def __str__(self): - coverage = "".join(str(c) for c in self.coverage) - res = f"AS_POSITION\nATTACH{coverage}\nTO" - for coverage, anchor in self.coverage_to: - coverage = "".join(str(c) for c in coverage) - res += f'{coverage} AT ANCHOR "{anchor}"' - res += "\nEND_ATTACH\nEND_POSITION" - return res - - -class PositionAttachCursiveDefinition(Statement): - def __init__(self, coverages_exit, coverages_enter, location=None): - Statement.__init__(self, location) - self.coverages_exit = coverages_exit - self.coverages_enter = coverages_enter - - def __str__(self): - res = "AS_POSITION\nATTACH_CURSIVE" - for coverage in self.coverages_exit: - coverage = "".join(str(c) for c in coverage) - res += f"\nEXIT {coverage}" - for coverage in self.coverages_enter: - coverage = "".join(str(c) for c in coverage) - res += f"\nENTER {coverage}" - res += "\nEND_ATTACH\nEND_POSITION" - return res - - -class PositionAdjustPairDefinition(Statement): - def __init__(self, coverages_1, coverages_2, adjust_pair, location=None): - Statement.__init__(self, location) - self.coverages_1 = coverages_1 - self.coverages_2 = coverages_2 - self.adjust_pair = adjust_pair - - def __str__(self): - res = "AS_POSITION\nADJUST_PAIR\n" - for coverage in self.coverages_1: - coverage = " ".join(str(c) for c in coverage) - res += f" FIRST {coverage}" - res += "\n" - for coverage in self.coverages_2: - coverage = " ".join(str(c) for c in coverage) - res += f" SECOND {coverage}" - res += "\n" - for (id_1, id_2), (pos_1, pos_2) in self.adjust_pair.items(): - res += f" {id_1} {id_2} BY{pos_1}{pos_2}\n" - res += "\nEND_ADJUST\nEND_POSITION" - return res - - -class PositionAdjustSingleDefinition(Statement): - def __init__(self, adjust_single, location=None): - Statement.__init__(self, location) - self.adjust_single = adjust_single - - def __str__(self): - res = "AS_POSITION\nADJUST_SINGLE" - for coverage, pos in self.adjust_single: - coverage = "".join(str(c) for c in coverage) - res += f"{coverage} BY{pos}" - res += "\nEND_ADJUST\nEND_POSITION" - return res - - -class ContextDefinition(Statement): - def __init__(self, ex_or_in, left=None, right=None, location=None): - Statement.__init__(self, location) - self.ex_or_in = ex_or_in - self.left = left if left is not None else [] - self.right = right if right is not None else [] - - def __str__(self): - res = self.ex_or_in + "\n" - for coverage in self.left: - coverage = "".join(str(c) for c in coverage) - res += f" LEFT{coverage}\n" - for coverage in self.right: - coverage = "".join(str(c) for c in coverage) - res += f" RIGHT{coverage}\n" - res += "END_CONTEXT" - return res - - -class AnchorDefinition(Statement): - def __init__(self, name, gid, glyph_name, component, locked, pos, location=None): - Statement.__init__(self, location) - self.name = name - self.gid = gid - self.glyph_name = glyph_name - self.component = component - self.locked = locked - self.pos = pos - - def __str__(self): - locked = self.locked and " LOCKED" or "" - return ( - f'DEF_ANCHOR "{self.name}"' - f" ON {self.gid}" - f" GLYPH {self.glyph_name}" - f" COMPONENT {self.component}" - f"{locked}" - 
f" AT {self.pos} END_ANCHOR" - ) - - -class SettingDefinition(Statement): - def __init__(self, name, value, location=None): - Statement.__init__(self, location) - self.name = name - self.value = value - - def __str__(self): - if self.value is True: - return f"{self.name}" - if isinstance(self.value, (tuple, list)): - value = " ".join(str(v) for v in self.value) - return f"{self.name} {value}" - return f"{self.name} {self.value}" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Example-d0f6c2cc.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Example-d0f6c2cc.js deleted file mode 100644 index d70be46712dda44840445d2714ede0a08920187f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Example-d0f6c2cc.js +++ /dev/null @@ -1,2 +0,0 @@ -/* empty css */const{SvelteComponent:y,add_iframe_resize_listener:b,add_render_callback:m,append:v,attr:h,binding_callbacks:w,detach:z,element:p,init:k,insert:S,noop:f,safe_not_equal:q,set_data:C,text:E,toggle_class:_}=window.__gradio__svelte__internal,{onMount:M}=window.__gradio__svelte__internal;function P(t){let e,i,r;return{c(){e=p("div"),i=E(t[0]),h(e,"class","svelte-1viwdyg"),m(()=>t[5].call(e)),_(e,"table",t[1]==="table"),_(e,"gallery",t[1]==="gallery"),_(e,"selected",t[2])},m(l,s){S(l,e,s),v(e,i),r=b(e,t[5].bind(e)),t[6](e)},p(l,[s]){s&1&&C(i,l[0]),s&2&&_(e,"table",l[1]==="table"),s&2&&_(e,"gallery",l[1]==="gallery"),s&4&&_(e,"selected",l[2])},i:f,o:f,d(l){l&&z(e),r(),t[6](null)}}}function W(t,e,i){let{value:r}=e,{type:l}=e,{selected:s=!1}=e,d,a;function u(n,c){!n||!c||(a.style.setProperty("--local-text-width",`${c<150?c:200}px`),i(4,a.style.whiteSpace="unset",a))}M(()=>{u(a,d)});function o(){d=this.clientWidth,i(3,d)}function g(n){w[n?"unshift":"push"](()=>{a=n,i(4,a)})}return t.$$set=n=>{"value"in n&&i(0,r=n.value),"type"in n&&i(1,l=n.type),"selected"in n&&i(2,s=n.selected)},[r,l,s,d,a,o,g]}class A extends y{constructor(e){super(),k(this,e,W,P,q,{value:0,type:1,selected:2})}}export{A as default}; -//# sourceMappingURL=Example-d0f6c2cc.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-e94af8f4.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-e94af8f4.css deleted file mode 100644 index 1d01932deadcbb3b64dcaa622b91f038bc62fdce..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-e94af8f4.css +++ /dev/null @@ -1 +0,0 @@ -.load-wrap.svelte-1wi8on7{display:flex;justify-content:center;align-items:center;height:100%}.loader.svelte-1wi8on7{display:flex;position:relative;background-color:var(--border-color-accent-subdued);animation:svelte-1wi8on7-shadowPulse 2s linear infinite;box-shadow:-24px 0 var(--border-color-accent-subdued),24px 0 var(--border-color-accent-subdued);margin:var(--spacing-md);border-radius:50%;width:10px;height:10px;scale:.5}@keyframes svelte-1wi8on7-shadowPulse{33%{box-shadow:-24px 0 var(--border-color-accent-subdued),24px 0 #fff;background:#fff}66%{box-shadow:-24px 0 #fff,24px 0 #fff;background:var(--border-color-accent-subdued)}to{box-shadow:-24px 0 #fff,24px 0 var(--border-color-accent-subdued);background:#fff}}.container.svelte-1wi8on7{display:flex;flex-direction:column;align-items:center;justify-content:center;margin:var(--spacing-lg) 
var(--spacing-lg) 0 var(--spacing-lg)}#timeline.svelte-1wi8on7{display:flex;height:var(--size-10);flex:1;position:relative}img.svelte-1wi8on7{flex:1 1 auto;min-width:0;object-fit:cover;height:var(--size-12);border:1px solid var(--block-border-color);user-select:none;z-index:1}.handle.svelte-1wi8on7{width:3px;background-color:var(--color-accent);cursor:ew-resize;height:var(--size-12);z-index:3;position:absolute}.opaque-layer.svelte-1wi8on7{background-color:#e6672840;border:1px solid var(--color-accent);height:var(--size-12);position:absolute;z-index:2}.container.svelte-sxyn79.svelte-sxyn79{width:100%}time.svelte-sxyn79.svelte-sxyn79{color:var(--color-accent);font-weight:700;padding-left:var(--spacing-xs)}.timeline-wrapper.svelte-sxyn79.svelte-sxyn79{display:flex;align-items:center;justify-content:center;width:100%}.settings-wrapper.svelte-sxyn79.svelte-sxyn79{display:flex;justify-self:self-end}.text-button.svelte-sxyn79.svelte-sxyn79{border:1px solid var(--neutral-400);border-radius:var(--radius-sm);font-weight:300;font-size:var(--size-3);text-align:center;color:var(--neutral-400);height:var(--size-5);font-weight:700;padding:0 5px;margin-left:5px}.hidden.svelte-sxyn79.svelte-sxyn79{display:none}.text-button.svelte-sxyn79.svelte-sxyn79:hover,.text-button.svelte-sxyn79.svelte-sxyn79:focus{color:var(--color-accent);border-color:var(--color-accent)}.controls.svelte-sxyn79.svelte-sxyn79{display:grid;grid-template-columns:1fr 1fr;margin:var(--spacing-lg);overflow:hidden;text-align:left}@media (max-width: 320px){.controls.svelte-sxyn79.svelte-sxyn79{display:flex;flex-wrap:wrap}.controls.svelte-sxyn79 .svelte-sxyn79{margin:var(--spacing-sm)}.controls.svelte-sxyn79 .text-button.svelte-sxyn79{margin-left:0}}.action.svelte-sxyn79.svelte-sxyn79{width:var(--size-5);color:var(--neutral-400);margin-left:var(--spacing-md)}.action.svelte-sxyn79.svelte-sxyn79:disabled{cursor:not-allowed;color:var(--border-color-accent-subdued)}.action.svelte-sxyn79.svelte-sxyn79:disabled:hover{color:var(--border-color-accent-subdued)}.icon.svelte-sxyn79.svelte-sxyn79:hover,.icon.svelte-sxyn79.svelte-sxyn79:focus{color:var(--color-accent)}.container.svelte-sxyn79.svelte-sxyn79{display:flex;flex-direction:column}span.svelte-1i3qraf.svelte-1i3qraf{text-shadow:0 0 8px rgba(0,0,0,.5)}progress.svelte-1i3qraf.svelte-1i3qraf{margin-right:var(--size-3);border-radius:var(--radius-sm);width:var(--size-full);height:var(--size-2)}progress.svelte-1i3qraf.svelte-1i3qraf::-webkit-progress-bar{border-radius:2px;background-color:#fff3;overflow:hidden}progress.svelte-1i3qraf.svelte-1i3qraf::-webkit-progress-value{background-color:#ffffffe6}.mirror.svelte-1i3qraf.svelte-1i3qraf{transform:scaleX(-1)}.controls.svelte-1i3qraf.svelte-1i3qraf{position:absolute;bottom:0;opacity:0;transition:.5s;margin:var(--size-2);border-radius:var(--radius-md);background:var(--color-grey-800);padding:var(--size-2) var(--size-1);width:calc(100% - .75rem);width:calc(100% - var(--size-2) * 2)}.wrap.svelte-1i3qraf:hover 
.controls.svelte-1i3qraf{opacity:1}.inner.svelte-1i3qraf.svelte-1i3qraf{display:flex;justify-content:space-between;align-items:center;padding-right:var(--size-2);padding-left:var(--size-2);width:var(--size-full);height:var(--size-full)}.icon.svelte-1i3qraf.svelte-1i3qraf{display:flex;justify-content:center;cursor:pointer;width:var(--size-6);color:#fff}.time.svelte-1i3qraf.svelte-1i3qraf{flex-shrink:0;margin-right:var(--size-3);margin-left:var(--size-3);color:#fff;font-size:var(--text-sm);font-family:var(--font-mono)}.wrap.svelte-1i3qraf.svelte-1i3qraf{position:relative;background-color:var(--background-fill-secondary);height:var(--size-full);width:var(--size-full);border-radius:var(--radius-xl)}.file-name.svelte-1ipnm0o{padding:var(--size-6);font-size:var(--text-xxl);word-break:break-all}.file-size.svelte-1ipnm0o{padding:var(--size-2);font-size:var(--text-xl)}.source-selection.svelte-1ipnm0o{display:flex;align-items:center;justify-content:center;border-top:1px solid var(--border-color-primary);width:95%;margin:0 auto}.icon.svelte-1ipnm0o{width:22px;height:22px;margin:var(--spacing-lg) var(--spacing-xs);padding:var(--spacing-xs);color:var(--neutral-400);border-radius:var(--radius-md)}.icon.svelte-1ipnm0o:hover,.icon.svelte-1ipnm0o:focus{color:var(--color-accent)}.icon-buttons.svelte-rvdo70{display:flex;position:absolute;top:6px;right:6px;gap:var(--size-1)} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-8fd13499.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-8fd13499.js deleted file mode 100644 index dd9019b385f2f88c62440c40702ffc083d324d7a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-8fd13499.js +++ /dev/null @@ -1,2 +0,0 @@ -import{B as O}from"./Button-8eeccca1.js";import{B as Q}from"./BlockTitle-7572070c.js";import{S as R}from"./Index-c74a8b7c.js";import{default as Te}from"./Example-254ceac2.js";import"./index-50ad4c77.js";import"./svelte/svelte.js";import"./Info-ba93744d.js";const{SvelteComponent:V,append:P,attr:v,bubble:j,create_component:W,destroy_component:X,detach:A,element:z,init:Y,insert:F,listen:w,mount_component:Z,run_all:p,safe_not_equal:y,set_data:x,set_input_value:D,space:$,text:ee,transition_in:te,transition_out:ne}=window.__gradio__svelte__internal,{createEventDispatcher:ie,afterUpdate:le}=window.__gradio__svelte__internal;function se(n){let e;return{c(){e=ee(n[1])},m(t,i){F(t,e,i)},p(t,i){i&2&&x(e,t[1])},d(t){t&&A(e)}}}function ae(n){let e,t,i,s,o,h,b;return t=new Q({props:{show_label:n[4],info:n[2],$$slots:{default:[se]},$$scope:{ctx:n}}}),{c(){e=z("label"),W(t.$$.fragment),i=$(),s=z("input"),v(s,"type","color"),s.disabled=n[3],v(s,"class","svelte-56zyyb"),v(e,"class","block")},m(u,r){F(u,e,r),Z(t,e,null),P(e,i),P(e,s),D(s,n[0]),o=!0,h||(b=[w(s,"input",n[8]),w(s,"focus",n[6]),w(s,"blur",n[7])],h=!0)},p(u,[r]){const d={};r&16&&(d.show_label=u[4]),r&4&&(d.info=u[2]),r&2050&&(d.$$scope={dirty:r,ctx:u}),t.$set(d),(!o||r&8)&&(s.disabled=u[3]),r&1&&D(s,u[0])},i(u){o||(te(t.$$.fragment,u),o=!0)},o(u){ne(t.$$.fragment,u),o=!1},d(u){u&&A(e),X(t),h=!1,p(b)}}}function ue(n,e,t){let{value:i="#000000"}=e,{value_is_output:s=!1}=e,{label:o}=e,{info:h=void 0}=e,{disabled:b=!1}=e,{show_label:u=!0}=e;const r=ie();function d(){r("change",i),s||r("input")}le(()=>{t(5,s=!1)});function g(_){j.call(this,n,_)}function l(_){j.call(this,n,_)}function 
c(){i=this.value,t(0,i)}return n.$$set=_=>{"value"in _&&t(0,i=_.value),"value_is_output"in _&&t(5,s=_.value_is_output),"label"in _&&t(1,o=_.label),"info"in _&&t(2,h=_.info),"disabled"in _&&t(3,b=_.disabled),"show_label"in _&&t(4,u=_.show_label)},n.$$.update=()=>{n.$$.dirty&1&&d()},[i,o,h,b,u,s,g,l,c]}class oe extends V{constructor(e){super(),Y(this,e,ue,ae,y,{value:0,value_is_output:5,label:1,info:2,disabled:3,show_label:4})}}const _e=oe,{SvelteComponent:ce,add_flush_callback:I,assign:re,bind:T,binding_callbacks:U,create_component:k,destroy_component:C,detach:fe,flush:m,get_spread_object:he,get_spread_update:me,init:be,insert:de,mount_component:B,safe_not_equal:ge,space:ve,transition_in:S,transition_out:q}=window.__gradio__svelte__internal;function we(n){let e,t,i,s,o,h;const b=[{autoscroll:n[12].autoscroll},{i18n:n[12].i18n},n[11]];let u={};for(let l=0;lT(i,"value",r)),U.push(()=>T(i,"value_is_output",d)),i.$on("change",n[16]),i.$on("input",n[17]),i.$on("submit",n[18]),i.$on("blur",n[19]),i.$on("focus",n[20]),{c(){k(e.$$.fragment),t=ve(),k(i.$$.fragment)},m(l,c){B(e,l,c),de(l,t,c),B(i,l,c),h=!0},p(l,c){const _=c&6144?me(b,[c&4096&&{autoscroll:l[12].autoscroll},c&4096&&{i18n:l[12].i18n},c&2048&&he(l[11])]):{};e.$set(_);const f={};c&4&&(f.label=l[2]),c&8&&(f.info=l[3]),c&128&&(f.show_label=l[7]),c&8192&&(f.disabled=!l[13]),!s&&c&1&&(s=!0,f.value=l[0],I(()=>s=!1)),!o&&c&2&&(o=!0,f.value_is_output=l[1],I(()=>o=!1)),i.$set(f)},i(l){h||(S(e.$$.fragment,l),S(i.$$.fragment,l),h=!0)},o(l){q(e.$$.fragment,l),q(i.$$.fragment,l),h=!1},d(l){l&&fe(t),C(e,l),C(i,l)}}}function ke(n){let e,t;return e=new O({props:{visible:n[6],elem_id:n[4],elem_classes:n[5],container:n[8],scale:n[9],min_width:n[10],$$slots:{default:[we]},$$scope:{ctx:n}}}),{c(){k(e.$$.fragment)},m(i,s){B(e,i,s),t=!0},p(i,[s]){const o={};s&64&&(o.visible=i[6]),s&16&&(o.elem_id=i[4]),s&32&&(o.elem_classes=i[5]),s&256&&(o.container=i[8]),s&512&&(o.scale=i[9]),s&1024&&(o.min_width=i[10]),s&2111631&&(o.$$scope={dirty:s,ctx:i}),e.$set(o)},i(i){t||(S(e.$$.fragment,i),t=!0)},o(i){q(e.$$.fragment,i),t=!1},d(i){C(e,i)}}}function Ce(n,e,t){let{label:i="ColorPicker"}=e,{info:s=void 0}=e,{elem_id:o=""}=e,{elem_classes:h=[]}=e,{visible:b=!0}=e,{value:u}=e,{value_is_output:r=!1}=e,{show_label:d}=e,{container:g=!0}=e,{scale:l=null}=e,{min_width:c=void 0}=e,{loading_status:_}=e,{gradio:f}=e,{interactive:E}=e;function G(a){u=a,t(0,u)}function H(a){r=a,t(1,r)}const J=()=>f.dispatch("change"),K=()=>f.dispatch("input"),L=()=>f.dispatch("submit"),M=()=>f.dispatch("blur"),N=()=>f.dispatch("focus");return n.$$set=a=>{"label"in a&&t(2,i=a.label),"info"in a&&t(3,s=a.info),"elem_id"in a&&t(4,o=a.elem_id),"elem_classes"in a&&t(5,h=a.elem_classes),"visible"in a&&t(6,b=a.visible),"value"in a&&t(0,u=a.value),"value_is_output"in a&&t(1,r=a.value_is_output),"show_label"in a&&t(7,d=a.show_label),"container"in a&&t(8,g=a.container),"scale"in a&&t(9,l=a.scale),"min_width"in a&&t(10,c=a.min_width),"loading_status"in a&&t(11,_=a.loading_status),"gradio"in a&&t(12,f=a.gradio),"interactive"in a&&t(13,E=a.interactive)},[u,r,i,s,o,h,b,d,g,l,c,_,f,E,G,H,J,K,L,M,N]}class ze extends ce{constructor(e){super(),be(this,e,Ce,ke,ge,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,value_is_output:1,show_label:7,container:8,scale:9,min_width:10,loading_status:11,gradio:12,interactive:13})}get label(){return this.$$.ctx[2]}set label(e){this.$$set({label:e}),m()}get info(){return this.$$.ctx[3]}set info(e){this.$$set({info:e}),m()}get elem_id(){return this.$$.ctx[4]}set 
elem_id(e){this.$$set({elem_id:e}),m()}get elem_classes(){return this.$$.ctx[5]}set elem_classes(e){this.$$set({elem_classes:e}),m()}get visible(){return this.$$.ctx[6]}set visible(e){this.$$set({visible:e}),m()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),m()}get value_is_output(){return this.$$.ctx[1]}set value_is_output(e){this.$$set({value_is_output:e}),m()}get show_label(){return this.$$.ctx[7]}set show_label(e){this.$$set({show_label:e}),m()}get container(){return this.$$.ctx[8]}set container(e){this.$$set({container:e}),m()}get scale(){return this.$$.ctx[9]}set scale(e){this.$$set({scale:e}),m()}get min_width(){return this.$$.ctx[10]}set min_width(e){this.$$set({min_width:e}),m()}get loading_status(){return this.$$.ctx[11]}set loading_status(e){this.$$set({loading_status:e}),m()}get gradio(){return this.$$.ctx[12]}set gradio(e){this.$$set({gradio:e}),m()}get interactive(){return this.$$.ctx[13]}set interactive(e){this.$$set({interactive:e}),m()}}export{_e as BaseColorPicker,Te as BaseExample,ze as default}; -//# sourceMappingURL=Index-8fd13499.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/openai_response.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/openai_response.py deleted file mode 100644 index d2230b1540923f2abe7cbc91b6db4b935f240343..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/openai_response.py +++ /dev/null @@ -1,31 +0,0 @@ -from typing import Optional - - -class OpenAIResponse: - def __init__(self, data, headers): - self._headers = headers - self.data = data - - @property - def request_id(self) -> Optional[str]: - return self._headers.get("request-id") - - @property - def retry_after(self) -> Optional[int]: - try: - return int(self._headers.get("retry-after")) - except TypeError: - return None - - @property - def operation_location(self) -> Optional[str]: - return self._headers.get("operation-location") - - @property - def organization(self) -> Optional[str]: - return self._headers.get("OpenAI-Organization") - - @property - def response_ms(self) -> Optional[int]: - h = self._headers.get("Openai-Processing-Ms") - return None if h is None else round(float(h)) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_reductions.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_reductions.py deleted file mode 100644 index 59a4443ac9e19f357f5136bfd4b905443ecfe732..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_reductions.py +++ /dev/null @@ -1,183 +0,0 @@ -import numpy as np -import pytest - -from pandas.core.dtypes.dtypes import DatetimeTZDtype - -import pandas as pd -from pandas import NaT -import pandas._testing as tm -from pandas.core.arrays import DatetimeArray - - -class TestReductions: - @pytest.fixture(params=["s", "ms", "us", "ns"]) - def unit(self, request): - return request.param - - @pytest.fixture - def arr1d(self, tz_naive_fixture): - """Fixture returning DatetimeArray with parametrized timezones""" - tz = tz_naive_fixture - dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]") - arr = DatetimeArray._from_sequence( - [ - "2000-01-03", - "2000-01-03", - "NaT", - "2000-01-02", - "2000-01-05", - "2000-01-04", - ], - dtype=dtype, - ) - return arr - - def test_min_max(self, arr1d, 
unit): - arr = arr1d - arr = arr.as_unit(unit) - tz = arr.tz - - result = arr.min() - expected = pd.Timestamp("2000-01-02", tz=tz).as_unit(unit) - assert result == expected - assert result.unit == expected.unit - - result = arr.max() - expected = pd.Timestamp("2000-01-05", tz=tz).as_unit(unit) - assert result == expected - assert result.unit == expected.unit - - result = arr.min(skipna=False) - assert result is NaT - - result = arr.max(skipna=False) - assert result is NaT - - @pytest.mark.parametrize("tz", [None, "US/Central"]) - @pytest.mark.parametrize("skipna", [True, False]) - def test_min_max_empty(self, skipna, tz): - dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]") - arr = DatetimeArray._from_sequence([], dtype=dtype) - result = arr.min(skipna=skipna) - assert result is NaT - - result = arr.max(skipna=skipna) - assert result is NaT - - @pytest.mark.parametrize("tz", [None, "US/Central"]) - @pytest.mark.parametrize("skipna", [True, False]) - def test_median_empty(self, skipna, tz): - dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]") - arr = DatetimeArray._from_sequence([], dtype=dtype) - result = arr.median(skipna=skipna) - assert result is NaT - - arr = arr.reshape(0, 3) - result = arr.median(axis=0, skipna=skipna) - expected = type(arr)._from_sequence([NaT, NaT, NaT], dtype=arr.dtype) - tm.assert_equal(result, expected) - - result = arr.median(axis=1, skipna=skipna) - expected = type(arr)._from_sequence([], dtype=arr.dtype) - tm.assert_equal(result, expected) - - def test_median(self, arr1d): - arr = arr1d - - result = arr.median() - assert result == arr[0] - result = arr.median(skipna=False) - assert result is NaT - - result = arr.dropna().median(skipna=False) - assert result == arr[0] - - result = arr.median(axis=0) - assert result == arr[0] - - def test_median_axis(self, arr1d): - arr = arr1d - assert arr.median(axis=0) == arr.median() - assert arr.median(axis=0, skipna=False) is NaT - - msg = r"abs\(axis\) must be less than ndim" - with pytest.raises(ValueError, match=msg): - arr.median(axis=1) - - @pytest.mark.filterwarnings("ignore:All-NaN slice encountered:RuntimeWarning") - def test_median_2d(self, arr1d): - arr = arr1d.reshape(1, -1) - - # axis = None - assert arr.median() == arr1d.median() - assert arr.median(skipna=False) is NaT - - # axis = 0 - result = arr.median(axis=0) - expected = arr1d - tm.assert_equal(result, expected) - - # Since column 3 is all-NaT, we get NaT there with or without skipna - result = arr.median(axis=0, skipna=False) - expected = arr1d - tm.assert_equal(result, expected) - - # axis = 1 - result = arr.median(axis=1) - expected = type(arr)._from_sequence([arr1d.median()]) - tm.assert_equal(result, expected) - - result = arr.median(axis=1, skipna=False) - expected = type(arr)._from_sequence([NaT], dtype=arr.dtype) - tm.assert_equal(result, expected) - - def test_mean(self, arr1d): - arr = arr1d - - # manually verified result - expected = arr[0] + 0.4 * pd.Timedelta(days=1) - - result = arr.mean() - assert result == expected - result = arr.mean(skipna=False) - assert result is NaT - - result = arr.dropna().mean(skipna=False) - assert result == expected - - result = arr.mean(axis=0) - assert result == expected - - def test_mean_2d(self): - dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific") - dta = dti._data.reshape(3, 2) - - result = dta.mean(axis=0) - expected = dta[1] - tm.assert_datetime_array_equal(result, expected) - - result = dta.mean(axis=1) - expected = dta[:, 0] + 
pd.Timedelta(hours=12) - tm.assert_datetime_array_equal(result, expected) - - result = dta.mean(axis=None) - expected = dti.mean() - assert result == expected - - @pytest.mark.parametrize("skipna", [True, False]) - def test_mean_empty(self, arr1d, skipna): - arr = arr1d[:0] - - assert arr.mean(skipna=skipna) is NaT - - arr2d = arr.reshape(0, 3) - result = arr2d.mean(axis=0, skipna=skipna) - expected = DatetimeArray._from_sequence([NaT, NaT, NaT], dtype=arr.dtype) - tm.assert_datetime_array_equal(result, expected) - - result = arr2d.mean(axis=1, skipna=skipna) - expected = arr # i.e. 1D, empty - tm.assert_datetime_array_equal(result, expected) - - result = arr2d.mean(axis=None, skipna=skipna) - assert result is NaT diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_convert.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_convert.py deleted file mode 100644 index 8a484abaab54cbd104acbaf4bb3273f4e41ff55a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_convert.py +++ /dev/null @@ -1,131 +0,0 @@ -import numpy as np -import pytest - -from pandas import ( - DataFrame, - Index, - MultiIndex, - Series, - date_range, -) -import pandas._testing as tm - - -class TestTZConvert: - def test_tz_convert(self, frame_or_series): - rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") - - obj = DataFrame({"a": 1}, index=rng) - obj = tm.get_obj(obj, frame_or_series) - - result = obj.tz_convert("Europe/Berlin") - expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin")) - expected = tm.get_obj(expected, frame_or_series) - - assert result.index.tz.zone == "Europe/Berlin" - tm.assert_equal(result, expected) - - def test_tz_convert_axis1(self): - rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") - - obj = DataFrame({"a": 1}, index=rng) - - obj = obj.T - result = obj.tz_convert("Europe/Berlin", axis=1) - assert result.columns.tz.zone == "Europe/Berlin" - - expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin")) - - tm.assert_equal(result, expected.T) - - def test_tz_convert_naive(self, frame_or_series): - # can't convert tz-naive - rng = date_range("1/1/2011", periods=200, freq="D") - ts = Series(1, index=rng) - ts = frame_or_series(ts) - - with pytest.raises(TypeError, match="Cannot convert tz-naive"): - ts.tz_convert("US/Eastern") - - @pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"]) - def test_tz_convert_and_localize(self, fn): - l0 = date_range("20140701", periods=5, freq="D") - l1 = date_range("20140701", periods=5, freq="D") - - int_idx = Index(range(5)) - - if fn == "tz_convert": - l0 = l0.tz_localize("UTC") - l1 = l1.tz_localize("UTC") - - for idx in [l0, l1]: - l0_expected = getattr(idx, fn)("US/Pacific") - l1_expected = getattr(idx, fn)("US/Pacific") - - df1 = DataFrame(np.ones(5), index=l0) - df1 = getattr(df1, fn)("US/Pacific") - tm.assert_index_equal(df1.index, l0_expected) - - # MultiIndex - # GH7846 - df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1])) - - # freq is not preserved in MultiIndex construction - l1_expected = l1_expected._with_freq(None) - l0_expected = l0_expected._with_freq(None) - l1 = l1._with_freq(None) - l0 = l0._with_freq(None) - - df3 = getattr(df2, fn)("US/Pacific", level=0) - assert not df3.index.levels[0].equals(l0) - tm.assert_index_equal(df3.index.levels[0], l0_expected) - 
tm.assert_index_equal(df3.index.levels[1], l1) - assert not df3.index.levels[1].equals(l1_expected) - - df3 = getattr(df2, fn)("US/Pacific", level=1) - tm.assert_index_equal(df3.index.levels[0], l0) - assert not df3.index.levels[0].equals(l0_expected) - tm.assert_index_equal(df3.index.levels[1], l1_expected) - assert not df3.index.levels[1].equals(l1) - - df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) - - # TODO: untested - getattr(df4, fn)("US/Pacific", level=1) - - tm.assert_index_equal(df3.index.levels[0], l0) - assert not df3.index.levels[0].equals(l0_expected) - tm.assert_index_equal(df3.index.levels[1], l1_expected) - assert not df3.index.levels[1].equals(l1) - - # Bad Inputs - - # Not DatetimeIndex / PeriodIndex - with pytest.raises(TypeError, match="DatetimeIndex"): - df = DataFrame(index=int_idx) - getattr(df, fn)("US/Pacific") - - # Not DatetimeIndex / PeriodIndex - with pytest.raises(TypeError, match="DatetimeIndex"): - df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) - getattr(df, fn)("US/Pacific", level=0) - - # Invalid level - with pytest.raises(ValueError, match="not valid"): - df = DataFrame(index=l0) - getattr(df, fn)("US/Pacific", level=1) - - @pytest.mark.parametrize("copy", [True, False]) - def test_tz_convert_copy_inplace_mutate(self, copy, frame_or_series): - # GH#6326 - obj = frame_or_series( - np.arange(0, 5), - index=date_range("20131027", periods=5, freq="1H", tz="Europe/Berlin"), - ) - orig = obj.copy() - result = obj.tz_convert("UTC", copy=copy) - expected = frame_or_series(np.arange(0, 5), index=obj.index.tz_convert("UTC")) - tm.assert_equal(result, expected) - tm.assert_equal(obj, orig) - assert result.index is not obj.index - assert result is not obj diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_compare.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_compare.py deleted file mode 100644 index fe2016a245ec7c1373c72f40c7b6e7d899cf4f96..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_compare.py +++ /dev/null @@ -1,141 +0,0 @@ -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm - - -@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"]) -def test_compare_axis(align_axis): - # GH#30429 - s1 = pd.Series(["a", "b", "c"]) - s2 = pd.Series(["x", "b", "z"]) - - result = s1.compare(s2, align_axis=align_axis) - - if align_axis in (1, "columns"): - indices = pd.Index([0, 2]) - columns = pd.Index(["self", "other"]) - expected = pd.DataFrame( - [["a", "x"], ["c", "z"]], index=indices, columns=columns - ) - tm.assert_frame_equal(result, expected) - else: - indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]]) - expected = pd.Series(["a", "x", "c", "z"], index=indices) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize( - "keep_shape, keep_equal", - [ - (True, False), - (False, True), - (True, True), - # False, False case is already covered in test_compare_axis - ], -) -def test_compare_various_formats(keep_shape, keep_equal): - s1 = pd.Series(["a", "b", "c"]) - s2 = pd.Series(["x", "b", "z"]) - - result = s1.compare(s2, keep_shape=keep_shape, keep_equal=keep_equal) - - if keep_shape: - indices = pd.Index([0, 1, 2]) - columns = pd.Index(["self", "other"]) - if keep_equal: - expected = pd.DataFrame( - [["a", "x"], ["b", "b"], ["c", "z"]], index=indices, 
columns=columns - ) - else: - expected = pd.DataFrame( - [["a", "x"], [np.nan, np.nan], ["c", "z"]], - index=indices, - columns=columns, - ) - else: - indices = pd.Index([0, 2]) - columns = pd.Index(["self", "other"]) - expected = pd.DataFrame( - [["a", "x"], ["c", "z"]], index=indices, columns=columns - ) - tm.assert_frame_equal(result, expected) - - -def test_compare_with_equal_nulls(): - # We want to make sure two NaNs are considered the same - # and dropped where applicable - s1 = pd.Series(["a", "b", np.nan]) - s2 = pd.Series(["x", "b", np.nan]) - - result = s1.compare(s2) - expected = pd.DataFrame([["a", "x"]], columns=["self", "other"]) - tm.assert_frame_equal(result, expected) - - -def test_compare_with_non_equal_nulls(): - # We want to make sure the relevant NaNs do not get dropped - s1 = pd.Series(["a", "b", "c"]) - s2 = pd.Series(["x", "b", np.nan]) - - result = s1.compare(s2, align_axis=0) - - indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]]) - expected = pd.Series(["a", "x", "c", np.nan], index=indices) - tm.assert_series_equal(result, expected) - - -def test_compare_multi_index(): - index = pd.MultiIndex.from_arrays([[0, 0, 1], [0, 1, 2]]) - s1 = pd.Series(["a", "b", "c"], index=index) - s2 = pd.Series(["x", "b", "z"], index=index) - - result = s1.compare(s2, align_axis=0) - - indices = pd.MultiIndex.from_arrays( - [[0, 0, 1, 1], [0, 0, 2, 2], ["self", "other", "self", "other"]] - ) - expected = pd.Series(["a", "x", "c", "z"], index=indices) - tm.assert_series_equal(result, expected) - - -def test_compare_unaligned_objects(): - # test Series with different indices - msg = "Can only compare identically-labeled Series objects" - with pytest.raises(ValueError, match=msg): - ser1 = pd.Series([1, 2, 3], index=["a", "b", "c"]) - ser2 = pd.Series([1, 2, 3], index=["a", "b", "d"]) - ser1.compare(ser2) - - # test Series with different lengths - msg = "Can only compare identically-labeled Series objects" - with pytest.raises(ValueError, match=msg): - ser1 = pd.Series([1, 2, 3]) - ser2 = pd.Series([1, 2, 3, 4]) - ser1.compare(ser2) - - -def test_compare_datetime64_and_string(): - # Issue https://github.com/pandas-dev/pandas/issues/45506 - # Catch OverflowError when comparing datetime64 and string - data = [ - {"a": "2015-07-01", "b": "08335394550"}, - {"a": "2015-07-02", "b": "+49 (0) 0345 300033"}, - {"a": "2015-07-03", "b": "+49(0)2598 04457"}, - {"a": "2015-07-04", "b": "0741470003"}, - {"a": "2015-07-05", "b": "04181 83668"}, - ] - dtypes = {"a": "datetime64[ns]", "b": "string"} - df = pd.DataFrame(data=data).astype(dtypes) - - result_eq1 = df["a"].eq(df["b"]) - result_eq2 = df["a"] == df["b"] - result_neq = df["a"] != df["b"] - - expected_eq = pd.Series([False] * 5) # For .eq and == - expected_neq = pd.Series([True] * 5) # For != - - tm.assert_series_equal(result_eq1, expected_eq) - tm.assert_series_equal(result_eq2, expected_eq) - tm.assert_series_equal(result_neq, expected_neq) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/operations/install/editable_legacy.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/operations/install/editable_legacy.py deleted file mode 100644 index bb548cdca75a924bf090f2be29779e2be1951a2c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/operations/install/editable_legacy.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Legacy editable installation process, i.e. `setup.py develop`. 
-""" -import logging -from typing import List, Optional, Sequence - -from pip._internal.build_env import BuildEnvironment -from pip._internal.utils.logging import indent_log -from pip._internal.utils.setuptools_build import make_setuptools_develop_args -from pip._internal.utils.subprocess import call_subprocess - -logger = logging.getLogger(__name__) - - -def install_editable( - install_options: List[str], - global_options: Sequence[str], - prefix: Optional[str], - home: Optional[str], - use_user_site: bool, - name: str, - setup_py_path: str, - isolated: bool, - build_env: BuildEnvironment, - unpacked_source_directory: str, -) -> None: - """Install a package in editable mode. Most arguments are pass-through - to setuptools. - """ - logger.info("Running setup.py develop for %s", name) - - args = make_setuptools_develop_args( - setup_py_path, - global_options=global_options, - install_options=install_options, - no_user_config=isolated, - prefix=prefix, - home=home, - use_user_site=use_user_site, - ) - - with indent_log(): - with build_env: - call_subprocess( - args, - command_desc="python setup.py develop", - cwd=unpacked_source_directory, - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/ezhil.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/ezhil.py deleted file mode 100644 index 49478ea00da2b399a545a9b44e10717b3542f7d2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/ezhil.py +++ /dev/null @@ -1,77 +0,0 @@ -""" - pygments.lexers.ezhil - ~~~~~~~~~~~~~~~~~~~~~ - - Pygments lexers for Ezhil language. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include, words -from pygments.token import Keyword, Comment, Name, String, Number, \ - Punctuation, Operator, Whitespace - -__all__ = ['EzhilLexer'] - - -class EzhilLexer(RegexLexer): - """ - Lexer for Ezhil, a Tamil script-based programming language. - - .. versionadded:: 2.1 - """ - name = 'Ezhil' - url = 'http://ezhillang.org' - aliases = ['ezhil'] - filenames = ['*.n'] - mimetypes = ['text/x-ezhil'] - # Refer to tamil.utf8.tamil_letters from open-tamil for a stricter version of this. - # This much simpler version is close enough, and includes combining marks. 
- _TALETTERS = '[a-zA-Z_]|[\u0b80-\u0bff]' - tokens = { - 'root': [ - include('keywords'), - (r'#.*$', Comment.Single), - (r'[@+/*,^\-%]|[!<>=]=?|&&?|\|\|?', Operator), - ('இல்', Operator.Word), - (words(('assert', 'max', 'min', - 'நீளம்', 'சரம்_இடமாற்று', 'சரம்_கண்டுபிடி', - 'பட்டியல்', 'பின்இணை', 'வரிசைப்படுத்து', - 'எடு', 'தலைகீழ்', 'நீட்டிக்க', 'நுழைக்க', 'வை', - 'கோப்பை_திற', 'கோப்பை_எழுது', 'கோப்பை_மூடு', - 'pi', 'sin', 'cos', 'tan', 'sqrt', 'hypot', 'pow', - 'exp', 'log', 'log10', 'exit', - ), suffix=r'\b'), Name.Builtin), - (r'(True|False)\b', Keyword.Constant), - (r'[^\S\n]+', Whitespace), - include('identifier'), - include('literal'), - (r'[(){}\[\]:;.]', Punctuation), - ], - 'keywords': [ - ('பதிப்பி|தேர்ந்தெடு|தேர்வு|ஏதேனில்|ஆனால்|இல்லைஆனால்|இல்லை|ஆக|ஒவ்வொன்றாக|இல்|வரை|செய்|முடியேனில்|பின்கொடு|முடி|நிரல்பாகம்|தொடர்|நிறுத்து|நிரல்பாகம்', Keyword), - ], - 'identifier': [ - ('(?:'+_TALETTERS+')(?:[0-9]|'+_TALETTERS+')*', Name), - ], - 'literal': [ - (r'".*?"', String), - (r'\d+((\.\d*)?[eE][+-]?\d+|\.\d*)', Number.Float), - (r'\d+', Number.Integer), - ] - } - - def analyse_text(text): - """This language uses Tamil-script. We'll assume that if there's a - decent amount of Tamil-characters, it's this language. This assumption - is obviously horribly off if someone uses string literals in tamil - in another language.""" - if len(re.findall(r'[\u0b80-\u0bff]', text)) > 10: - return 0.25 - - def __init__(self, **options): - super().__init__(**options) - self.encoding = options.get('encoding', 'utf-8') diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/progress_bar.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/progress_bar.py deleted file mode 100644 index 67361df2e49d48dd56c91e291ba92553e9afe344..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/progress_bar.py +++ /dev/null @@ -1,224 +0,0 @@ -import math -from functools import lru_cache -from time import monotonic -from typing import Iterable, List, Optional - -from .color import Color, blend_rgb -from .color_triplet import ColorTriplet -from .console import Console, ConsoleOptions, RenderResult -from .jupyter import JupyterMixin -from .measure import Measurement -from .segment import Segment -from .style import Style, StyleType - -# Number of characters before 'pulse' animation repeats -PULSE_SIZE = 20 - - -class ProgressBar(JupyterMixin): - """Renders a (progress) bar. Used by rich.progress. - - Args: - total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation. - completed (float, optional): Number of steps completed. Defaults to 0. - width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None. - pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed. - style (StyleType, optional): Style for the bar background. Defaults to "bar.back". - complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". - finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished". - pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". - animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time. 
- """ - - def __init__( - self, - total: Optional[float] = 100.0, - completed: float = 0, - width: Optional[int] = None, - pulse: bool = False, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - animation_time: Optional[float] = None, - ): - self.total = total - self.completed = completed - self.width = width - self.pulse = pulse - self.style = style - self.complete_style = complete_style - self.finished_style = finished_style - self.pulse_style = pulse_style - self.animation_time = animation_time - - self._pulse_segments: Optional[List[Segment]] = None - - def __repr__(self) -> str: - return f"" - - @property - def percentage_completed(self) -> Optional[float]: - """Calculate percentage complete.""" - if self.total is None: - return None - completed = (self.completed / self.total) * 100.0 - completed = min(100, max(0.0, completed)) - return completed - - @lru_cache(maxsize=16) - def _get_pulse_segments( - self, - fore_style: Style, - back_style: Style, - color_system: str, - no_color: bool, - ascii: bool = False, - ) -> List[Segment]: - """Get a list of segments to render a pulse animation. - - Returns: - List[Segment]: A list of segments, one segment per character. - """ - bar = "-" if ascii else "━" - segments: List[Segment] = [] - if color_system not in ("standard", "eight_bit", "truecolor") or no_color: - segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2) - segments += [Segment(" " if no_color else bar, back_style)] * ( - PULSE_SIZE - (PULSE_SIZE // 2) - ) - return segments - - append = segments.append - fore_color = ( - fore_style.color.get_truecolor() - if fore_style.color - else ColorTriplet(255, 0, 255) - ) - back_color = ( - back_style.color.get_truecolor() - if back_style.color - else ColorTriplet(0, 0, 0) - ) - cos = math.cos - pi = math.pi - _Segment = Segment - _Style = Style - from_triplet = Color.from_triplet - - for index in range(PULSE_SIZE): - position = index / PULSE_SIZE - fade = 0.5 + cos((position * pi * 2)) / 2.0 - color = blend_rgb(fore_color, back_color, cross_fade=fade) - append(_Segment(bar, _Style(color=from_triplet(color)))) - return segments - - def update(self, completed: float, total: Optional[float] = None) -> None: - """Update progress with new values. - - Args: - completed (float): Number of steps completed. - total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None. - """ - self.completed = completed - self.total = total if total is not None else self.total - - def _render_pulse( - self, console: Console, width: int, ascii: bool = False - ) -> Iterable[Segment]: - """Renders the pulse animation. - - Args: - console (Console): Console instance. - width (int): Width in characters of pulse animation. 
- - Returns: - RenderResult: [description] - - Yields: - Iterator[Segment]: Segments to render pulse - """ - fore_style = console.get_style(self.pulse_style, default="white") - back_style = console.get_style(self.style, default="black") - - pulse_segments = self._get_pulse_segments( - fore_style, back_style, console.color_system, console.no_color, ascii=ascii - ) - segment_count = len(pulse_segments) - current_time = ( - monotonic() if self.animation_time is None else self.animation_time - ) - segments = pulse_segments * (int(width / segment_count) + 2) - offset = int(-current_time * 15) % segment_count - segments = segments[offset : offset + width] - yield from segments - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - - width = min(self.width or options.max_width, options.max_width) - ascii = options.legacy_windows or options.ascii_only - should_pulse = self.pulse or self.total is None - if should_pulse: - yield from self._render_pulse(console, width, ascii=ascii) - return - - completed: Optional[float] = ( - min(self.total, max(0, self.completed)) if self.total is not None else None - ) - - bar = "-" if ascii else "━" - half_bar_right = " " if ascii else "╸" - half_bar_left = " " if ascii else "╺" - complete_halves = ( - int(width * 2 * completed / self.total) - if self.total and completed is not None - else width * 2 - ) - bar_count = complete_halves // 2 - half_bar_count = complete_halves % 2 - style = console.get_style(self.style) - is_finished = self.total is None or self.completed >= self.total - complete_style = console.get_style( - self.finished_style if is_finished else self.complete_style - ) - _Segment = Segment - if bar_count: - yield _Segment(bar * bar_count, complete_style) - if half_bar_count: - yield _Segment(half_bar_right * half_bar_count, complete_style) - - if not console.no_color: - remaining_bars = width - bar_count - half_bar_count - if remaining_bars and console.color_system is not None: - if not half_bar_count and bar_count: - yield _Segment(half_bar_left, style) - remaining_bars -= 1 - if remaining_bars: - yield _Segment(bar * remaining_bars, style) - - def __rich_measure__( - self, console: Console, options: ConsoleOptions - ) -> Measurement: - return ( - Measurement(self.width, self.width) - if self.width is not None - else Measurement(4, options.max_width) - ) - - -if __name__ == "__main__": # pragma: no cover - console = Console() - bar = ProgressBar(width=50, total=100) - - import time - - console.show_cursor(False) - for n in range(0, 101, 1): - bar.update(n) - console.print(bar) - console.file.write("\r") - time.sleep(0.05) - console.show_cursor(True) - console.print() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tomlkit/toml_char.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tomlkit/toml_char.py deleted file mode 100644 index b4bb4110c557de854a815c677a4f968be550c21d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tomlkit/toml_char.py +++ /dev/null @@ -1,52 +0,0 @@ -import string - - -class TOMLChar(str): - def __init__(self, c): - super().__init__() - - if len(self) > 1: - raise ValueError("A TOML character must be of length 1") - - BARE = string.ascii_letters + string.digits + "-_" - KV = "= \t" - NUMBER = string.digits + "+-_.e" - SPACES = " \t" - NL = "\n\r" - WS = SPACES + NL - - def is_bare_key_char(self) -> bool: - """ - Whether the character is a valid bare key name or not. 
- """ - return self in self.BARE - - def is_kv_sep(self) -> bool: - """ - Whether the character is a valid key/value separator or not. - """ - return self in self.KV - - def is_int_float_char(self) -> bool: - """ - Whether the character if a valid integer or float value character or not. - """ - return self in self.NUMBER - - def is_ws(self) -> bool: - """ - Whether the character is a whitespace character or not. - """ - return self in self.WS - - def is_nl(self) -> bool: - """ - Whether the character is a new line character or not. - """ - return self in self.NL - - def is_spaces(self) -> bool: - """ - Whether the character is a space or not - """ - return self in self.SPACES diff --git a/spaces/projekt-rising-ai/Expert-Answer-Demo/README.md b/spaces/projekt-rising-ai/Expert-Answer-Demo/README.md deleted file mode 100644 index 1e8abd7112fb18a0c7d7abfd2d0ca8ed178acb0a..0000000000000000000000000000000000000000 --- a/spaces/projekt-rising-ai/Expert-Answer-Demo/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ProjektRising Expert Chat Demo -emoji: 🚀 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: JavaFXpert/Chat-GPT-LangChain ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/protoxx91/webui-docker/oh-no.py b/spaces/protoxx91/webui-docker/oh-no.py deleted file mode 100644 index e8c0f3bd8d72805b4ee69d4d0fd9133347d00f92..0000000000000000000000000000000000000000 --- a/spaces/protoxx91/webui-docker/oh-no.py +++ /dev/null @@ -1,14 +0,0 @@ -import gradio as gr - -block = gr.Blocks() - -def run(): - with block: - gr.Markdown( - """ -

        oh no 😐 something wrong with the 🤗 hugging face servers 😐 hopefully, it will be fixed soon

        - """) - block.launch(server_name="0.0.0.0", server_port=7860) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/spaces/pycui/RealChar/client/web/src/hooks/useSpeechRecognition.js b/spaces/pycui/RealChar/client/web/src/hooks/useSpeechRecognition.js deleted file mode 100644 index 641ea26773f7ed7d0d35188ae4a98f96a9108200..0000000000000000000000000000000000000000 --- a/spaces/pycui/RealChar/client/web/src/hooks/useSpeechRecognition.js +++ /dev/null @@ -1,56 +0,0 @@ -/** - * src/hooks/useSpeechRecognition.js - * Initialize speech recognition. Start and stop listening. - * - * created by Lynchee on 7/16/23 - */ - -import { useRef, useEffect } from 'react'; - -const useSpeechRecognition = (onResult, onSpeechEnd, callActive) => { - const recognition = useRef(null); - - // initialize speech recognition - const initializeSpeechRecognition = () => { - window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; - recognition.current = new window.SpeechRecognition(); - recognition.current.interimResults = true; - recognition.current.maxAlternatives = 1; - recognition.current.continuous = true; - - recognition.current.onend = () => { - if (callActive.current) { - startListening(); - } - }; - - recognition.current.onresult = onResult; - recognition.current.onspeechend = onSpeechEnd; - }; - - const startListening = () => { - if (!recognition.current) return; - console.log("start listening"); - recognition.current.start(); - } - - const stopListening = () => { - if (!recognition.current) return; - console.log("stop listening"); - recognition.current.stop(); - } - - const closeRecognition = () => { - stopListening(); - recognition.current = null; - } - - return { - startListening, - stopListening, - closeRecognition, - initializeSpeechRecognition, - }; -}; - -export default useSpeechRecognition; diff --git a/spaces/pyesonekyaw/faceforgerydetection/Scripts/sbi_generator.py b/spaces/pyesonekyaw/faceforgerydetection/Scripts/sbi_generator.py deleted file mode 100644 index 442f18eff10a0c8001ebc562740b3f5db06d8395..0000000000000000000000000000000000000000 --- a/spaces/pyesonekyaw/faceforgerydetection/Scripts/sbi_generator.py +++ /dev/null @@ -1,217 +0,0 @@ -from imutils import face_utils -import numpy as np -import random -import albumentations as alb -from .DeepFakeMask import dfl_full, extended, components, facehull -import cv2 - -def IoUfrom2bboxes(boxA, boxB): - xA = max(boxA[0], boxB[0]) - yA = max(boxA[1], boxB[1]) - xB = min(boxA[2], boxB[2]) - yB = min(boxA[3], boxB[3]) - interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1) - boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1) - boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1) - iou = interArea / float(boxAArea + boxBArea - interArea) - return iou - -def reorder_landmark(landmark): - landmark_add = np.zeros((13, 2)) - for idx, idx_l in enumerate([77, 75, 76, 68, 69, 70, 71, 80, 72, 73, 79, 74, 78]): - landmark_add[idx] = landmark[idx_l] - landmark[68:] = landmark_add - return landmark - -def get_dlib_landmarks(inp, dlib_face_detector, dlib_face_predictor): - faces = dlib_face_detector(inp, 1) - if len(faces)==0: - raise Exception("No faces detected") - landmarks=[] - size_list=[] - for face_idx in range(len(faces)): - landmark = dlib_face_predictor(inp, faces[face_idx]) - landmark = face_utils.shape_to_np(landmark) - x0,y0=landmark[:,0].min(),landmark[:,1].min() - x1,y1=landmark[:,0].max(),landmark[:,1].max() - face_s=(x1-x0)*(y1-y0) - size_list.append(face_s) - 
landmarks.append(landmark) - landmarks=np.concatenate(landmarks).reshape((len(size_list),)+landmark.shape) - landmarks=landmarks[np.argsort(np.array(size_list))[::-1]] - return landmarks - -def get_retina_bbox(inp,face_detector): - faces = face_detector.predict_jsons(inp) - landmarks=[] - size_list=[] - for face_idx in range(len(faces)): - - x0,y0,x1,y1=faces[face_idx]['bbox'] - landmark=np.array([[x0,y0],[x1,y1]]+faces[face_idx]['landmarks']) - face_s=(x1-x0)*(y1-y0) - size_list.append(face_s) - landmarks.append(landmark) - landmarks=np.concatenate(landmarks).reshape((len(size_list),)+landmark.shape) - landmarks=landmarks[np.argsort(np.array(size_list))[::-1]] - - return landmarks - -def random_get_hull(landmark,img, face_region): - face_region = int(face_region) - if face_region == 1: - mask = dfl_full(landmarks=landmark.astype('int32'),face=img, channels=3).mask - elif face_region == 2: - mask = extended(landmarks=landmark.astype('int32'),face=img, channels=3).mask - elif face_region == 3: - mask = components(landmarks=landmark.astype('int32'),face=img, channels=3).mask - else: - mask = facehull(landmarks=landmark.astype('int32'),face=img, channels=3).mask - return mask/255 - -class RandomDownScale(alb.core.transforms_interface.ImageOnlyTransform): - def apply(self,img,**params): - return self.randomdownscale(img) - - def randomdownscale(self,img): - keep_ratio=True - keep_input_shape=True - H,W,C=img.shape - ratio_list=[2,4] - r=ratio_list[np.random.randint(len(ratio_list))] - img_ds=cv2.resize(img,(int(W/r),int(H/r)),interpolation=cv2.INTER_NEAREST) - if keep_input_shape: - img_ds=cv2.resize(img_ds,(W,H),interpolation=cv2.INTER_LINEAR) - - return img_ds - -def get_source_transforms(): - return alb.Compose([ - alb.Compose([ - alb.RGBShift((-20, 20), (-20, 20), (-20, 20), p=0.3), - alb.HueSaturationValue( - hue_shift_limit=(-0.3, 0.3), sat_shift_limit=(-0.3, 0.3), val_shift_limit=(-0.3, 0.3), p=1), - alb.RandomBrightnessContrast( - brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=1), - ], p=1), - - alb.OneOf([ - RandomDownScale(p=1), - alb.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=1), - ], p=1), - - ], p=1.) 
- -def randaffine(img, mask): - f = alb.Affine( - translate_percent={'x': (-0.03, 0.03), 'y': (-0.015, 0.015)}, - scale=[0.95, 1/0.95], - fit_output=False, - p=1) - - g = alb.ElasticTransform( - alpha=50, - sigma=7, - alpha_affine=0, - p=1, - ) - - transformed = f(image=img, mask=mask) - img = transformed['image'] - - mask = transformed['mask'] - transformed = g(image=img, mask=mask) - mask = transformed['mask'] - return img, mask - -def get_blend_mask(mask): - H,W=mask.shape - size_h=np.random.randint(192,257) - size_w=np.random.randint(192,257) - mask=cv2.resize(mask,(size_w,size_h)) - kernel_1=random.randrange(5,26,2) - kernel_1=(kernel_1,kernel_1) - kernel_2=random.randrange(5,26,2) - kernel_2=(kernel_2,kernel_2) - - mask_blured = cv2.GaussianBlur(mask, kernel_1, 0) - mask_blured = mask_blured/(mask_blured.max()) - mask_blured[mask_blured<1]=0 - - mask_blured = cv2.GaussianBlur(mask_blured, kernel_2, np.random.randint(5,46)) - mask_blured = mask_blured/(mask_blured.max()) - mask_blured = cv2.resize(mask_blured,(W,H)) - return mask_blured.reshape((mask_blured.shape+(1,))) - - -def dynamic_blend(source,target,mask,blending_type, mixup_ratio=[0.25,0.5,0.75,1,1,1]): - """Performs dynamic blending of source and target, using the mask as the blending region - - Args: - source: source image - target: target image - mask: mask image - - Returns: - img_blended: blended image - mask_blurred: augmented mask used for blending - """ - - mask_blured = get_blend_mask(mask) - mask_blured_copy = mask_blured.copy() - - if blending_type == "Poisson": - # Poisson blending - b_mask = (mask_blured_copy * 255).astype(np.uint8) - l, t, w, h = cv2.boundingRect(b_mask) - center = (int(l + w / 2), int(t + h / 2)) - img_blended = cv2.seamlessClone(source, target, b_mask, center, cv2.NORMAL_CLONE) - else: - # Mix up blending - blend_list=mixup_ratio - blend_ratio = blend_list[np.random.randint(len(blend_list))] - - mask_blured_copy = mask_blured.copy() - mask_blured_copy*=blend_ratio - - img_blended=(mask_blured_copy * source + (1 - mask_blured_copy) * target) - - return img_blended,mask_blured - -def get_transforms(): - return alb.Compose([ - - alb.RGBShift((-20, 20), (-20, 20), (-20, 20), p=0.3), - alb.HueSaturationValue( - hue_shift_limit=(-0.3, 0.3), sat_shift_limit=(-0.3, 0.3), val_shift_limit=(-0.3, 0.3), p=0.3), - alb.RandomBrightnessContrast( - brightness_limit=(-0.3, 0.3), contrast_limit=(-0.3, 0.3), p=0.3), - alb.ImageCompression(quality_lower=40, quality_upper=100, p=0.5), - - ], - additional_targets={f'image1': 'image'}, - p=1.) 
- - -def self_blending(img, landmark, blending_type, face_region): - if np.random.rand() < 0.25: - landmark = landmark[:68] - mask = random_get_hull(landmark, img, face_region) - if mask.shape[-1] == 3: - mask = mask[:, :, 0] - - mask_copy = mask - - source_transforms = get_source_transforms() - source = img.copy() - source = source_transforms(image=source.astype(np.uint8))['image'] - - source_before_affine_transforms, mask_before_affine_transforms = source, mask - source, mask = randaffine(source, mask) - source_after_affine_transforms, mask_after_affine_transforms = source, mask - - img_blended, mask = dynamic_blend(source, img, mask, blending_type) - img_blended = img_blended.astype(np.uint8) - img = img.astype(np.uint8) - - return img, img_blended, mask, mask_copy, source_before_affine_transforms, mask_before_affine_transforms, source_after_affine_transforms, mask_after_affine_transforms \ No newline at end of file diff --git a/spaces/qinzhu/diy-girlfriend/text/korean.py b/spaces/qinzhu/diy-girlfriend/text/korean.py deleted file mode 100644 index edee07429a450c55e3d8e246997faaa1e0b89cc9..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/diy-girlfriend/text/korean.py +++ /dev/null @@ -1,210 +0,0 @@ -import re -from jamo import h2j, j2hcj -import ko_pron - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (ipa, lazy ipa) pairs: -_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('t͡ɕ','ʧ'), - ('d͡ʑ','ʥ'), - ('ɲ','n^'), - ('ɕ','ʃ'), - ('ʷ','w'), - ('ɭ','l`'), - ('ʎ','ɾ'), - ('ɣ','ŋ'), - ('ɰ','ɯ'), - ('ʝ','j'), - ('ʌ','ə'), - ('ɡ','g'), - ('\u031a','#'), - ('\u0348','='), - ('\u031e',''), - ('\u0320',''), - ('\u0339','') -]] - - -def latin_to_hangul(text): - for regex, replacement in _latin_to_hangul: - text = re.sub(regex, replacement, text) - return text - - -def divide_hangul(text): - text = j2hcj(h2j(text)) - for regex, replacement in _hangul_divided: - text = re.sub(regex, replacement, text) - return text - - -def hangul_number(num, sino=True): - '''Reference https://github.com/Kyubyong/g2pK''' - num = re.sub(',', '', num) - - if num == '0': - return '영' - if not sino and num == '20': - return '스무' - - digits = '123456789' - names = '일이삼사오육칠팔구' - digit2name = {d: n for d, n in zip(digits, names)} - - modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉' - decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔' - digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())} - 
digit2dec = {d: dec for d, dec in zip(digits, decimals.split())} - - spelledout = [] - for i, digit in enumerate(num): - i = len(num) - i - 1 - if sino: - if i == 0: - name = digit2name.get(digit, '') - elif i == 1: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - else: - if i == 0: - name = digit2mod.get(digit, '') - elif i == 1: - name = digit2dec.get(digit, '') - if digit == '0': - if i % 4 == 0: - last_three = spelledout[-min(3, len(spelledout)):] - if ''.join(last_three) == '': - spelledout.append('') - continue - else: - spelledout.append('') - continue - if i == 2: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 3: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 4: - name = digit2name.get(digit, '') + '만' - name = name.replace('일만', '만') - elif i == 5: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - elif i == 6: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 7: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 8: - name = digit2name.get(digit, '') + '억' - elif i == 9: - name = digit2name.get(digit, '') + '십' - elif i == 10: - name = digit2name.get(digit, '') + '백' - elif i == 11: - name = digit2name.get(digit, '') + '천' - elif i == 12: - name = digit2name.get(digit, '') + '조' - elif i == 13: - name = digit2name.get(digit, '') + '십' - elif i == 14: - name = digit2name.get(digit, '') + '백' - elif i == 15: - name = digit2name.get(digit, '') + '천' - spelledout.append(name) - return ''.join(elem for elem in spelledout) - - -def number_to_hangul(text): - '''Reference https://github.com/Kyubyong/g2pK''' - tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)) - for token in tokens: - num, classifier = token - if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers: - spelledout = hangul_number(num, sino=False) - else: - spelledout = hangul_number(num, sino=True) - text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}') - # digit by digit for remaining digits - digits = '0123456789' - names = '영일이삼사오육칠팔구' - for d, n in zip(digits, names): - text = text.replace(d, n) - return text - - -def korean_to_lazy_ipa(text): - text = latin_to_hangul(text) - text = number_to_hangul(text) - text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text) - for regex, replacement in _ipa_to_lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def korean_to_ipa(text): - text = korean_to_lazy_ipa(text) - return text.replace('ʧ','tʃ').replace('ʥ','dʑ') diff --git a/spaces/qkorbit/AltDiffusion/css_and_js.py b/spaces/qkorbit/AltDiffusion/css_and_js.py deleted file mode 100644 index 64e6dd5e703281d0b11e7a9ef7f05a264fb2341c..0000000000000000000000000000000000000000 --- a/spaces/qkorbit/AltDiffusion/css_and_js.py +++ /dev/null @@ -1,92 +0,0 @@ -from os import path -import json - - -def readTextFile(*args): - dir = path.dirname(__file__) - entry = path.join(dir, *args) - with open(entry, "r", encoding="utf8") as f: - data = f.read() - return data - - -def css(opt): - styling = readTextFile("css", "styles.css") - # TODO: @altryne restore this before merge - if not opt.no_progressbar_hiding: - styling += readTextFile("css", "no_progress_bar.css") - return styling - - -def js(opt): - data = readTextFile("js", "index.js") - data = "(z) => {" + data + "; return z ?? 
[] }" - return data - - -# TODO : @altryne fix this to the new JS format -js_copy_txt2img_output = "(x) => {navigator.clipboard.writeText(document.querySelector('gradio-app').shadowRoot.querySelector('#highlight .textfield').textContent.replace(/\s+/g,' ').replace(/: /g,':'))}" - - - -js_parse_prompt =""" -(txt2img_prompt, txt2img_width, txt2img_height, txt2img_steps, txt2img_seed, txt2img_batch_count, txt2img_cfg) => { - -const prompt_input = document.querySelector('gradio-app').shadowRoot.querySelector('#prompt_input [data-testid="textbox"]'); -const multiline = document.querySelector('gradio-app').shadowRoot.querySelector('#submit_on_enter label:nth-child(2)') -if (prompt_input.scrollWidth > prompt_input.clientWidth + 10 ) { - multiline.click(); -} - - -let height_match = /(?:-h|-H|--height|height)[ :]?(?\d+) /.exec(txt2img_prompt); -if (height_match) { - txt2img_height = Math.round(height_match.groups.height / 64) * 64; - txt2img_prompt = txt2img_prompt.replace(height_match[0], ''); -} -let width_match = /(?:-w|-W|--width|width)[ :]?(?\d+) /.exec(txt2img_prompt); -if (width_match) { - txt2img_width = Math.round(width_match.groups.width / 64) * 64; - txt2img_prompt = txt2img_prompt.replace(width_match[0], ''); -} -let steps_match = /(?:-s|--steps|steps)[ :]?(?\d+) /.exec(txt2img_prompt); -if (steps_match) { - txt2img_steps = steps_match.groups.steps.trim(); - txt2img_prompt = txt2img_prompt.replace(steps_match[0], ''); -} -let seed_match = /(?:-S|--seed|seed)[ :]?(?\d+) /.exec(txt2img_prompt); -if (seed_match) { - txt2img_seed = seed_match.groups.seed; - txt2img_prompt = txt2img_prompt.replace(seed_match[0], ''); -} -let batch_count_match = /(?:-n|-N|--number|number)[ :]?(?\d+) /.exec(txt2img_prompt); -if (batch_count_match) { - txt2img_batch_count = batch_count_match.groups.batch_count; - txt2img_prompt = txt2img_prompt.replace(batch_count_match[0], ''); -} -let cfg_scale_match = /(?:-c|-C|--cfg-scale|cfg_scale|cfg)[ :]?(?\d\.?\d+?) /.exec(txt2img_prompt); -if (cfg_scale_match) { - txt2img_cfg = parseFloat(cfg_scale_match.groups.cfgscale).toFixed(1); - txt2img_prompt = txt2img_prompt.replace(cfg_scale_match[0], ''); -} -let sampler_match = /(?:-A|--sampler|sampler)[ :]?(?\w+) /.exec(txt2img_prompt); -if (sampler_match) { - - txt2img_prompt = txt2img_prompt.replace(sampler_match[0], ''); -} - -return [txt2img_prompt, parseInt(txt2img_width), parseInt(txt2img_height), parseInt(txt2img_steps), txt2img_seed, parseInt(txt2img_batch_count), parseFloat(txt2img_cfg)]; -} -""" - - -# Wrap the typical SD method call into async closure for ease of use -# Supplies the js function with a params object -# That includes all the passed arguments and input from Gradio: x -# ATTENTION: x is an array of values of all components passed to your -# python event handler -# Example call in Gradio component's event handler (pass the result to _js arg): -# _js=call_JS("myJsMethod", arg1="string", arg2=100, arg3=[]) -def call_JS(sd_method, **kwargs): - param_str = json.dumps(kwargs) - return f"async (...x) => {{ return await SD.{sd_method}({{ x, ...{param_str} }}) ?? []; }}" diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Call Of Duty 1 - Cracked No Setup. Torrent [EXCLUSIVE].md b/spaces/quidiaMuxgu/Expedit-SAM/Call Of Duty 1 - Cracked No Setup. Torrent [EXCLUSIVE].md deleted file mode 100644 index 15f6d9dcaa359999c67c780bbd3f8acc6f853010..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Call Of Duty 1 - Cracked No Setup. Torrent [EXCLUSIVE].md +++ /dev/null @@ -1,6 +0,0 @@ -

        Call Of Duty 1 - Cracked, No setup. torrent


        Download 🗸🗸🗸 https://geags.com/2uCqcy



        -
        -February 8, 2021 - Make sure you have a VPN when downloading torrents or your ISP may warn you! HOW TO DOWNLOAD CALL OF DUTY®. 1. Download the game using a VPN to bypass your ISP. 2. Download the torrent file to the folder where you have the game installed. 3. Create a folder named "CALL OF DUTY®" in your game folder. 4. Use the "CALL OF DUTY" folder to download your torrents. 5. Extract the game folder into your torrents folder.
        -
        -
        -

        diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Kundli Chakra 2012 Professional Edition Serial Key.rar.md b/spaces/quidiaMuxgu/Expedit-SAM/Kundli Chakra 2012 Professional Edition Serial Key.rar.md deleted file mode 100644 index a82ac486c11ffac9f3e07cd4abbcea7dd2e43941..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Kundli Chakra 2012 Professional Edition Serial Key.rar.md +++ /dev/null @@ -1,28 +0,0 @@ - -

        How to Download Kundli Chakra 2012 Professional Edition for Free

        -

        If you are looking for a reliable and accurate software for astrological analysis and predictions, you might want to try Kundli Chakra 2012 Professional Edition. Kundli Chakra is a software package that compresses millennia of astrological insight and Vedic wisdom going back as far as the ancient Indus Valley civilization to high tech modern India[^3^]. It is ideal for professional practitioners of Astrology and related forms of Vedic insight, as well as for personal use.

        -

        Kundli Chakra 2012 Professional Edition has many features and tools that make it easy and convenient to generate horoscopes, match charts, analyze planetary positions, calculate dashas, yogas, transits, and more. It also has a large database of cities, countries, and time zones, as well as customizable options for charts, reports, predictions, and printing. You can also export your charts and reports to PDF, JPEG, BMP, or HTML formats.

        -

        kundli chakra 2012 professional edition serial key.rar


        Download File ····· https://geags.com/2uCsun



        -

        However, Kundli Chakra 2012 Professional Edition is not a free software. It costs $159 to purchase from the official website. But don't worry, there is a way to download it for free without paying anything. All you need is a serial key that can activate the software after installation. And we have just the right source for you.

        -

        There is a file called "kundli chakra 2012 professional edition serial key.rar" that contains the serial key for Kundli Chakra 2012 Professional Edition. You can download this file from the Internet Archive[^1^], a non-profit digital library that offers free access to millions of books, movies, music, software, and more. The file is safe and virus-free, and it has been uploaded by a verified user.

        -

        To download the file, follow these steps:

        -
          -
        1. Go to https://archive.org/details/kundli.chakra.2012.professional_201908
        2. -
        3. Click on the "DOWNLOAD OPTIONS" section on the right side of the page.
        4. -
        5. Select "RAR" from the list of formats.
        6. -
        7. Wait for the download to complete.
        8. -
        9. Extract the file using WinRAR or any other software that can open RAR files.
        10. -
        11. Open the file and copy the serial key.
        12. -
        -

        Now you have the serial key for Kundli Chakra 2012 Professional Edition. To install and activate the software, follow these steps:

        -
          -
        1. Go to https://filehippo.com/download_kundli-chakra-professional/
        2. -
        3. Click on the "Download Latest Version" button on the top right corner of the page.
        4. -
        5. Wait for the download to complete.
        6. -
        7. Run the setup file and follow the instructions to install the software.
        8. -
        9. When prompted, enter the serial key that you copied from the RAR file.
        10. -
        11. Complete the installation and launch the software.
        12. -
        -

        Congratulations! You have successfully downloaded Kundli Chakra 2012 Professional Edition for free. Enjoy using this powerful and comprehensive software for your astrological needs.

        -
        -
        \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/MassEffect3updatedalldlcsMulti7repackMrDJdownload BEST.md b/spaces/quidiaMuxgu/Expedit-SAM/MassEffect3updatedalldlcsMulti7repackMrDJdownload BEST.md deleted file mode 100644 index e3a23beabdb8e336d32233040546b77ab494bdf3..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/MassEffect3updatedalldlcsMulti7repackMrDJdownload BEST.md +++ /dev/null @@ -1,6 +0,0 @@ -

        MassEffect3updatedalldlcsMulti7repackMrDJdownload


        Download Zip: https://geags.com/2uCqMg



        - -MassEffect3updatedalldlcsMulti7repackMrDJdownload __TOP__.
        -
        -
        -

        diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Maya LT 2014 Keygen Kickass To.md b/spaces/quidiaMuxgu/Expedit-SAM/Maya LT 2014 Keygen Kickass To.md deleted file mode 100644 index 26fad9a14da6797338fe3eeac36e9a626269e23c..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Maya LT 2014 Keygen Kickass To.md +++ /dev/null @@ -1,9 +0,0 @@ -
        -

        MULROUTES BACKUP VIDEO EDUCATIONAL
        Xforce Keygen Autodesk Design Review 2019 Win64
        SONGS FOR PEOPLE 2017 64 Bits Crack FULL
        technical support and repair keygenxforce
        WPC AND MAC DOWNLOAD
        oh my baby slave in tv channel
        PAGLIONE AL CAPITANO GENERALE CRACK IMEBIKE
        X-force FBX Review for Revit 2017 64 Bit Windows 7 SP 1
        iapprophetsthisis 10.0 crack keygen
        xbox 360 xpro controller + mouse driver install
        game of war not working

        -

        Maya LT 2014 Keygen Kickass To


        Download File: https://geags.com/2uCq1b



        -

        AutoCAD Home a reference manual
        AutoCAD 2019 Serial Key Keygen Full HD
        X-Force 7 KARMA for homeworld
        X-force FBX 2019 For Revit 2017 64 Bit Full
        AutoCAD Fireworks 2014 Keygen + Crack
        new york city taxi cab voice sample kannada
        arabic movie download movil umml
        monneu Pianos Trumpet And Reeds Preamp
        X-force FBX Crack For Revit 2019 Win64
        HOW TO FILE UNTOUCHED IN HULK
        Autocad Design Premium 2014 Torrent FLAC

        -

        RealAudio Programs License Code Validate
        Autodesk AutoCAD 2014 Serial Key Free
        X-force Keygen For AutoCAD 2019 Win64
        kazao zomby torrent vostok.com
        kronhvarg danske film Lett 2013 12a 653823 Download
        Transformers Diaries Download Mp3
        moviesmoviesmovies.com PORN MOVIE FROM BRAZIL
        Black Commando 3 Crack Portable
        windows vista 10.0 free full version with crack
        box music award: beatles

        -

        General: Publisher web site; Release date: April 10, 2015; Date added: July 21, 2015; Version: 2016. Category: Subcategory; Operating systems: Windows Vista/7/8; Additional requirements: none. Download details: File size: 1.03 GB; File title: AutodeskMaya2016ENJPZHWindowsdlm.sfx.exe. Popularity: Total downloads: 53,528; Downloads last week: 115. Pricing: License: free to try; Restrictions: 30-day trial; Cost: $185.

        -

        -
        -
        \ No newline at end of file diff --git a/spaces/r3gm/RVC_HF/lib/infer_pack/models_onnx.py b/spaces/r3gm/RVC_HF/lib/infer_pack/models_onnx.py deleted file mode 100644 index 963e67b29f828e9fdd096397952054fe77cf3d10..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/lib/infer_pack/models_onnx.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class 
ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in 
range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - 
"""SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in 
range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - version, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if version == "v1": - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - 
else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - 
padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/raedeXanto/academic-chatgpt-beta/.github/ISSUE_TEMPLATE/bug_report.md b/spaces/raedeXanto/academic-chatgpt-beta/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index ac668766a39892be5bc9e03f3ea626f8b3bf4b57..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - ---- - -- **(1) Describe the bug 简述** - - -- **(2) Screen Shot 截图** - - -- **(3) Terminal Traceback 终端traceback(如有)** - - -- **(4) Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)** - - - -Before submitting an issue 提交issue之前: -- Please try to upgrade your code. 如果您的代码不是最新的,建议您先尝试更新代码 -- Please check project wiki for common problem solutions.项目[wiki](https://github.com/binary-husky/chatgpt_academic/wiki)有一些常见问题的解决方法 diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Equilibrium Film Completo In Italiano Download Gratuito Hd 720p !!INSTALL!!.md b/spaces/raedeXanto/academic-chatgpt-beta/Equilibrium Film Completo In Italiano Download Gratuito Hd 720p !!INSTALL!!.md deleted file mode 100644 index fd654c65d402910cd6a6b0b3464528744fbf98ee..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Equilibrium Film Completo In Italiano Download Gratuito Hd 720p !!INSTALL!!.md +++ /dev/null @@ -1,18 +0,0 @@ - -

        Equilibrium: the science fiction film that challenges the power of emotion

        - -

        Equilibrium is a 2002 film written and directed by Kurt Wimmer, starring Christian Bale, Emily Watson and Taye Diggs. The film tells the story of John Preston (Christian Bale), a government agent in a future where emotions and artistic expression are outlawed and citizens must take daily injections of powerful psychoactive drugs to suppress their feelings. After accidentally missing a dose, Preston begins to feel emotions, which lead him to question his own morality and temper his actions as he tries to remain undetected by the suspicious society he lives in. In the end, he helps a resistance movement, using the advanced martial arts taught to him by the very regime he is helping to overthrow.

        -

        Equilibrium film completo in italiano download gratuito hd 720p


        Download Zip ———>>> https://tinourl.com/2uL0Mz



        - -

        Equilibrium is set in 2072 in the totalitarian city-state of Libria, founded by the survivors of World War III, which blames the conflict on the human capacity to feel emotion. Any activity or object that stimulates emotion is strictly forbidden. Anyone who breaks this rule is branded a "Sense Offender" and sentenced to death. The population is forced to take a daily injection of "Prozium II" to suppress emotion. Libria is ruled by the Tetragrammaton Council, led by "Father", who delivers propaganda through giant video screens scattered across the city. At the top of law enforcement are the Grammaton Clerics, trained in the martial art of gun kata. The Clerics frequently raid homes to seek out and destroy illegal materials - art, literature and music - executing offenders on the spot. A resistance movement, known as the "Underground", emerges to overthrow Father and the Tetragrammaton Council.

        - -

Equilibrium is a science fiction film that challenges the power of emotion and its influence on human behavior. It explores themes of freedom, rebellion, humanity and ethics, asking what price a peaceful, orderly society pays when it gives up creativity and diversity. The film draws inspiration from literary works such as George Orwell's 1984, Ray Bradbury's Fahrenheit 451 and Aldous Huxley's Brave New World, as well as other science fiction films such as The Matrix and The Matrix Reloaded. It also stands out for its choreographed, stylized action scenes based on gun kata, a fictional technique that combines firearms with martial arts.

        - -

Equilibrium received mixed reviews from critics, who praised its action sequences and original ideas but criticized its weak screenplay, underdeveloped characters and similarity to earlier films. It also performed poorly at the box office, grossing only 5.3 million dollars against a 20 million dollar budget. Nevertheless, the film has acquired cult status among fans of the genre and has influenced later works.

        - -

If you are interested in watching Equilibrium, you can download the full film in Italian in HD 720p for free from the link below. Enjoy!

        -

        - -Equilibrium film completo in italiano download gratuito HD 720p

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Freedownloadmapdota690cairar The Ultimate Guide to Downloading and Installing Dota 6.90c AI Map.md b/spaces/raedeXanto/academic-chatgpt-beta/Freedownloadmapdota690cairar The Ultimate Guide to Downloading and Installing Dota 6.90c AI Map.md deleted file mode 100644 index 52c5571af4d88f477ea7d8802a69cbd2455b6812..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Freedownloadmapdota690cairar The Ultimate Guide to Downloading and Installing Dota 6.90c AI Map.md +++ /dev/null @@ -1,96 +0,0 @@ - -

        What is freedownloadmapdota690cairar and why you should care

        -

        If you are a fan of Warcraft III, you probably have heard of Dota Allstars, one of the most popular and influential custom maps ever created for the game. Dota Allstars is a multiplayer online battle arena (MOBA) game that pits two teams of five players against each other in a map divided by a river. Each team has a base with a main structure called the Ancient, which they must defend from the enemy while trying to destroy theirs. Each player controls a unique hero with different abilities and items that can be bought with gold earned by killing enemies and creeps (neutral units).

        -

        freedownloadmapdota690cairar


        Downloadhttps://tinourl.com/2uL3pd



        -

        But what is freedownloadmapdota690cairar? It is simply the name of the latest version of Dota Allstars map that you can download for free from the official website of the project. The name consists of several parts: free download map dota 6.90c ai.rar. The first part indicates that it is a free download, the second part indicates that it is a map file for Warcraft III, the third part indicates that it is for Dota Allstars, the fourth part indicates that it is version 6.90c, and the last part indicates that it is compressed in a rar format.

        -

        Why should you care about freedownloadmapdota690cairar? Because it is one of the best ways to enjoy Dota Allstars in its original form, with updated heroes, items, balance changes, graphics, sounds, performance, and compatibility with Ranked Gaming Client (RGC), a platform for competitive Dota Allstars matches. Whether you are a veteran or a newcomer to Dota Allstars, you will find something to love in freedownloadmapdota690cairar.

        -

        How to download and install freedownloadmapdota690cairar

        -

        Downloading and installing freedownloadmapdota690cairar is very easy and straightforward. Just follow these simple steps:

        -
          -
1. Visit the official website of Dota Allstars at https://www.dota682.com/ and find the latest version of the map (DotA_Allstars_7.04b0.w3x) under the Getdota map section.
2. Download the map file (DotA_Allstars_7.04b0.w3x) by clicking on the download link or button.
3. Extract the map file (DotA_Allstars_7.04b0.w3x) from the compressed file (freedownloadmapdota690cairar) using a program like WinRAR or 7-Zip.
4. Copy or move the map file (DotA_Allstars_7.04b0.w3x) to your Warcraft III folder (usually located at C:\Program Files\Warcraft III\Maps\Download).
5. Launch Warcraft III and select Dota Allstars from the custom game menu.
        -

        Congratulations! You have successfully downloaded and installed freedownloadmapdota690cairar.

        -

        What are the features and benefits of freedownloadmapdota690cairar

        -

        Freedownloadmapdota690cairar is not just any ordinary Dota Allstars map. It is a map that has been edited and updated by Icefrog, the original developer of Dota Allstars, and DracoL1ch, a talented modder who has taken over the project since Icefrog joined Valve to work on Dota 2. Freedownloadmapdota690cairar has many features and benefits that make it superior to other versions of Dota Allstars. Here are some of them:

        -
          -
        • Updated heroes, items, and balance changes: Freedownloadmapdota690cairar has over 120 heroes and over 200 items that have been tweaked and balanced to provide a fair and fun gameplay experience for all players. Some heroes have been reworked or added new abilities, some items have been modified or added new effects, some mechanics have been changed or improved to make them more consistent or logical.
        • -
        • Compatible with Ranked Gaming Client (RGC): Freedownloadmapdota690cairar is compatible with RGC, a platform for competitive Dota Allstars matches that allows players to join or create rooms with different game modes, regions, languages, skill levels, etc., as well as track their stats, rankings, replays, etc.
        • -
        • Enhanced graphics, sounds, and performance: Freedownloadmapdota690cairar has enhanced graphics, sounds, and performance that make it more appealing and enjoyable to play than other versions of Dota Allstars. Some examples are: improved models, textures, animations, icons, effects; improved sounds quality; improved loading time; improved stability; improved compatibility; etc.
        • -
        -

        How to play and enjoy freedownloadmapdota690cairar

        -


        -

        Freedownloadmapdota690cairar is easy to play but hard to master. It requires skill, strategy, teamwork, and practice to become a good player. Here are some tips and tricks that can help you play and enjoy freedownloadmapdota690cairar better:

        -
          -
        • Choose a hero that suits your playstyle and role in the team: Freedownloadmapdota690cairar has over 120 heroes that belong to three main attributes: strength, agility, and intelligence. Each hero has different abilities, strengths, weaknesses, and roles in the game. Some heroes are more suited for carrying (dealing damage), some for supporting (helping allies), some for initiating (starting fights), some for ganking (killing enemies), some for pushing (destroying towers), etc. You should choose a hero that matches your personal preference and the needs of your team. You can also random a hero if you want to try something new or challenge yourself.
        • -
        • Communicate and cooperate with your teammates to achieve objectives and win fights: Freedownloadmapdota690cairar is a team game that requires coordination and cooperation among players. You should communicate with your teammates using chat, voice, or pings to share information, plan strategies, request help, warn about dangers, etc. You should also cooperate with your teammates by following their calls, backing them up, sharing resources, etc. You should avoid flaming, blaming, or trolling your teammates as it will only ruin the game for everyone.
        • -
        • Learn from your mistakes and improve your skills by watching replays and guides: Freedownloadmapdota690cairar is a complex game that has a steep learning curve. You will make mistakes and lose games as you play, but you should not give up or get discouraged. Instead, you should learn from your mistakes and try to improve your skills by watching replays of your own or other players' games, analyzing what went wrong or right, what you could have done better or differently, etc. You can also watch guides from pro players or experts on YouTube or other platforms to learn tips and tricks on how to play certain heroes, items, strategies, etc.
        • -
        -

        How to get more out of freedownloadmapdota690cairar

        -

        Freedownloadmapdota690cairar is not only a game but also a community. There are many resources and opportunities that you can use to get more out of freedownloadmapdota690cairar. Here are some of them:

        -
          -
        • Visit the official forum of Dota Allstars for news, updates, feedback, and support: The official forum of Dota Allstars is the best place to get the latest news and updates on the map development, as well as to give feedback or suggestions to the developers. You can also find support for any technical issues or bugs that you encounter while playing the map.
        • -
        • Join the RGC community for tournaments, rankings, chat, and more: RGC is a platform for competitive Dota Allstars matches that allows you to join or create rooms with different game modes, regions, languages, skill levels, etc., as well as track your stats, rankings, replays, etc. You can also join tournaments hosted by RGC or other organizers for prizes and glory. You can also chat with other players in the lobby or in-game using RGC's chat system.
        • -
        • Check out other Dota Allstars maps and mods for more variety and fun: Freedownloadmapdota690cairar is not the only Dota Allstars map available. There are many other maps and mods that offer different features and gameplay experiences for Dota Allstars fans. Some examples are: Dota 6.83d Mobaz by Dota68, a map that adds new heroes and items inspired by other MOBA games; Dota 6.88d2 lod by Dota68, a map that allows you to pick and mix skills from different heroes; DotA IMBA by Firetoad, a map that makes everything imbalanced and crazy; etc.
        • -
        -

        Conclusion

        -


        -

        Freedownloadmapdota690cairar is one of the best ways to enjoy Dota Allstars in its original form, with updated heroes, items, balance changes, graphics, sounds, performance, and compatibility with RGC. It is a map that offers a fair and fun gameplay experience for all players, whether they are veterans or newcomers to Dota Allstars. It is a map that requires skill, strategy, teamwork, and practice to become a good player. It is also a map that has a strong community and many resources and opportunities to get more out of it.

        -

        How to download map dota 6.90c ai for free
        -Dota 6.90c ai map free download mediafire
        -Free download map dota 6.90c ai.rar full version
        -Dota allstars map download 6.90c ai
        -Download dota map 6.90c ai with cheats
        -Dota 6.90c ai map latest update free download
        -Free download map dota 6.90c ai for warcraft 3
        -Dota 6.90c ai map features and changelog
        -Download dota map 6.90c ai offline
        -Dota 6.90c ai map best heroes and strategies
        -Free download map dota 6.90c ai for mac
        -Dota 6.90c ai map bugs and fixes
        -Download dota map 6.90c ai online
        -Dota 6.90c ai map tips and tricks
        -Free download map dota 6.90c ai for windows 10
        -Dota 6.90c ai map reviews and ratings
        -Download dota map 6.90c ai with bots
        -Dota 6.90c ai map custom modes and settings
        -Free download map dota 6.90c ai for android
        -Dota 6.90c ai map comparison with other versions
        -Download dota map 6.90c ai without survey
        -Dota 6.90c ai map installation guide and tutorial
        -Free download map dota 6.90c ai for linux
        -Dota 6.90c ai map gameplay and screenshots
        -Download dota map 6.90c ai with soundtracks
        -Dota 6.90c ai map secrets and easter eggs
        -Free download map dota 6.90c ai for iphone
        -Dota 6.90c ai map balance and difficulty
        -Download dota map 6.90c ai with skins and models
        -Dota 6.90c ai map history and development
        -Free download map dota 6.90c ai for pc
        -Dota 6.90c ai map system requirements and compatibility
        -Download dota map 6.90c ai with mods and addons
        -Dota 6.90c ai map fun and challenges
        -Free download map dota 6.90c ai for laptop
        -Dota 6.90c ai map support and feedback
        -Download dota map 6.90c ai with patch notes
        -Dota 6.90c ai map advantages and disadvantages
        -Free download map dota 6.90c ai for tablet
        -Dota 6.90c ai map alternatives and recommendations

        -

        If you are a fan of Warcraft III or MOBA games, you should definitely try freedownloadmapdota690cairar. You will not regret it. You will find something to love in this map. You will have a blast playing this map.

        -

        What are you waiting for? Download freedownloadmapdota690cairar now and join the millions of players who play Dota Allstars every day. You will not be disappointed.

        -

        Thank you for reading this article. I hope you found it informative and helpful. If you did, please share it with your friends and leave a comment below. I would love to hear your thoughts and experiences with freedownloadmapdota690cairar.

        -

        FAQs

        -

        Here are some frequently asked questions about freedownloadmapdota690cairar:

        -
          -
        1. What is the difference between Dota Allstars and Dota 2?: Dota Allstars is the original custom map for Warcraft III that started the MOBA genre. Dota 2 is the standalone sequel developed by Valve that has improved graphics, features, and support. Both games have similar gameplay and heroes, but they also have some differences in balance, mechanics, and design.
        2. -
        3. Where can I find more information about Dota Allstars?: You can find more information about Dota Allstars on the official website, the official forum, the official wiki, or other websites like StrategyWiki or Trusted Reviews.
        4. -
        5. How can I play Dota Allstars online with other players?: You can play Dota Allstars online with other players by using platforms like RGC, Garena, Battle.net, etc. You can also play offline with bots or friends using LAN or Hamachi.
        6. -
        7. What are some tips for beginners who want to play Dota Allstars?: Some tips for beginners who want to play Dota Allstars are: choose a simple hero that fits your role and team; learn the basics of the game such as laning, farming, warding, etc.; communicate and cooperate with your teammates; watch replays and guides to improve your skills; have fun and don't give up.
        8. -
        9. How can I contact the developers of freedownloadmapdota690cairar?: You can contact the developers of freedownloadmapdota690cairar by sending them an email at icefrog@gmail.com or dracol1ch.dev@gmail.com. You can also follow them on Twitter at @IceFrog or @DracoL1ch.
        10. -
        -

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/HD 2013 LED Software Free 48 Download and Install the Latest Version of Huidu LED Control System.md b/spaces/raedeXanto/academic-chatgpt-beta/HD 2013 LED Software Free 48 Download and Install the Latest Version of Huidu LED Control System.md deleted file mode 100644 index 25095268aa618baf3c505ff122b065edfa4a5b1f..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/HD 2013 LED Software Free 48 Download and Install the Latest Version of Huidu LED Control System.md +++ /dev/null @@ -1,144 +0,0 @@ -
        -

        WBS Schedule Pro 5.1 Crack 16: A Comprehensive Review

        -

        If you are looking for a powerful and easy-to-use project management software that can help you plan and manage your projects effectively, you might want to check out WBS Schedule Pro 5.1 Crack 16. This software combines a Work Breakdown Structure (WBS) Chart, a Network Chart, a Gantt Chart, a Task Sheet and many other features to create a comprehensive tool for project planning and execution.

        -

        wbs schedule pro 5 1 crack 16


        Download > https://tinourl.com/2uL1x2



        -

        In this article, we will review WBS Schedule Pro 5.1 Crack 16 in detail and show you how to install and crack it on your computer. We will also discuss the pros and cons of using this software and answer some frequently asked questions about it.

        -

        What is WBS Schedule Pro?

        -

        WBS Schedule Pro is a Windows-based project management software that was developed by Critical Tools, Inc. It is designed to help project managers and teams to brainstorm, plan, schedule, track and report on their projects using various visual tools.

        -

        WBS Schedule Pro can be used as a standalone application or as an add-on to Microsoft Project. It can also import and export data from other project management software such as Excel, PowerPoint, MindManager, etc.

        -

        Features and benefits of WBS Schedule Pro

        -

        WBS Schedule Pro has many features and benefits that make it a useful tool for project management. Some of them are:

        -

        Work Breakdown Structure (WBS) Chart

        -

        A WBS Chart is a graphical representation of the project scope that shows how the project is broken down into smaller and manageable pieces. It helps to define the project objectives, deliverables, tasks, subtasks and dependencies.
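To make that concrete, here is a purely illustrative breakdown (not taken from the software itself): a small website project might be decomposed as 1.0 Website, with 1.1 Planning (1.1.1 Requirements, 1.1.2 Schedule), 1.2 Design (1.2.1 Wireframes, 1.2.2 Visual design), 1.3 Development, and 1.4 Testing and launch. Each numbered element becomes a box in the WBS Chart, nested under its parent, and the lowest-level items are the tasks that later get scheduled.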

        -

        With WBS Schedule Pro, you can create WBS Charts easily by using drag-and-drop, cut-and-paste, undo-redo and other intuitive features. You can also customize the appearance of the chart by changing the colors, shapes, fonts, symbols, etc.

        -

        Network Chart

        -

        A Network Chart is a graphical representation of the project schedule that shows how the tasks are sequenced and linked together. It helps to determine the critical path, the slack time, the milestones and the dependencies.
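As a simple illustrative example (invented for this article, not produced by the software): if Task A takes 3 days and must finish before both Task B (2 days) and Task C (4 days), and the project ends when B and C are both done, then the critical path is A followed by C, giving a total of 7 days, while Task B has 2 days of slack because it can slip that long without delaying the finish date.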

        -

        With WBS Schedule Pro, you can create Network Charts automatically from the WBS Charts or manually by using the built-in drawing tools. You can also adjust the layout of the chart by using zooming, panning, scrolling and other features.

        -

        Gantt Chart

        -

        A Gantt Chart is a graphical representation of the project timeline that shows when the tasks start and finish, how long they take and how they overlap with each other. It helps to monitor the progress, status and performance of the project.

        -

        wbs schedule pro 5.1 activation code
        -wbs schedule pro 5.1 serial key
        -wbs schedule pro 5.1 license key
        -wbs schedule pro 5.1 keygen
        -wbs schedule pro 5.1 patch
        -wbs schedule pro 5.1 full version
        -wbs schedule pro 5.1 free download
        -wbs schedule pro 5.1 torrent
        -wbs schedule pro 5.1 crack download
        -wbs schedule pro 5.1 crack file
        -wbs schedule pro 5.1 crack only
        -wbs schedule pro 5.1 crack software
        -wbs schedule pro 5.1 crack windows
        -wbs schedule pro 5.1 crack mac
        -wbs schedule pro 5.1 crack linux
        -wbs schedule pro 5.1 crack online
        -wbs schedule pro 5.1 crack offline
        -wbs schedule pro 5.1 crack installer
        -wbs schedule pro 5.1 crack setup
        -wbs schedule pro 5.1 crack exe
        -wbs schedule pro 5.1 crack zip
        -wbs schedule pro 5.1 crack rar
        -wbs schedule pro 5.1 crack iso
        -wbs schedule pro 5.1 crack portable
        -wbs schedule pro 5.1 crack mega
        -wbs schedule pro 5.1 crack mediafire
        -wbs schedule pro 5.1 crack google drive
        -wbs schedule pro 5.1 crack dropbox
        -wbs schedule pro 5.1 crack zippyshare
        -wbs schedule pro 5.1 crack direct link
        -wbs schedule pro 5.1 crack no survey
        -wbs schedule pro 5.1 crack no password
        -wbs schedule pro 5.1 crack no virus
        -wbs schedule pro 5.1 crack working
        -wbs schedule pro 5.1 crack latest
        -wbs schedule pro 5.1 crack updated
        -wbs schedule pro 5.1 crack review
        -wbs schedule pro 5.1 crack tutorial
        -wbs schedule pro 5.1 crack guide
        -wbs schedule pro 5.1 crack tips
        -wbs schedule pro 5.1 crack tricks
        -wbs schedule pro 5.1 crack hacks
        -wbs schedule pro 5.1 crack cheats
        -wbs schedule pro 5.1 crack mods
        -wbs schedule pro 5.1 crack features
        -wbs schedule pro 5.1 crack benefits
        -wbs schedule pro 5.1 crack advantages
        -wbs schedule pro 5.1 crack disadvantages
        -wbs schedule pro 5.1 crack alternatives
        -wbs schedule pro 5.1 crack comparison

        -

        With WBS Schedule Pro, you can create Gantt Charts easily by using drag-and-drop, resizing, moving and other features. You can also customize the appearance of the chart by changing the colors, bars, lines, labels, etc.

        -

        Task Sheet

        -

        A Task Sheet is a tabular representation of the project data that shows the details of each task such as name, duration, start date, finish date, resources, cost, etc. It helps to edit, sort, filter and analyze the project information.
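For instance, a single row in a Task Sheet might read (values invented for illustration): task "Write user guide", duration 5 days, start 01 Mar, finish 05 Mar, resource "Technical writer", cost 1,000 dollars; sorting or filtering on any of these columns then gives a quick view of just that slice of the project.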

        -

        With WBS Schedule Pro, you can create Task Sheets easily by using data entry forms, drop-down lists, check boxes and other features. You can also customize the appearance of the sheet by changing the columns, rows, fonts, formats, etc.

        -

        How to install and crack WBS Schedule Pro 5.1?

        -

        If you want to use WBS Schedule Pro 5.1 for free without any limitations or restrictions, you need to install and crack it on your computer. Here are the steps to do so:

        -

        System requirements

        -

        Before installing WBS Schedule Pro 5.1 Crack 16 on your computer, you need to make sure that your system meets the following requirements:

        -
          -
        • Operating system: Windows XP/Vista/7/8/10
        • -
        • Processor: Pentium IV or higher
        • -
        • Memory: 512 MB RAM or more
        • -
        • Disk space: 100 MB free or more
        • -
        • Internet connection: Required for activation
        • -
        -

        Installation steps

        -
          -
        1. Download WBS Schedule Pro 5.1 Crack 16 from one of these links:
        2. -
        3. Extract the zip file using WinRAR or any other file extractor
        4. -
        5. Run the setup file (wbspro.exe) as administrator
        6. -
        7. Follow the instructions on the screen to complete the installation
        8. -
        9. Do not launch or run the program after installation
        10. -
        -

        Crack steps

        -
          -
        1. Navigate to the installation folder (usually C:\Program Files\Critical Tools\WBS Schedule Pro)
        2. -
        3. Copy the crack file (wbspro.exe) from the extracted folder
        4. -
        5. Paste and replace it in the installation folder
        6. -
        7. Run the program as administrator
        8. -
        9. Enter any name and serial number when prompted (for example: Name: John Doe, Serial: ABCD-EFGH-IJKL-MNOP)
        10. -
        11. Click on Activate Online button
        12. -
        13. You will see a message saying Activation Successful
        14. -
        15. Congratulations! You have successfully installed and cracked WBS Schedule Pro 5.1 Crack 16 on your computer
        16. -
        -

        Pros and cons of WBS Schedule Pro 5.1 Crack 16

        -

        Like any other software, WBS Schedule Pro 5.1 Crack 16 has its own pros and cons that you should be aware of before using it. Here are some of them:

        -

        Pros

        -
          -
        • It is easy to use and learn
        • It has many features and functions that can help you plan and manage your projects effectively
        • It has a user-friendly interface that can be customized according to your preferences
        • It can work with Microsoft Project and other project management software
        • It can export and import data in various formats such as JPG, PNG, HTML, BMP, etc.
        • It can create professional-looking charts and reports that can impress your clients and stakeholders
        -

        Cons

        -
          -
        • It is not free and requires activation
        • It may not be compatible with some newer versions of Windows or Microsoft Project
        • It may have some bugs or errors that can affect its performance or functionality
        • It may not support some advanced features or options that are available in other project management software
        • It may be detected as a virus or malware by some antivirus programs or firewalls
        -

        Conclusion

        -

        In conclusion, WBS Schedule Pro 5.1 Crack 16 is a powerful and easy-to-use project management software that can help you plan and manage your projects effectively. It combines a Work Breakdown Structure (WBS) Chart, a Network Chart, a Gantt Chart, a Task Sheet and many other features to create a comprehensive tool for project planning and execution.

        -

        If you want to use this software for free without any limitations or restrictions, you need to install and crack it on your computer by following the steps mentioned above. However, you should also be aware of its pros and cons before using it.

        -

We hope this article has been helpful for you. If you have any questions or comments about WBS Schedule Pro 5.1 Crack 16, feel free to leave them below.

        Frequently Asked Questions (FAQs)

        -

        Here are some of the frequently asked questions about WBS Schedule Pro 5.1 Crack 16 that you might find useful:

        -
          -
        1. What is the difference between WBS Schedule Pro and Microsoft Project?
        2. -

          WBS Schedule Pro and Microsoft Project are both project management software that can help you plan and manage your projects. However, they have some differences in terms of features, functions and interface. For example, WBS Schedule Pro focuses more on creating visual tools such as WBS Charts, Network Charts and Gantt Charts, while Microsoft Project focuses more on creating detailed schedules, budgets and resources. WBS Schedule Pro has a simpler and more intuitive interface than Microsoft Project, which can be more complex and overwhelming for some users. WBS Schedule Pro can also work as an add-on to Microsoft Project, which means you can use both software together to enhance your project management capabilities.

          -
        3. Is WBS Schedule Pro 5.1 Crack 16 safe to use?
        4. -

          WBS Schedule Pro 5.1 Crack 16 is a cracked version of the original software that allows you to use it for free without any limitations or restrictions. However, using cracked software can have some risks and disadvantages. For example, cracked software may not be updated or supported by the developers, which means you may miss out on some new features or bug fixes. Cracked software may also contain viruses or malware that can harm your computer or compromise your security. Cracked software may also violate the intellectual property rights of the developers, which can lead to legal consequences. Therefore, we do not recommend using cracked software and advise you to purchase the original software from the official website if you want to use it safely and legally.

          -
        5. How can I get support or help for WBS Schedule Pro 5.1 Crack 16?
        6. -

          Since WBS Schedule Pro 5.1 Crack 16 is a cracked version of the original software, you may not be able to get support or help from the developers or the official website. However, you may be able to find some online resources or communities that can provide you with some tips or solutions for using WBS Schedule Pro 5.1 Crack 16. For example, you can visit the following websites:

          -
            -
          • : This is a collection of WBS Schedule Pro 5.1 Crack 16 on OpenSea, where you can buy, sell or trade them with other users.
          • -
          • : This is a Trello board of WBS Schedule Pro 5.1 Crack 16, where you can see the tasks and progress of the project.
          • -
          • : This is a npm package of WBS Schedule Pro 5.1 Crack 16 ((new)), where you can download and install it on your project.
          • -
          -
        7. What are some alternatives to WBS Schedule Pro 5.1 Crack 16?
        8. -

          If you are looking for some alternatives to WBS Schedule Pro 5.1 Crack 16 that are free or affordable, you may want to check out some of these options:

          -
            -
          • MindMeister: This is an online mind mapping tool that can help you create and share visual project plans and brainstorm ideas.
          • -
          • Smartsheet: This is an online project management platform that can help you create and manage tasks, schedules, budgets and resources.
          • -
          • GanttPRO: This is an online Gantt chart software that can help you create and manage project timelines and dependencies.
          • -
          -
        9. How can I learn more about WBS Schedule Pro 5.1 Crack 16?
        10. -

          If you want to learn more about WBS Schedule Pro 5.1 Crack 16, you can visit some of these websites:

          -
            -
          • : This is a website that provides a detailed review and download link of WBS Schedule Pro 5.1 Crack 16.
          • -
          • : This is a website that provides a tutorial and download link of WBS Schedule Pro v5.1.0022 (WBS Version) + Crack.
          • -
          • Critical Tools: This is the official website of Critical Tools, Inc., the developer of WBS Schedule Pro.
          • -
          -

          -
          -
          \ No newline at end of file diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/abbrev/README.md b/spaces/rayan-saleh/whisper2notion/server/node_modules/abbrev/README.md deleted file mode 100644 index 99746fe67c4620f767749290e0a0863721861447..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/abbrev/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# abbrev-js - -Just like [ruby's Abbrev](http://apidock.com/ruby/Abbrev). - -Usage: - - var abbrev = require("abbrev"); - abbrev("foo", "fool", "folding", "flop"); - - // returns: - { fl: 'flop' - , flo: 'flop' - , flop: 'flop' - , fol: 'folding' - , fold: 'folding' - , foldi: 'folding' - , foldin: 'folding' - , folding: 'folding' - , foo: 'foo' - , fool: 'fool' - } - -This is handy for command-line scripts, or other cases where you want to be able to accept shorthands. diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Z3X-Box-3411-PATCHED-Crack.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Z3X-Box-3411-PATCHED-Crack.md deleted file mode 100644 index 0c11d58cc8a87711c75a48fca62336f181e2ac78..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Z3X-Box-3411-PATCHED-Crack.md +++ /dev/null @@ -1,59 +0,0 @@ -## Z3X Box 34.11 Crack - - - -**Click Here ✒ [https://www.google.com/url?q=https%3A%2F%2Furloso.com%2F2twEns&sa=D&sntz=1&usg=AOvVaw2SBhNBZOeIMVGpOcH1i92w](https://www.google.com/url?q=https%3A%2F%2Furloso.com%2F2twEns&sa=D&sntz=1&usg=AOvVaw2SBhNBZOeIMVGpOcH1i92w)** - - - -# Z3X Box 34.11 Crack: A Powerful Tool for Samsung Devices - - - -Z3X Box 34.11 Crack is a software that allows you to unlock, flash, repair, and backup Samsung devices. It is a professional tool that supports over 600 Samsung models and has many features and functions. With Z3X Box 34.11 Crack, you can easily bypass FRP lock, reset EFS, disable factory mode, wipe EFS/NVM, reset MSL, read and write EFS, QCN, SEC, and CERT files, and more. - - - -Z3X Box 34.11 Crack is compatible with Windows XP, Vista, 7, 8, 8.1, and 10. It requires a Z3X box hardware device to work. You can download the software from the official website or from various online sources. However, you should be careful about the source of the download as some files may contain viruses or malware that can harm your computer or device. - - - -To install Z3X Box 34.11 Crack, you need to extract the downloaded files to a folder on your computer. Then, you need to install the necessary drivers for your Samsung device. You can find the drivers on the Z3X box website or on the internet. After installing the drivers, you can run the Z3X Samsung Tool Pro.exe file and connect your Samsung device to the Z3X box using a USB cable. You can then select your device model and perform various operations using the software. - - - -Z3X Box 34.11 Crack is a powerful and reliable tool for Samsung devices. It can help you solve many problems and issues with your device. However, you should use it with caution and at your own risk as some operations may damage your device or void your warranty. You should also backup your data before using the software as some operations may erase your data. - - - -Z3X Box 34.11 Crack has many benefits for Samsung device users. It can help you unlock your device if you forgot your password, PIN, pattern, or fingerprint. It can also help you remove the Google account verification or FRP lock that prevents you from using your device after a factory reset. 
You can also use it to flash your device with a custom ROM or firmware that suits your needs and preferences. - - - -Z3X Box 34.11 Crack can also help you repair your device if it is damaged or malfunctioning. It can fix issues such as bootloop, network lock, IMEI null, baseband unknown, DRK error, and more. It can also restore your device to its original state if you want to sell it or send it for warranty service. You can also use it to backup and restore your data and files in case of data loss or corruption. - - - -Z3X Box 34.11 Crack is a user-friendly and easy-to-use tool for Samsung devices. It has a simple and intuitive interface that guides you through the steps of each operation. It also has a support section that provides you with useful information and tips on how to use the software. You can also contact the Z3X team or the online community for help and support if you encounter any problems or difficulties. - - - -If you want to use Z3X Box 34.11 Crack, you need to follow some simple steps. First, you need to download the software from a reliable source and extract it to a folder on your computer. Then, you need to install the Z3X box drivers and the Samsung USB drivers on your computer. You can find the drivers on the Z3X box website or on the internet. - - - -Next, you need to run the Z3X Samsung Tool Pro.exe file as administrator and connect your Z3X box to your computer using a USB cable. You will see a message that says "Card not found" or "Card not added". This is normal because you are using a cracked version of the software. You can ignore this message and proceed to the next step. - - - -Now, you need to connect your Samsung device to the Z3X box using another USB cable. Make sure your device is in download mode or recovery mode depending on the operation you want to perform. You can enter download mode by pressing and holding the volume down, home, and power buttons simultaneously. You can enter recovery mode by pressing and holding the volume up, home, and power buttons simultaneously. - - - -Once your device is connected, you will see your device model and information on the software interface. You can then select your device model from the drop-down menu and choose the operation you want to perform from the tabs. You can also use the search function to find your device model or operation. You can then click on the start button or execute button to begin the operation. - - - -The software will show you the progress and status of the operation on the log window. You can also see the instructions and tips on how to perform the operation on the support window. You should wait until the operation is completed and do not disconnect your device or Z3X box during the process. Once the operation is done, you will see a message that says "Done" or "Success". You can then disconnect your device and Z3X box and restart your device. - - 1b8d091108 \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/After Effect Cs4 Portable Free Download ((LINK)).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/After Effect Cs4 Portable Free Download ((LINK)).md deleted file mode 100644 index 654e3e255b9dd0b6ba3f1c72e39771100b6a7439..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/After Effect Cs4 Portable Free Download ((LINK)).md +++ /dev/null @@ -1,11 +0,0 @@ -
          -

Adobe After Effects CS4 Portable is a powerful application for digital visual effects, motion graphics, and compositing. It can also be used to edit images, video, and audio clips. Even users who are new to the software can use it for other motion-graphics work, and it covers audio editing, video editing, text editing, and other tasks as well.

          -

Adobe After Effects CS4 Portable is professional digital compositing and visual effects software. It can be used to edit video or audio clips, and even users who are not yet familiar with it can use it for other motion-graphics work.

          -

          after effect cs4 portable free download


          Download ⚙⚙⚙ https://urlgoal.com/2uCL1T



          -

Adobe After Effects CS4 Portable is professional digital visual effects, motion graphics, and compositing software. You can use it to edit images, video, and audio clips, and newcomers can also use it for other motion-graphics work.

          -

You can use this software to edit, animate, and render motion video. After Effects CS4 Portable can be used to create professional-quality animated projects for websites, video games, television, or any other interactive media.

          -

Adobe After Effects CS4 Portable is a good application for people who are starting out as motion graphics artists. You can use it to create animations and videos, add visual effects to your clips, and do much more with this creative and productive application.

          -

After Effects CS4 includes these new features (among others):

          -

          • create amazing motion graphics, videos, and 3d animations
          • expand your vision into motion graphics
          • create a comprehensive collection of motion graphics tools
          • view your animation from multiple perspectives in after effects
          • edit and render in a multifaceted, collaborative workspace with a user-friendly interface
          • plunge deeper into your collaboration with the ability to interact with multiple authors
          • make it easy to share your finished projects with those who want to build on your work
          -
          -
          \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Majalah Tempo Versi Pdf LINK.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Majalah Tempo Versi Pdf LINK.md deleted file mode 100644 index bae058e9c68ea2ed9de7fe8240168482e98f0cca..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Majalah Tempo Versi Pdf LINK.md +++ /dev/null @@ -1,6 +0,0 @@ -

          download majalah tempo versi pdf


          Download Zip ✵✵✵ https://urlgoal.com/2uCMPf



- -ecmq.maavomom.site - Garageband 10.2 0 manual. ... back when there were magazines like Kompas and the Tempo special edition on Soeharto. Download Majalah Tempo Versi Pdf.
          -
          -
          -

          diff --git a/spaces/riccorl/relik-entity-linking/app.py b/spaces/riccorl/relik-entity-linking/app.py deleted file mode 100644 index 8501d81722d7d022ea3121f7fe81c377afc7979e..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/app.py +++ /dev/null @@ -1,237 +0,0 @@ -import os -import re -import time -from pathlib import Path - -import requests -import streamlit as st -from spacy import displacy -from streamlit_extras.badges import badge -from streamlit_extras.stylable_container import stylable_container - -# RELIK = os.getenv("RELIK", "localhost:8000/api/entities") - -import random - -from relik.inference.annotator import Relik - - -def get_random_color(ents): - colors = {} - random_colors = generate_pastel_colors(len(ents)) - for ent in ents: - colors[ent] = random_colors.pop(random.randint(0, len(random_colors) - 1)) - return colors - - -def floatrange(start, stop, steps): - if int(steps) == 1: - return [stop] - return [ - start + float(i) * (stop - start) / (float(steps) - 1) for i in range(steps) - ] - - -def hsl_to_rgb(h, s, l): - def hue_2_rgb(v1, v2, v_h): - while v_h < 0.0: - v_h += 1.0 - while v_h > 1.0: - v_h -= 1.0 - if 6 * v_h < 1.0: - return v1 + (v2 - v1) * 6.0 * v_h - if 2 * v_h < 1.0: - return v2 - if 3 * v_h < 2.0: - return v1 + (v2 - v1) * ((2.0 / 3.0) - v_h) * 6.0 - return v1 - - # if not (0 <= s <= 1): raise ValueError, "s (saturation) parameter must be between 0 and 1." - # if not (0 <= l <= 1): raise ValueError, "l (lightness) parameter must be between 0 and 1." - - r, b, g = (l * 255,) * 3 - if s != 0.0: - if l < 0.5: - var_2 = l * (1.0 + s) - else: - var_2 = (l + s) - (s * l) - var_1 = 2.0 * l - var_2 - r = 255 * hue_2_rgb(var_1, var_2, h + (1.0 / 3.0)) - g = 255 * hue_2_rgb(var_1, var_2, h) - b = 255 * hue_2_rgb(var_1, var_2, h - (1.0 / 3.0)) - - return int(round(r)), int(round(g)), int(round(b)) - - -def generate_pastel_colors(n): - """Return different pastel colours. 
- - Input: - n (integer) : The number of colors to return - - Output: - A list of colors in HTML notation (eg.['#cce0ff', '#ffcccc', '#ccffe0', '#f5ccff', '#f5ffcc']) - - Example: - >>> print generate_pastel_colors(5) - ['#cce0ff', '#f5ccff', '#ffcccc', '#f5ffcc', '#ccffe0'] - """ - if n == 0: - return [] - - # To generate colors, we use the HSL colorspace (see http://en.wikipedia.org/wiki/HSL_color_space) - start_hue = 0.0 # 0=red 1/3=0.333=green 2/3=0.666=blue - saturation = 1.0 - lightness = 0.9 - # We take points around the chromatic circle (hue): - # (Note: we generate n+1 colors, then drop the last one ([:-1]) because - # it equals the first one (hue 0 = hue 1)) - return [ - "#%02x%02x%02x" % hsl_to_rgb(hue, saturation, lightness) - for hue in floatrange(start_hue, start_hue + 1, n + 1) - ][:-1] - - -def set_sidebar(css): - white_link_wrapper = "{}" - with st.sidebar: - st.markdown(f"", unsafe_allow_html=True) - st.image( - "http://nlp.uniroma1.it/static/website/sapienza-nlp-logo-wh.svg", - use_column_width=True, - ) - st.markdown("## ReLiK") - st.write( - f""" - - {white_link_wrapper.format("#", "  Paper")} - - {white_link_wrapper.format("https://github.com/SapienzaNLP/relik", "  GitHub")} - - {white_link_wrapper.format("https://hub.docker.com/repository/docker/sapienzanlp/relik", "  Docker Hub")} - """, - unsafe_allow_html=True, - ) - st.markdown("## Sapienza NLP") - st.write( - f""" - - {white_link_wrapper.format("https://nlp.uniroma1.it", "  Webpage")} - - {white_link_wrapper.format("https://github.com/SapienzaNLP", "  GitHub")} - - {white_link_wrapper.format("https://twitter.com/SapienzaNLP", "  Twitter")} - - {white_link_wrapper.format("https://www.linkedin.com/company/79434450", "  LinkedIn")} - """, - unsafe_allow_html=True, - ) - - -def get_el_annotations(response): - el_link_wrapper = " {}" - # swap labels key with ents - ents = [ - { - "start": l.start, - "end": l.end, - "label": el_link_wrapper.format(l.label.replace(" ", "_"), l.label), - } - for l in response.labels - ] - dict_of_ents = {"text": response.text, "ents": ents} - label_in_text = set(l["label"] for l in dict_of_ents["ents"]) - options = {"ents": label_in_text, "colors": get_random_color(label_in_text)} - return dict_of_ents, options - - -@st.cache_resource() -def load_model(): - return Relik( - question_encoder="/home/user/app/models/relik-retriever-small-aida-blink-pretrain-omniencoder/question_encoder", - document_index="/home/user/app/models/relik-retriever-small-aida-blink-pretrain-omniencoder/document_index_filtered", - reader="/home/user/app/models/relik-reader-aida-deberta-small", - top_k=100, - window_size=32, - window_stride=16, - candidates_preprocessing_fn="relik.inference.preprocessing.wikipedia_title_and_openings_preprocessing", - ) - - -def set_intro(css): - # intro - st.markdown("# ReLik") - st.markdown( - "### Retrieve, Read and LinK: Fast and Accurate Entity Linking and Relation Extraction on an Academic Budget" - ) - # st.markdown( - # "This is a front-end for the paper [Universal Semantic Annotator: the First Unified API " - # "for WSD, SRL and Semantic Parsing](https://www.researchgate.net/publication/360671045_Universal_Semantic_Annotator_the_First_Unified_API_for_WSD_SRL_and_Semantic_Parsing), which will be presented at LREC 2022 by " - # "[Riccardo Orlando](https://riccorl.github.io), [Simone Conia](https://c-simone.github.io/), " - # "[Stefano Faralli](https://corsidilaurea.uniroma1.it/it/users/stefanofaralliuniroma1it), and [Roberto Navigli](https://www.diag.uniroma1.it/navigli/)." 
- # ) - badge(type="github", name="sapienzanlp/relik") - badge(type="pypi", name="relik") - - -def run_client(): - with open(Path(__file__).parent / "style.css") as f: - css = f.read() - - st.set_page_config( - page_title="ReLik", - page_icon="🦮", - layout="wide", - ) - set_sidebar(css) - set_intro(css) - - # text input - text = st.text_area( - "Enter Text Below:", - value="Michael Jordan was one of the best players in the NBA.", - height=200, - max_chars=1500, - ) - - with stylable_container( - key="annotate_button", - css_styles=""" - button { - background-color: #802433; - color: white; - border-radius: 25px; - } - """, - ): - submit = st.button("Annotate") - # submit = st.button("Run") - - if "relik_model" not in st.session_state.keys(): - st.session_state["relik_model"] = load_model() - relik_model = st.session_state["relik_model"] - - # ReLik API call - if submit: - text = text.strip() - if text: - st.markdown("####") - st.markdown("#### Entity Linking") - with st.spinner(text="In progress"): - response = relik_model(text) - # response = requests.post(RELIK, json=text) - # if response.status_code != 200: - # st.error("Error: {}".format(response.status_code)) - # else: - # response = response.json() - - # st.markdown("##") - dict_of_ents, options = get_el_annotations(response=response) - display = displacy.render( - dict_of_ents, manual=True, style="ent", options=options - ) - display = display.replace("\n", " ") - # heurstic, prevents split of annotation decorations - display = display.replace("border-radius: 0.35em;", "border-radius: 0.35em; white-space: nowrap;") - with st.container(): - st.write(display, unsafe_allow_html=True) - - else: - st.error("Please enter some text.") - - -if __name__ == "__main__": - run_client() diff --git a/spaces/sarinam/speaker-anonymization/IMSToucan/Preprocessing/ArticulatoryCombinedTextFrontend.py b/spaces/sarinam/speaker-anonymization/IMSToucan/Preprocessing/ArticulatoryCombinedTextFrontend.py deleted file mode 100644 index b4d47d087da6bb8888d1f5b729b97a74e41a5a99..0000000000000000000000000000000000000000 --- a/spaces/sarinam/speaker-anonymization/IMSToucan/Preprocessing/ArticulatoryCombinedTextFrontend.py +++ /dev/null @@ -1,323 +0,0 @@ -import re -import sys - -import panphon -import phonemizer -import torch - -from .papercup_features import generate_feature_table - - -class ArticulatoryCombinedTextFrontend: - - def __init__(self, - language, - use_word_boundaries=False, # goes together well with - # parallel models and a aligner. Doesn't go together - # well with autoregressive models. - use_explicit_eos=True, - use_prosody=False, # unfortunately the non-segmental - # nature of prosodic markers mixed with the sequential - # phonemes hurts the performance of end-to-end models a - # lot, even though one might think enriching the input - # with such information would help. 
- use_lexical_stress=False, - silent=True, - allow_unknown=False, - add_silence_to_end=True, - strip_silence=True): - """ - Mostly preparing ID lookups - """ - self.strip_silence = strip_silence - self.use_word_boundaries = use_word_boundaries - self.allow_unknown = allow_unknown - self.use_explicit_eos = use_explicit_eos - self.use_prosody = use_prosody - self.use_stress = use_lexical_stress - self.add_silence_to_end = add_silence_to_end - self.feature_table = panphon.FeatureTable() - - if language == "en": - self.g2p_lang = "en-us" - self.expand_abbreviations = english_text_expansion - if not silent: - print("Created an English Text-Frontend") - - elif language == "de": - self.g2p_lang = "de" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a German Text-Frontend") - - elif language == "el": - self.g2p_lang = "el" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a Greek Text-Frontend") - - elif language == "es": - self.g2p_lang = "es" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a Spanish Text-Frontend") - - elif language == "fi": - self.g2p_lang = "fi" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a Finnish Text-Frontend") - - elif language == "ru": - self.g2p_lang = "ru" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a Russian Text-Frontend") - - elif language == "hu": - self.g2p_lang = "hu" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a Hungarian Text-Frontend") - - elif language == "nl": - self.g2p_lang = "nl" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a Dutch Text-Frontend") - - elif language == "fr": - self.g2p_lang = "fr-fr" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a French Text-Frontend") - - elif language == "it": - self.g2p_lang = "it" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a Italian Text-Frontend") - - elif language == "pt": - self.g2p_lang = "pt" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a Portuguese Text-Frontend") - - elif language == "pl": - self.g2p_lang = "pl" - self.expand_abbreviations = lambda x: x - if not silent: - print("Created a Polish Text-Frontend") - - # remember to also update get_language_id() when adding something here - - else: - print("Language not supported yet") - sys.exit() - - self.phone_to_vector_papercup = generate_feature_table() - - self.phone_to_vector = dict() - for phone in self.phone_to_vector_papercup: - panphon_features = self.feature_table.word_to_vector_list(phone, numeric=True) - if panphon_features == []: - panphon_features = [[0] * 24] - papercup_features = self.phone_to_vector_papercup[phone] - self.phone_to_vector[phone] = papercup_features + panphon_features[0] - - self.phone_to_id = { # this lookup must be updated manually, because the only - # other way would be extracting them from a set, which can be non-deterministic - '~': 0, - '#': 1, - '?': 2, - '!': 3, - '.': 4, - 'ɜ': 5, - 'ɫ': 6, - 'ə': 7, - 'ɚ': 8, - 'a': 9, - 'ð': 10, - 'ɛ': 11, - 'ɪ': 12, - 'ᵻ': 13, - 'ŋ': 14, - 'ɔ': 15, - 'ɒ': 16, - 'ɾ': 17, - 'ʃ': 18, - 'θ': 19, - 'ʊ': 20, - 'ʌ': 21, - 'ʒ': 22, - 'æ': 23, - 'b': 24, - 'ʔ': 25, - 'd': 26, - 'e': 27, - 'f': 28, - 'g': 29, - 'h': 30, - 'i': 31, - 'j': 32, - 'k': 33, - 'l': 34, - 'm': 35, - 'n': 36, - 'ɳ': 37, - 'o': 38, - 'p': 39, - 'ɡ': 40, - 'ɹ': 41, - 'r': 42, - 's': 43, - 't': 44, - 'u': 45, - 'v': 46, - 'w': 
47, - 'x': 48, - 'z': 49, - 'ʀ': 50, - 'ø': 51, - 'ç': 52, - 'ɐ': 53, - 'œ': 54, - 'y': 55, - 'ʏ': 56, - 'ɑ': 57, - 'c': 58, - 'ɲ': 59, - 'ɣ': 60, - 'ʎ': 61, - 'β': 62, - 'ʝ': 63, - 'ɟ': 64, - 'q': 65, - 'ɕ': 66, - 'ʲ': 67, - 'ɭ': 68, - 'ɵ': 69, - 'ʑ': 70, - 'ʋ': 71, - 'ʁ': 72, - 'ɨ': 73, - 'ʂ': 74, - 'ɬ': 75, - } # for the states of the ctc loss and dijkstra/mas in the aligner - - self.id_to_phone = {v: k for k, v in self.phone_to_id.items()} - - def string_to_tensor(self, text, view=False, device="cpu", handle_missing=True, input_phonemes=False): - """ - Fixes unicode errors, expands some abbreviations, - turns graphemes into phonemes and then vectorizes - the sequence as articulatory features - """ - if input_phonemes: - phones = text - else: - phones = self.get_phone_string(text=text, include_eos_symbol=True) - if view: - print("Phonemes: \n{}\n".format(phones)) - phones_vector = list() - # turn into numeric vectors - for char in phones: - if handle_missing: - try: - phones_vector.append(self.phone_to_vector[char]) - except KeyError: - print("unknown phoneme: {}".format(char)) - else: - phones_vector.append(self.phone_to_vector[char]) # leave error handling to elsewhere - - return torch.Tensor(phones_vector, device=device) - - def get_phone_string(self, text, include_eos_symbol=True): - # expand abbreviations - utt = self.expand_abbreviations(text) - # phonemize - phones = phonemizer.phonemize(utt, - language_switch='remove-flags', - backend="espeak", - language=self.g2p_lang, - preserve_punctuation=True, - strip=True, - punctuation_marks=';:,.!?¡¿—…"«»“”~/', - with_stress=self.use_stress).replace(";", ",").replace("/", " ").replace("—", "") \ - .replace(":", ",").replace('"', ",").replace("-", ",").replace("...", ",").replace("-", ",").replace("\n", " ") \ - .replace("\t", " ").replace("¡", "").replace("¿", "").replace(",", "~").replace(" ̃", "").replace('̩', "").replace("̃", "").replace("̪", "") - # less than 1 wide characters hidden here - phones = re.sub("~+", "~", phones) - if not self.use_prosody: - # retain ~ as heuristic pause marker, even though all other symbols are removed with this option. - # also retain . ? and ! since they can be indicators for the stop token - phones = phones.replace("ˌ", "").replace("ː", "").replace("ˑ", "") \ - .replace("˘", "").replace("|", "").replace("‖", "") - if not self.use_word_boundaries: - phones = phones.replace(" ", "") - else: - phones = re.sub(r"\s+", " ", phones) - phones = re.sub(" ", "~", phones) - if self.strip_silence: - phones = phones.lstrip("~").rstrip("~") - if self.add_silence_to_end: - phones += "~" # adding a silence in the end during add_silence_to_end produces more natural sounding prosody - if include_eos_symbol: - phones += "#" - - phones = "~" + phones - phones = re.sub("~+", "~", phones) - - return phones - - -def english_text_expansion(text): - """ - Apply as small part of the tacotron style text cleaning pipeline, suitable for e.g. LJSpeech. - See https://github.com/keithito/tacotron/ - Careful: Only apply to english datasets. Different languages need different cleaners. - """ - _abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in - [('Mrs.', 'misess'), ('Mr.', 'mister'), ('Dr.', 'doctor'), ('St.', 'saint'), ('Co.', 'company'), ('Jr.', 'junior'), ('Maj.', 'major'), - ('Gen.', 'general'), ('Drs.', 'doctors'), ('Rev.', 'reverend'), ('Lt.', 'lieutenant'), ('Hon.', 'honorable'), ('Sgt.', 'sergeant'), - ('Capt.', 'captain'), ('Esq.', 'esquire'), ('Ltd.', 'limited'), ('Col.', 'colonel'), ('Ft.', 'fort')]] - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def get_language_id(language): - if language == "en": - return torch.LongTensor([0]) - elif language == "de": - return torch.LongTensor([1]) - elif language == "el": - return torch.LongTensor([2]) - elif language == "es": - return torch.LongTensor([3]) - elif language == "fi": - return torch.LongTensor([4]) - elif language == "ru": - return torch.LongTensor([5]) - elif language == "hu": - return torch.LongTensor([6]) - elif language == "nl": - return torch.LongTensor([7]) - elif language == "fr": - return torch.LongTensor([8]) - elif language == "pt": - return torch.LongTensor([9]) - elif language == "pl": - return torch.LongTensor([10]) - elif language == "it": - return torch.LongTensor([11]) - - -if __name__ == '__main__': - # test an English utterance - tfr_en = ArticulatoryCombinedTextFrontend(language="en") - print(tfr_en.string_to_tensor("This is a complex sentence, it even has a pause! But can it do this? Nice.", view=True)) - - tfr_en = ArticulatoryCombinedTextFrontend(language="de") - print(tfr_en.string_to_tensor("Alles klar, jetzt testen wir einen deutschen Satz. Ich hoffe es gibt nicht mehr viele unspezifizierte Phoneme.", view=True)) diff --git a/spaces/sayakpaul/fetch-similar-images/app.py b/spaces/sayakpaul/fetch-similar-images/app.py deleted file mode 100644 index 8ab6b4f2ee616edac8cfc2c8d495935183a0027a..0000000000000000000000000000000000000000 --- a/spaces/sayakpaul/fetch-similar-images/app.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -Thanks to Freddy Boulton (https://github.com/freddyaboulton) for helping with this. -""" - - -import pickle - -import gradio as gr -from datasets import load_dataset -from transformers import AutoModel - -# `LSH` and `Table` imports are necessary in order for the -# `lsh.pickle` file to load successfully. -from similarity_utils import LSH, BuildLSHTable, Table - -seed = 42 - -# Only runs once when the script is first run. -with open("lsh.pickle", "rb") as handle: - loaded_lsh = pickle.load(handle) - -# Load model for computing embeddings. -model_ckpt = "nateraw/vit-base-beans" -model = AutoModel.from_pretrained(model_ckpt) -lsh_builder = BuildLSHTable(model) -lsh_builder.lsh = loaded_lsh - -# Candidate images. 
-dataset = load_dataset("beans") -candidate_dataset = dataset["train"].shuffle(seed=seed) - - -def query(image, top_k): - results = lsh_builder.query(image) - - # Should be a list of string file paths for gr.Gallery to work - images = [] - # List of labels for each image in the gallery - labels = [] - - candidates = [] - - for idx, r in enumerate(sorted(results, key=results.get, reverse=True)): - if idx == top_k: - break - image_id, label = r.split("_")[0], r.split("_")[1] - candidates.append(candidate_dataset[int(image_id)]["image"]) - labels.append(f"Label: {label}") - - for i, candidate in enumerate(candidates): - filename = f"similar_{i}.png" - candidate.save(filename) - images.append(filename) - - # The gallery component can be a list of tuples, where the first element is a path to a file - # and the second element is an optional caption for that image - return list(zip(images, labels)) - - -title = "Fetch Similar Beans 🪴" -description = "This Space demos an image similarity system. You can refer to [this notebook](TODO) to know the details of the system. You can pick any image from the available samples below. On the right hand side, you'll find the similar images returned by the system. The example images have been named with their corresponding integer class labels for easier identification. The fetched images will also have their integer labels tagged so that you can validate the correctness of the results." - -# You can set the type of gr.Image to be PIL, numpy or str (filepath) -# Not sure what the best for this demo is. -gr.Interface( - query, - inputs=[gr.Image(type="pil"), gr.Slider(value=5, minimum=1, maximum=10, step=1)], - outputs=gr.Gallery().style(grid=[3], height="auto"), - # Filenames denote the integer labels. Know here: https://hf.co/datasets/beans - title=title, - description=description, - examples=[["0.png", 5], ["1.png", 5], ["2.png", 5]], -).launch() diff --git a/spaces/scedlatioru/img-to-music/example/Adobe Acrobat Reader DC Crack 2020.md b/spaces/scedlatioru/img-to-music/example/Adobe Acrobat Reader DC Crack 2020.md deleted file mode 100644 index ddd4fcd475924afb27b98607c474c039b18f9a33..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Adobe Acrobat Reader DC Crack 2020.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Adobe Acrobat Reader DC Crack 2020


          Download Filehttps://gohhs.com/2uEAor



          - -Download & install Adobe Acrobat Pro Dc 2020 Trial. · Then Get crack and generate the activation code from the link here. · Now run the crack. · Wait for the ... 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/scedlatioru/img-to-music/example/Tnpsc Tamil Shortcuts Pdf 92.md b/spaces/scedlatioru/img-to-music/example/Tnpsc Tamil Shortcuts Pdf 92.md deleted file mode 100644 index c94ba8834234ff8f9c9e41184fdcdfcaa34e6159..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Tnpsc Tamil Shortcuts Pdf 92.md +++ /dev/null @@ -1,10 +0,0 @@ -
          -

In India, the Tamil Nadu Public Service Commission (TNPSC) Group IV examination is conducted once a year. It is a written exam held for the recruitment of Group IV clerks, head clerks, administrative staff, and other vacancies in the Government of Tamil Nadu. The exam is conducted to recruit candidates for the following posts:

          -

          tnpsc tamil shortcuts pdf 92


          Downloadhttps://gohhs.com/2uEziK



          -

You can use the TNPSC Group 4 exam papers and study material provided by our expert team to crack the exam. To do well, go through the study material and prepare thoroughly. We also provide TNPSC Group 4 exam answers, which will help you crack the exam more easily.

          -

You can go through all the study material given on our website and download the app. We hope that you will crack the TNPSC Group 4 exam and get selected for the government job. If you need any kind of assistance, you can contact our team of experts, who are always there to help you crack the exam and get the job.

          -

The TNPSC Group 4 exam is a written exam conducted by the Government of Tamil Nadu. You can go through the answers to the TNPSC Group 4 exam paper; this will help you crack the exam and get selected for the government job.

          -

          -

TNPSC Group 4 exam results and cut-off marks: the TNPSC Group 4 cut-off will be announced on the official website, and candidates can check the 2021 cut-off from the link given below. The cut-off will be released along with the results of the Group 4 and VAO exams and will be available to all candidates who applied for those exams in 2021. Candidates can check the cut-off with the help of the scorecard they receive after the exams. For more details about the TNPSC Group 4 cut-off, check the links given below.

          -
          -
          \ No newline at end of file diff --git a/spaces/sdhsdhk/bingo111/src/lib/isomorphic/browser.ts b/spaces/sdhsdhk/bingo111/src/lib/isomorphic/browser.ts deleted file mode 100644 index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingo111/src/lib/isomorphic/browser.ts +++ /dev/null @@ -1,11 +0,0 @@ -'use client' - -const debug = console.info.bind(console) - -class WebSocketAlias extends WebSocket { - constructor(address: string | URL, ...args: any) { - super(address) - } -} - -export default { fetch, WebSocket: WebSocketAlias, debug } diff --git a/spaces/segments-tobias/conex/espnet/nets/batch_beam_search_online_sim.py b/spaces/segments-tobias/conex/espnet/nets/batch_beam_search_online_sim.py deleted file mode 100644 index c3b348654ed51da54c38cf9d93420b69f0790fd0..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/batch_beam_search_online_sim.py +++ /dev/null @@ -1,270 +0,0 @@ -"""Parallel beam search module for online simulation.""" - -import logging -from pathlib import Path -from typing import List - -import yaml - -import torch - -from espnet.nets.batch_beam_search import BatchBeamSearch -from espnet.nets.beam_search import Hypothesis -from espnet.nets.e2e_asr_common import end_detect - - -class BatchBeamSearchOnlineSim(BatchBeamSearch): - """Online beam search implementation. - - This simulates streaming decoding. - It requires encoded features of entire utterance and - extracts block by block from it as it shoud be done - in streaming processing. - This is based on Tsunoo et al, "STREAMING TRANSFORMER ASR - WITH BLOCKWISE SYNCHRONOUS BEAM SEARCH" - (https://arxiv.org/abs/2006.14941). - """ - - def set_streaming_config(self, asr_config: str): - """Set config file for streaming decoding. - - Args: - asr_config (str): The config file for asr training - - """ - train_config_file = Path(asr_config) - self.block_size = None - self.hop_size = None - self.look_ahead = None - config = None - with train_config_file.open("r", encoding="utf-8") as f: - args = yaml.safe_load(f) - if "encoder_conf" in args.keys(): - if "block_size" in args["encoder_conf"].keys(): - self.block_size = args["encoder_conf"]["block_size"] - if "hop_size" in args["encoder_conf"].keys(): - self.hop_size = args["encoder_conf"]["hop_size"] - if "look_ahead" in args["encoder_conf"].keys(): - self.look_ahead = args["encoder_conf"]["look_ahead"] - elif "config" in args.keys(): - config = args["config"] - if config is None: - logging.info( - "Cannot find config file for streaming decoding: " - + "apply batch beam search instead." - ) - return - if ( - self.block_size is None or self.hop_size is None or self.look_ahead is None - ) and config is not None: - config_file = Path(config) - with config_file.open("r", encoding="utf-8") as f: - args = yaml.safe_load(f) - if "encoder_conf" in args.keys(): - enc_args = args["encoder_conf"] - if enc_args and "block_size" in enc_args: - self.block_size = enc_args["block_size"] - if enc_args and "hop_size" in enc_args: - self.hop_size = enc_args["hop_size"] - if enc_args and "look_ahead" in enc_args: - self.look_ahead = enc_args["look_ahead"] - - def set_block_size(self, block_size: int): - """Set block size for streaming decoding. - - Args: - block_size (int): The block size of encoder - """ - self.block_size = block_size - - def set_hop_size(self, hop_size: int): - """Set hop size for streaming decoding. 
- - Args: - hop_size (int): The hop size of encoder - """ - self.hop_size = hop_size - - def set_look_ahead(self, look_ahead: int): - """Set look ahead size for streaming decoding. - - Args: - look_ahead (int): The look ahead size of encoder - """ - self.look_ahead = look_ahead - - def forward( - self, x: torch.Tensor, maxlenratio: float = 0.0, minlenratio: float = 0.0 - ) -> List[Hypothesis]: - """Perform beam search. - - Args: - x (torch.Tensor): Encoded speech feature (T, D) - maxlenratio (float): Input length ratio to obtain max output length. - If maxlenratio=0.0 (default), it uses a end-detect function - to automatically find maximum hypothesis lengths - minlenratio (float): Input length ratio to obtain min output length. - - Returns: - list[Hypothesis]: N-best decoding results - - """ - self.conservative = True # always true - - if self.block_size and self.hop_size and self.look_ahead: - cur_end_frame = int(self.block_size - self.look_ahead) - else: - cur_end_frame = x.shape[0] - process_idx = 0 - if cur_end_frame < x.shape[0]: - h = x.narrow(0, 0, cur_end_frame) - else: - h = x - - # set length bounds - if maxlenratio == 0: - maxlen = x.shape[0] - else: - maxlen = max(1, int(maxlenratio * x.size(0))) - minlen = int(minlenratio * x.size(0)) - logging.info("decoder input length: " + str(x.shape[0])) - logging.info("max output length: " + str(maxlen)) - logging.info("min output length: " + str(minlen)) - - # main loop of prefix search - running_hyps = self.init_hyp(h) - prev_hyps = [] - ended_hyps = [] - prev_repeat = False - - continue_decode = True - - while continue_decode: - move_to_next_block = False - if cur_end_frame < x.shape[0]: - h = x.narrow(0, 0, cur_end_frame) - else: - h = x - - # extend states for ctc - self.extend(h, running_hyps) - - while process_idx < maxlen: - logging.debug("position " + str(process_idx)) - best = self.search(running_hyps, h) - - if process_idx == maxlen - 1: - # end decoding - running_hyps = self.post_process( - process_idx, maxlen, maxlenratio, best, ended_hyps - ) - n_batch = best.yseq.shape[0] - local_ended_hyps = [] - is_local_eos = ( - best.yseq[torch.arange(n_batch), best.length - 1] == self.eos - ) - for i in range(is_local_eos.shape[0]): - if is_local_eos[i]: - hyp = self._select(best, i) - local_ended_hyps.append(hyp) - # NOTE(tsunoo): check repetitions here - # This is a implicit implementation of - # Eq (11) in https://arxiv.org/abs/2006.14941 - # A flag prev_repeat is used instead of using set - elif ( - not prev_repeat - and best.yseq[i, -1] in best.yseq[i, :-1] - and cur_end_frame < x.shape[0] - ): - move_to_next_block = True - prev_repeat = True - if maxlenratio == 0.0 and end_detect( - [lh.asdict() for lh in local_ended_hyps], process_idx - ): - logging.info(f"end detected at {process_idx}") - continue_decode = False - break - if len(local_ended_hyps) > 0 and cur_end_frame < x.shape[0]: - move_to_next_block = True - - if move_to_next_block: - if ( - self.hop_size - and cur_end_frame + int(self.hop_size) + int(self.look_ahead) - < x.shape[0] - ): - cur_end_frame += int(self.hop_size) - else: - cur_end_frame = x.shape[0] - logging.debug("Going to next block: %d", cur_end_frame) - if process_idx > 1 and len(prev_hyps) > 0 and self.conservative: - running_hyps = prev_hyps - process_idx -= 1 - prev_hyps = [] - break - - prev_repeat = False - prev_hyps = running_hyps - running_hyps = self.post_process( - process_idx, maxlen, maxlenratio, best, ended_hyps - ) - - if cur_end_frame >= x.shape[0]: - for hyp in local_ended_hyps: - 
ended_hyps.append(hyp) - - if len(running_hyps) == 0: - logging.info("no hypothesis. Finish decoding.") - continue_decode = False - break - else: - logging.debug(f"remained hypotheses: {len(running_hyps)}") - # increment number - process_idx += 1 - - nbest_hyps = sorted(ended_hyps, key=lambda x: x.score, reverse=True) - # check the number of hypotheses reaching to eos - if len(nbest_hyps) == 0: - logging.warning( - "there is no N-best results, perform recognition " - "again with smaller minlenratio." - ) - return ( - [] - if minlenratio < 0.1 - else self.forward(x, maxlenratio, max(0.0, minlenratio - 0.1)) - ) - - # report the best result - best = nbest_hyps[0] - for k, v in best.scores.items(): - logging.info( - f"{v:6.2f} * {self.weights[k]:3} = {v * self.weights[k]:6.2f} for {k}" - ) - logging.info(f"total log probability: {best.score:.2f}") - logging.info(f"normalized log probability: {best.score / len(best.yseq):.2f}") - logging.info(f"total number of ended hypotheses: {len(nbest_hyps)}") - if self.token_list is not None: - logging.info( - "best hypo: " - + "".join([self.token_list[x] for x in best.yseq[1:-1]]) - + "\n" - ) - return nbest_hyps - - def extend(self, x: torch.Tensor, hyps: Hypothesis) -> List[Hypothesis]: - """Extend probabilities and states with more encoded chunks. - - Args: - x (torch.Tensor): The extended encoder output feature - hyps (Hypothesis): Current list of hypothesis - - Returns: - Hypothesis: The exxtended hypothesis - - """ - for k, d in self.scorers.items(): - if hasattr(d, "extend_prob"): - d.extend_prob(x) - if hasattr(d, "extend_state"): - hyps.states[k] = d.extend_state(hyps.states[k]) diff --git a/spaces/segments-tobias/conex/espnet/transform/__init__.py b/spaces/segments-tobias/conex/espnet/transform/__init__.py deleted file mode 100644 index f78ea5dbc9ae296270ed1cf2688313d52d6480b3..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/transform/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Initialize main package.""" diff --git a/spaces/segments/panoptic-segment-anything/segment_anything/segment_anything/modeling/transformer.py b/spaces/segments/panoptic-segment-anything/segment_anything/segment_anything/modeling/transformer.py deleted file mode 100644 index f1a2812f613cc55b1d0b3e3e1d0c84a760d1fb87..0000000000000000000000000000000000000000 --- a/spaces/segments/panoptic-segment-anything/segment_anything/segment_anything/modeling/transformer.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from torch import Tensor, nn - -import math -from typing import Tuple, Type - -from .common import MLPBlock - - -class TwoWayTransformer(nn.Module): - def __init__( - self, - depth: int, - embedding_dim: int, - num_heads: int, - mlp_dim: int, - activation: Type[nn.Module] = nn.ReLU, - attention_downsample_rate: int = 2, - ) -> None: - """ - A transformer decoder that attends to an input image using - queries whose positional embedding is supplied. - - Args: - depth (int): number of layers in the transformer - embedding_dim (int): the channel dimension for the input embeddings - num_heads (int): the number of heads for multihead attention. 
Must - divide embedding_dim - mlp_dim (int): the channel dimension internal to the MLP block - activation (nn.Module): the activation to use in the MLP block - """ - super().__init__() - self.depth = depth - self.embedding_dim = embedding_dim - self.num_heads = num_heads - self.mlp_dim = mlp_dim - self.layers = nn.ModuleList() - - for i in range(depth): - self.layers.append( - TwoWayAttentionBlock( - embedding_dim=embedding_dim, - num_heads=num_heads, - mlp_dim=mlp_dim, - activation=activation, - attention_downsample_rate=attention_downsample_rate, - skip_first_layer_pe=(i == 0), - ) - ) - - self.final_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm_final_attn = nn.LayerNorm(embedding_dim) - - def forward( - self, - image_embedding: Tensor, - image_pe: Tensor, - point_embedding: Tensor, - ) -> Tuple[Tensor, Tensor]: - """ - Args: - image_embedding (torch.Tensor): image to attend to. Should be shape - B x embedding_dim x h x w for any h and w. - image_pe (torch.Tensor): the positional encoding to add to the image. Must - have the same shape as image_embedding. - point_embedding (torch.Tensor): the embedding to add to the query points. - Must have shape B x N_points x embedding_dim for any N_points. - - Returns: - torch.Tensor: the processed point_embedding - torch.Tensor: the processed image_embedding - """ - # BxCxHxW -> BxHWxC == B x N_image_tokens x C - bs, c, h, w = image_embedding.shape - image_embedding = image_embedding.flatten(2).permute(0, 2, 1) - image_pe = image_pe.flatten(2).permute(0, 2, 1) - - # Prepare queries - queries = point_embedding - keys = image_embedding - - # Apply transformer blocks and final layernorm - for layer in self.layers: - queries, keys = layer( - queries=queries, - keys=keys, - query_pe=point_embedding, - key_pe=image_pe, - ) - - # Apply the final attenion layer from the points to the image - q = queries + point_embedding - k = keys + image_pe - attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) - queries = queries + attn_out - queries = self.norm_final_attn(queries) - - return queries, keys - - -class TwoWayAttentionBlock(nn.Module): - def __init__( - self, - embedding_dim: int, - num_heads: int, - mlp_dim: int = 2048, - activation: Type[nn.Module] = nn.ReLU, - attention_downsample_rate: int = 2, - skip_first_layer_pe: bool = False, - ) -> None: - """ - A transformer block with four layers: (1) self-attention of sparse - inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp - block on sparse inputs, and (4) cross attention of dense inputs to sparse - inputs. 
- - Arguments: - embedding_dim (int): the channel dimension of the embeddings - num_heads (int): the number of heads in the attention layers - mlp_dim (int): the hidden dimension of the mlp block - activation (nn.Module): the activation of the mlp block - skip_first_layer_pe (bool): skip the PE on the first layer - """ - super().__init__() - self.self_attn = Attention(embedding_dim, num_heads) - self.norm1 = nn.LayerNorm(embedding_dim) - - self.cross_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm2 = nn.LayerNorm(embedding_dim) - - self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) - self.norm3 = nn.LayerNorm(embedding_dim) - - self.norm4 = nn.LayerNorm(embedding_dim) - self.cross_attn_image_to_token = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - - self.skip_first_layer_pe = skip_first_layer_pe - - def forward( - self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor - ) -> Tuple[Tensor, Tensor]: - # Self attention block - if self.skip_first_layer_pe: - queries = self.self_attn(q=queries, k=queries, v=queries) - else: - q = queries + query_pe - attn_out = self.self_attn(q=q, k=q, v=queries) - queries = queries + attn_out - queries = self.norm1(queries) - - # Cross attention block, tokens attending to image embedding - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) - queries = queries + attn_out - queries = self.norm2(queries) - - # MLP block - mlp_out = self.mlp(queries) - queries = queries + mlp_out - queries = self.norm3(queries) - - # Cross attention block, image embedding attending to tokens - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) - keys = keys + attn_out - keys = self.norm4(keys) - - return queries, keys - - -class Attention(nn.Module): - """ - An attention layer that allows for downscaling the size of the embedding - after projection to queries, keys, and values. - """ - - def __init__( - self, - embedding_dim: int, - num_heads: int, - downsample_rate: int = 1, - ) -> None: - super().__init__() - self.embedding_dim = embedding_dim - self.internal_dim = embedding_dim // downsample_rate - self.num_heads = num_heads - assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
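# Linear projections map queries/keys/values into the (possibly downsampled) internal dimension before splitting into heads.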
- - self.q_proj = nn.Linear(embedding_dim, self.internal_dim) - self.k_proj = nn.Linear(embedding_dim, self.internal_dim) - self.v_proj = nn.Linear(embedding_dim, self.internal_dim) - self.out_proj = nn.Linear(self.internal_dim, embedding_dim) - - def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: - b, n, c = x.shape - x = x.reshape(b, n, num_heads, c // num_heads) - return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head - - def _recombine_heads(self, x: Tensor) -> Tensor: - b, n_heads, n_tokens, c_per_head = x.shape - x = x.transpose(1, 2) - return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C - - def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: - # Input projections - q = self.q_proj(q) - k = self.k_proj(k) - v = self.v_proj(v) - - # Separate into heads - q = self._separate_heads(q, self.num_heads) - k = self._separate_heads(k, self.num_heads) - v = self._separate_heads(v, self.num_heads) - - # Attention - _, _, _, c_per_head = q.shape - attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens - attn = attn / math.sqrt(c_per_head) - attn = torch.softmax(attn, dim=-1) - - # Get output - out = attn @ v - out = self._recombine_heads(out) - out = self.out_proj(out) - - return out diff --git a/spaces/senger/AI-TextGenerator/README.md b/spaces/senger/AI-TextGenerator/README.md deleted file mode 100644 index 5065eee12d50a8b6a6666a84525517c77f8e682b..0000000000000000000000000000000000000000 --- a/spaces/senger/AI-TextGenerator/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: AI TextGenerator -emoji: 😻 -colorFrom: green -colorTo: purple -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/senquan/ChuanhuChatGPT/run_Linux.sh b/spaces/senquan/ChuanhuChatGPT/run_Linux.sh deleted file mode 100644 index 62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000 --- a/spaces/senquan/ChuanhuChatGPT/run_Linux.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# 获取脚本所在目录 -script_dir=$(dirname "$0") - -# 将工作目录更改为脚本所在目录 -cd "$script_dir" - -# 检查Git仓库是否有更新 -git remote update -pwd - -if ! git status -uno | grep 'up to date' > /dev/null; then - # 如果有更新,关闭当前运行的服务器 - pkill -f ChuanhuChatbot.py - - # 拉取最新更改 - git pull - - # 安装依赖 - pip3 install -r requirements.txt - - # 重新启动服务器 - nohup python3 ChuanhuChatbot.py & -fi diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Lokicraft 1.17 Version APK for Free - The Best Minecraft Alternative.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Lokicraft 1.17 Version APK for Free - The Best Minecraft Alternative.md deleted file mode 100644 index ff2353cb21ddabfbfa458ae14aca8b69521744ce..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Lokicraft 1.17 Version APK for Free - The Best Minecraft Alternative.md +++ /dev/null @@ -1,110 +0,0 @@ -
          -

          Lokicraft 1.17 Version APK Download: Everything You Need to Know

          -

          Lokicraft is a popular sandbox game that lets you build and explore a vast open world with unlimited resources and creativity. It is inspired by Minecraft, but it is available for free on Android devices. If you are a fan of Lokicraft and want to enjoy the latest features and updates, you might be interested in downloading the Lokicraft 1.17 version APK file. In this article, we will tell you everything you need to know about Lokicraft 1.17 version APK download, including what it is, how to get it, what are the new features, and some tips and tricks to play the game better.

          -

          lokicraft 1.17 version apk download


          Download Zip ===== https://ssurll.com/2uNYlU



          -

          What is Lokicraft 1.17 Version APK?

          -

          An APK file is an Android Package file that contains the installation data for an app or game. It is usually used to install apps or games that are not available on the Google Play Store, or to get access to beta versions or modded versions of apps or games. Lokicraft 1.17 version APK is the latest version of the Lokicraft game that has been released by the developer akseno2. It contains new features and improvements that are not yet available on the official version of the game on the Google Play Store.

          -

          How to Download and Install Lokicraft 1.17 Version APK?

          -

          To download and install Lokicraft 1.17 version APK, you need to follow these steps:

          -
            -
          1. Go to a trusted website that provides the Lokicraft 1.17 version APK file, such as [APKCombo](^1^), [MCPE-Planet](^2^), or [MCPE-PLANET.COM](^3^).
          2. -
          3. Click on the download button and wait for the file to be downloaded on your device.
          4. -
          5. Before installing the file, make sure you have enabled the option to install apps from unknown sources on your device settings.
          6. -
          7. Locate the downloaded file on your device and tap on it to start the installation process.
          8. -
          9. Follow the instructions on the screen and wait for the installation to be completed.
          10. -
          11. Launch the game and enjoy playing Lokicraft 1.17 version.
          12. -
          -

          What are the New Features of Lokicraft 1.17 Version?

          -

          Lokicraft 1.17 version has many new features and improvements that make the game more fun and exciting. Some of the new features are:

          -

          -
            -
          • New blocks and items: Lokicraft 1.17 version adds new blocks and items that you can use to craft and build various structures and tools. Some of the new blocks and items are copper ore, copper ingots, amethysts, geodes, tuff, raw ore blocks, candles, lightning rods, spyglasses, bundles, and more.
          • -
          • New biomes and caves: Lokicraft 1.17 version also introduces new biomes and caves that you can explore and discover in the game world. Some of the new biomes and caves are lush caves, dripstone caves, deepslate caves, mountains, meadows, groves, swamps, badlands, and more.
          • -
• New mobs and animals: Lokicraft 1.17 version also adds new mobs and animals that you can encounter and interact with in the game world. Some of the new mobs and animals are axolotls, goats, glow squids, wardens, sculk sensors, shulkers, endermites, silverfish, spiders, zombies, skeletons, creepers, and more.
          • -
          • New game modes: Lokicraft 1.17 version also offers two game modes that you can choose from: creative mode and survival mode. Creative mode allows you to build and create anything you want with unlimited resources and no enemies or dangers. Survival mode challenges you to survive in a harsh environment with limited resources and hostile mobs.
          • -
          -

          What are Some Tips and Tricks to Play Lokicraft Better?

          -

          Lokicraft is a fun and addictive game that can keep you entertained for hours. However, it can also be a bit difficult and frustrating at times, especially if you are new to the game or playing in survival mode. Here are some tips and tricks that can help you play Lokicraft better and have more fun:

          -
            -
          • Learn the basics: If you are new to Lokicraft, you should start by learning the basics of the game, such as how to move, jump, fly, mine, craft, build, and use the inventory. You can find tutorials and guides on the game's official website or on YouTube.
          • -
          • Plan ahead: Before you start building or exploring, you should have a clear idea of what you want to do and how you want to do it. You should also prepare the necessary resources and tools that you will need for your project or adventure. For example, if you want to build a house, you should gather enough wood, stone, glass, and other materials. If you want to explore a cave, you should bring enough torches, food, weapons, and armor.
          • -
          • Be creative: One of the best things about Lokicraft is that it allows you to express your creativity and imagination. You can build and create anything you want in the game, from simple houses and farms to complex castles and machines. You can also customize your character and your world with different skins and textures. You can find inspiration and ideas from other players' creations online or from your own imagination.
          • -
          • Be careful: Lokicraft can also be dangerous and challenging, especially in survival mode. You have to deal with hunger, thirst, health, weather, day and night cycles, and hostile mobs that can attack you or destroy your creations. You should always be careful and alert when playing the game, and avoid taking unnecessary risks or going unprepared. You should also save your progress frequently and backup your world files in case something goes wrong.
          • -
          • Have fun: The most important tip for playing Lokicraft is to have fun and enjoy the game. Lokicraft is a game that can offer endless possibilities and adventures for players of all ages and preferences. You can play the game solo or with your friends online. You can also join online servers and communities where you can chat, trade, compete, or cooperate with other players. You can also mod the game or use cheats if you want to change the game rules or add more features.
          • -
          -

          Conclusion

          -

          Lokicraft is a great game that can provide hours of entertainment and fun for anyone who loves sandbox games. It is free to download and play on Android devices, and it has many features and updates that make it more enjoyable and exciting. If you want to download the latest version of the game, which is Lokicraft 1.17 version APK, you can follow the steps we have provided in this article. We hope this article has been helpful and informative for you. Happy gaming!

          -

          FAQs

          -

          What is the difference between Lokicraft and Minecraft?

          -

          Lokicraft and Minecraft are both sandbox games that let you build and explore a vast open world with unlimited resources and creativity. However, there are some differences between them. For example:

          -
            -
          • Lokicraft is free to download and play on Android devices, while Minecraft requires a purchase on various platforms.
          • -
          • Lokicraft has more blocks and items than Minecraft, but Minecraft has more mobs and animals than Lokicraft.
          • -
          • Lokicraft has simpler graphics and controls than Minecraft, but Minecraft has more realistic physics and mechanics than Lokicraft.
          • -
          • Lokicraft has fewer updates and bug fixes than Minecraft, but Minecraft has more mods and customizations than Lokicraft.
          • -
          -

          Is Lokicraft safe to download?

          -

          Lokicraft is safe to download as long as you download it from a trusted website that provides the original APK file without any viruses or malware. You should also scan the file with an antivirus app before installing it on your device. However, you should be aware that downloading APK files from unknown sources may pose some risks to your device's security and performance. Therefore, you should always be careful when downloading APK files online.

          -

          How do I update Lokicraft?

          -

          If you have downloaded Lokicraft from the Google Play Store, you can update it automatically or manually through the app store. However, if you have downloaded Lokicraft from an APK file, you will need to download the latest version of the APK file from a trusted website and install it on your device. You may also need to uninstall the previous version of the game before installing the new one.

          -

          How do I uninstall Lokicraft?

          -

          If you want to uninstall Lokicraft from your device, you can follow these steps:

          -
          1. Go to your device settings and tap on Apps or Applications.
          2. -
          3. Find and tap on Lokicraft from the list of apps.
          4. -
          5. Tap on Uninstall and confirm your action.
          6. -
          7. Wait for the app to be uninstalled from your device.
          8. -
          -

          How do I contact the developer of Lokicraft?

          -

          If you have any questions, feedback, or suggestions for the developer of Lokicraft, you can contact them through their email address: akseno2@gmail.com. You can also follow them on their social media accounts: Facebook, Twitter, Instagram, and YouTube. You can also visit their official website: https://akseno2.com/.

          -

          -


          -
          -
          \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/European Ship Simulator A Speeding Power Boat a Humble Tug and a Luxury Cruise Liner.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/European Ship Simulator A Speeding Power Boat a Humble Tug and a Luxury Cruise Liner.md deleted file mode 100644 index 795eb8c92ea18a95672ba3105644633fe0f9a58a..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/European Ship Simulator A Speeding Power Boat a Humble Tug and a Luxury Cruise Liner.md +++ /dev/null @@ -1,142 +0,0 @@ -
          -

          How to Download European Ship Simulator

          -

          If you are a fan of simulation games and want to experience the thrill of captaining different types of ships in realistic European ports, then you might be interested in downloading European Ship Simulator. This game lets you navigate eight highly detailed vessels, from a speeding power boat to a luxury cruise liner, and fight against realistic water physics and weather conditions. In this article, we will show you what European Ship Simulator is, where you can download it, and how you can install and play it.

          -

          download european ship simulator


          Download File ->->->-> https://ssurll.com/2uO1ch



          -

          What is European Ship Simulator?

          -

          European Ship Simulator is a simulation game developed by Excalibur and published by Excalibur Games in 2016. It is a sequel to the 2014 game Ship Simulator: Maritime Search and Rescue, and it features improved graphics, physics, and gameplay. The game allows you to take control of eight different ships, each with their own characteristics and controls, and explore six famous European ports, including Dover, Rostock, and Gibraltar. You can also create your own missions using the mission editor, which lets you adjust the wave height, spawn AI ships, and set triggers. The game has 20 missions to complete, some of which can take up to an hour.

          -

          Features of European Ship Simulator

          -

          Some of the features that make European Ship Simulator an enjoyable and realistic game are:

          -
            -
          • Eight ships with highly detailed bridges that you can explore in first-person mode.
          • -
          • Six ports to visit, each with their own landmarks and scenery.
          • -
          • Realistic water physics that affect the movement and stability of your ship.
          • -
          • Weather conditions that include rain, sun, and snow.
          • -
          • A mission editor that allows you to customize your own scenarios.
          • -
          • Steam achievements and trading cards to collect.
          • -
          -

          System Requirements for European Ship Simulator

          -

          Before you download European Ship Simulator, you need to make sure that your computer meets the minimum system requirements for the game. These are:

          -


          -
            -
          • OS: Windows Vista/7/8 (64-bit)
          • -
          • CPU: Intel i3 2.6 or equivalent
          • -
          • RAM: 4 GB
          • -
          • Graphics: Dedicated graphics with 1GB VRAM (DX11 compatible - Nvidia Geforce GTX 470/ATI Radeon 6900 series or greater)
          • -
          • DirectX: Version 11
          • -
          • Storage: 2700 MB available space
          • -
          -

          Where to Download European Ship Simulator?

          -

          There are several sources where you can download European Ship Simulator for your PC. Here are some of the most popular ones:

          -

          Steam

          -

          Steam is the most convenient and reliable way to download European Ship Simulator. Steam is a digital distribution platform that offers thousands of games for various genres and platforms. You can buy European Ship Simulator on Steam for $19.99 and download it instantly. You can also access the Steam community hub for the game, where you can find guides, reviews, discussions, screenshots, videos, and more. To download European Ship Simulator on Steam, you need to have a Steam account and the Steam client installed on your computer. You can create a Steam account for free at [1](https://store.steampowered.com/join/) and download the Steam client at [2](https://store.steampowered.com/about/). Once you have them, you can follow these steps:

          -
            -
          1. Launch the Steam client and log in to your account.
          2. -
          3. Go to the Store tab and search for European Ship Simulator.
          4. -
          5. Select the game from the search results and click on Add to Cart.
          6. -
          7. Proceed to checkout and choose your payment method.
          8. -
          9. After completing the purchase, go to the Library tab and find European Ship Simulator in your games list.
          10. Click on Install and wait for the download to finish.
          11. -
          -

          Congratulations, you have successfully downloaded European Ship Simulator on Steam. You can now launch the game from your library and enjoy it.

          -

          G2A.com

          -

          G2A.com is another source where you can download European Ship Simulator. G2A.com is an online marketplace that sells digital products, such as games, software, gift cards, and more. You can buy European Ship Simulator on G2A.com for a lower price than Steam, as low as $4.99. However, you need to be careful when buying from G2A.com, as some sellers may offer fraudulent or invalid keys. To avoid this, you should check the seller's rating, feedback, and guarantee before making a purchase. You also need to have a G2A account and a Steam account to download European Ship Simulator on G2A.com. You can create a G2A account for free at [3](https://www.g2a.com/register) and a Steam account at [1](https://store.steampowered.com/join/). Once you have them, you can follow these steps:

          -
            -
          1. Go to [4](https://www.g2a.com/en-us/european-ship-simulator-steam-key-global-i10000002833001) and select the seller that offers the best price and quality for European Ship Simulator.
          2. -
          3. Click on Buy Now and log in to your G2A account.
          4. -
          5. Choose your payment method and complete the transaction.
          6. -
          7. After receiving the confirmation email, go to your G2A account and find the key for European Ship Simulator in your purchased products.
          8. -
          9. Copy the key and launch the Steam client.
          10. -
          11. Go to the Games menu and select Activate a Product on Steam.
          12. -
          13. Paste the key and follow the instructions to add European Ship Simulator to your Steam library.
          14. -
          15. Go to your library and click on Install to download the game.
          16. -
          -

          Well done, you have successfully downloaded European Ship Simulator on G2A.com. You can now launch the game from your library and enjoy it.

          -

          Other Sources

          -

          Besides Steam and G2A.com, there are other sources where you can download European Ship Simulator. However, these sources may not be as safe or reliable as the ones mentioned above. Some of these sources may offer pirated or cracked versions of the game, which may contain viruses, malware, or errors. They may also violate the terms of service of the game developer and publisher, and expose you to legal risks. Therefore, we do not recommend downloading European Ship Simulator from these sources. If you do decide to use them, you should do so at your own risk and discretion. Some of these sources are:

          -
            -
          • Torrent sites, such as The Pirate Bay, Kickass Torrents, or 1337x.
          • -
          • Direct download sites, such as Mega.nz, Mediafire, or Zippyshare.
          • -
          • Crack sites, such as Skidrow Reloaded, CPY Games, or FitGirl Repacks.
          • -
          -

          How to Install and Play European Ship Simulator?

          -

          After downloading European Ship Simulator from a legitimate source, you need to install and play it on your computer. The installation process may vary depending on the source you used, but generally it is quite simple and straightforward. The gameplay tips may also differ depending on the ship and mission you choose, but generally they are quite fun and challenging. Here are some basic steps and tips for installing and playing European Ship Simulator:

          -

          Installation Steps

          -

          If you downloaded European Ship Simulator from Steam or G2A.com, then you have already installed it on your computer when you added it to your Steam library. You can skip this section and go to the gameplay tips section. If you downloaded European Ship Simulator from another source, then you need to follow these steps:

          -
            -
          1. Extract the downloaded file using a program like WinRAR or 7-Zip.
          2. -
          3. Open the extracted folder and find the setup file for European Ship Simulator.
          4. -
          5. Run the setup file and follow the instructions to install the game on your computer.
          6. -
          7. If the game requires a crack or a patch, copy it from the extracted folder and paste it in the game installation folder.
          8. -
          9. If the game requires activation or registration, use a keygen or a serial number from the extracted folder or another source.
          10. -
          -

          You have successfully installed European Ship Simulator on your computer. You can now launch the game from its shortcut or its installation folder and enjoy it.

          -

          Gameplay Tips

          -


European Ship Simulator is a game that requires skill, patience, and attention to detail. You need to master the controls and functions of each ship, as well as the navigation and communication systems. You also need to follow the rules and regulations of each port and keep an eye on the weather and traffic conditions. Here are some tips that can help you improve your gameplay:

          -
            -
          • Read the manual and tutorials for each ship and mission. They will give you valuable information and instructions on how to operate and complete them.
          • -
          • Use the keyboard and mouse shortcuts to access the different views, cameras, and menus. They will make your gameplay easier and faster.
          • -
          • Adjust the settings and options to suit your preferences and performance. You can change the graphics, sound, controls, difficulty, and language of the game.
          • -
          • Use the map and radar to plan your route and avoid obstacles and collisions. You can also use the compass and GPS to orient yourself and find your destination.
          • -
          • Use the radio and horn to communicate with other ships and port authorities. You can also use the chat and voice chat to communicate with other players online.
          • -
          -

          Conclusion

          -

          In conclusion, European Ship Simulator is a simulation game that lets you experience the thrill of captaining different types of ships in realistic European ports. You can download it from various sources, such as Steam or G2A.com, or install it from other sources at your own risk. You can also play it on your computer by following the installation steps and gameplay tips we provided. We hope you enjoyed this article and found it helpful. If you have any questions or feedback, please feel free to leave them in the comments section below.

          -

          Summary of the Article

          -

          This article showed you how to download European Ship Simulator, a simulation game that features eight ships and six ports. It also explained what European Ship Simulator is, what its features and system requirements are, where you can download it, how you can install and play it, and some gameplay tips.

          -

          FAQs

          -

          Here are some frequently asked questions about European Ship Simulator:

          -
            -
          1. Q: Is European Ship Simulator multiplayer?
            A: Yes, European Ship Simulator has a multiplayer mode that allows you to play with up to 16 players online. You can join or create public or private servers, chat with other players, and cooperate or compete with them.
          2. -
          3. Q: Is European Ship Simulator realistic?
            A: Yes, European Ship Simulator has realistic graphics, physics, and gameplay. The ships are modeled after real ones, the ports are based on real locations, the water behaves according to physical laws, and the weather affects the visibility and stability of your ship.
          4. -
          5. Q: Is European Ship Simulator moddable?
            A: Yes, European Ship Simulator has a modding community that creates and shares custom content for the game. You can find mods that add new ships, ports, missions, skins, sounds, and more. You can also create your own mods using the mission editor or other tools.
          6. -
          7. Q: Is European Ship Simulator compatible with VR?
            A: No, European Ship Simulator does not support VR devices at the moment. However, some players have reported that they were able to play it with VR using third-party software or hardware.
          8. -
          9. Q: Is European Ship Simulator worth buying?
            A: That depends on your personal preference and budget. If you like simulation games and want to try something different from driving or flying games, then you might enjoy European Ship Simulator. However, if you are looking for a more complex or realistic simulation game, then you might be disappointed by European Ship Simulator. The game has mixed reviews on Steam, with some players praising its graphics and gameplay, while others criticizing its bugs and lack of content. You can check out some of the reviews [5](https://store.steampowered.com/app/299250/European_Ship_Simulator/) before making your decision.
          10. -

          -
          -
          \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/utils/convert_py_to_npy.py b/spaces/skf15963/summary/fengshen/utils/convert_py_to_npy.py deleted file mode 100644 index 0d652169b59ffdc7ca977318ee72187b2ce73c1f..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/utils/convert_py_to_npy.py +++ /dev/null @@ -1,54 +0,0 @@ -import argparse -import torch -import glob -import os -import numpy as np - - -class MMapIndexDataset(): - def __init__(self, datapath): - self.idxfp = np.load(datapath + '.npy', mmap_mode='r') - self.binfp = np.memmap(datapath + '.bin', dtype='long', mode='r') - - def __len__(self): - return self.idxfp.shape[0] - - def __getitem__(self, idx): - return self.binfp[self.idxfp[idx, 0]:self.idxfp[idx, 1]] - - -def convert_py_to_npy(input_tensor, bin_out, idx_out): - idx = torch.empty(len(input_tensor), 2, dtype=torch.long) - start = 0 - for i, input in enumerate(input_tensor): - idx[i] = torch.tensor([start, start + len(input)]) - start += len(input) - np.save(idx_out, idx) - binfp = np.memmap(bin_out, dtype='long', mode='w+', shape=(start)) - start = 0 - for i, input in enumerate(input_tensor): - for j, idx in enumerate(input): - binfp[start + j] = idx - start += len(input) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description="Text infilling.") - parser.add_argument('--data_path', type=str, - default='/cognitive_comp/gaoxinyu/data/wudao') - args = parser.parse_args() - process_key = [ - 'incorrect_input_ids_list', - 'label_ids_list', - 'target_ids_list', - ] - if os.path.exists(args.data_path): - print(f'''Loading data from {args.data_path}''') - data_dict = torch.load(args.data_path) - for k in process_key: - bin_out = ('_' + k + '.bin').join(args.data_path.rsplit('.pt', 1)) - idx_out = ('_' + k).join(args.data_path.rsplit('.pt', 1)) - convert_py_to_npy(data_dict[k], bin_out, idx_out) - else: - print( - f'Please create the synthetic datafile {args.data_path} with create_synthetic_data.py.') diff --git a/spaces/sklearn-docs/feature_agglomeration/app.py b/spaces/sklearn-docs/feature_agglomeration/app.py deleted file mode 100644 index 48a0cbc02780fc8b10c37d58be3bca85a1a3fb7f..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/feature_agglomeration/app.py +++ /dev/null @@ -1,113 +0,0 @@ -# Code source: Gaël Varoquaux -# Modified for documentation by Jaques Grobler -# License: BSD 3 clause - -import gradio as gr - -import numpy as np -import matplotlib.pyplot as plt - -from sklearn import datasets, cluster -from sklearn.feature_extraction.image import grid_to_graph -from datasets import load_dataset - -plt.switch_backend("agg") - - -# Theme from - https://huggingface.co/spaces/trl-lib/stack-llama/blob/main/app.py -theme = gr.themes.Monochrome( - primary_hue="indigo", - secondary_hue="blue", - neutral_hue="slate", - radius_size=gr.themes.sizes.radius_sm, - font=[ - gr.themes.GoogleFont("Open Sans"), - "ui-sans-serif", - "system-ui", - "sans-serif", - ], -) - - -def do_submit(n_clusters): - # Load the dataset - dataset = load_dataset("sklearn-docs/digits", header=None) - # convert dataset to pandas - df = dataset["train"].to_pandas() - X = df.iloc[:, :64] - labels = df.iloc[:, 64] - images = X.values.reshape(-1, 8, 8) - connectivity = grid_to_graph(*images[0].shape) - - agglo = cluster.FeatureAgglomeration( - connectivity=connectivity, n_clusters=int(n_clusters) - ) - agglo.fit(X) - X_reduced = agglo.transform(X) - - X_restored = agglo.inverse_transform(X_reduced) - 
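# Reshape the reconstructed 64-feature vectors back into 8x8 images so they can be plotted next to the originals.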
images_restored = np.reshape(X_restored, images.shape) - plt.figure(1, figsize=(4, 3.5)) - plt.clf() - plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.91) - for i in range(4): - plt.subplot(3, 4, i + 1) - plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation="nearest") - plt.xticks(()) - plt.yticks(()) - if i == 1: - plt.title("Original data") - plt.subplot(3, 4, 4 + i + 1) - plt.imshow( - images_restored[i], cmap=plt.cm.gray, vmax=16, interpolation="nearest" - ) - if i == 1: - plt.title("Agglomerated data") - plt.xticks(()) - plt.yticks(()) - - plt.subplot(3, 4, 10) - plt.imshow( - np.reshape(agglo.labels_, images[0].shape), - interpolation="nearest", - cmap=plt.cm.nipy_spectral, - ) - plt.xticks(()) - plt.yticks(()) - plt.title("Labels") - return plt - - -title = "Feature Agglomeration" -with gr.Blocks(title=title, theme=theme) as demo: - gr.Markdown(f"## {title}") - gr.Markdown( - "These images show how similar features are merged together using feature agglomeration." - ) - gr.Markdown( - "[Scikit-learn Example](https://scikit-learn.org/stable/auto_examples/cluster/plot_digits_agglomeration.html)" - ) - - gr.Markdown( - "The FeatureAgglomeration uses [agglomerative clustering](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html#sklearn.cluster.AgglomerativeClustering)\ - to group together features that look very similar, thus decreasing the number of features. It is a dimensionality reduction \ - tool, see [Unsupervised dimensionality reduction](https://scikit-learn.org/stable/modules/unsupervised_reduction.html#data-reduction)." - ) - - with gr.Row(): - n_clusters = gr.Slider( - minimum=10, - maximum=50, - label="Number of clusters", - info="Number of clusters for FeatureAgglomeration", - step=1, - value=32, - ) - - plt_out = gr.Plot() - n_clusters.change(do_submit, n_clusters, plt_out) - - - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/skyler36237/vits-uma-genshin-honkai/app.py b/spaces/skyler36237/vits-uma-genshin-honkai/app.py deleted file mode 100644 index 92ddafdcd240434f58569b0e6964ef331a971dcf..0000000000000000000000000000000000000000 --- a/spaces/skyler36237/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import time -import gradio as gr -import utils -import commons -from models import SynthesizerTrn -from text import text_to_sequence -from torch import no_grad, LongTensor -import torch - -hps_ms = utils.get_hparams_from_file(r'./model/config.json') -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model).to(device) -_ = net_g_ms.eval() -speakers = hps_ms.speakers -model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 500: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif 
language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - speaker_id = LongTensor([speaker_id]) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - with gr.Blocks() as app: - gr.Markdown( - "#
          VITS语音在线合成demo\n" - "
          主要有赛马娘,原神中文,原神日语,崩坏3的音色
          " - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate") - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - app.queue(concurrency_count=1).launch() \ No newline at end of file diff --git a/spaces/society-ethics/model-card-regulatory-check/tests/cards/facebook___bart-large-mnli.md b/spaces/society-ethics/model-card-regulatory-check/tests/cards/facebook___bart-large-mnli.md deleted file mode 100644 index a721783c137fbdb8a664c79b049936d3a12804ec..0000000000000000000000000000000000000000 --- a/spaces/society-ethics/model-card-regulatory-check/tests/cards/facebook___bart-large-mnli.md +++ /dev/null @@ -1,73 +0,0 @@ -# bart-large-mnli - -This is the checkpoint for [bart-large](https://huggingface.co/facebook/bart-large) after being trained on the [MultiNLI (MNLI)](https://huggingface.co/datasets/multi_nli) dataset. - -Additional information about this model: -- The [bart-large](https://huggingface.co/facebook/bart-large) model page -- [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension -](https://arxiv.org/abs/1910.13461) -- [BART fairseq implementation](https://github.com/pytorch/fairseq/tree/master/fairseq/models/bart) - -## NLI-based Zero Shot Text Classification - -[Yin et al.](https://arxiv.org/abs/1909.00161) proposed a method for using pre-trained NLI models as a ready-made zero-shot sequence classifiers. The method works by posing the sequence to be classified as the NLI premise and to construct a hypothesis from each candidate label. For example, if we want to evaluate whether a sequence belongs to the class "politics", we could construct a hypothesis of `This text is about politics.`. The probabilities for entailment and contradiction are then converted to label probabilities. - -This method is surprisingly effective in many cases, particularly when used with larger pre-trained models like BART and Roberta. 
See [this blog post](https://joeddav.github.io/blog/2020/05/29/ZSL.html) for a more expansive introduction to this and other zero shot methods, and see the code snippets below for examples of using this model for zero-shot classification both with Hugging Face's built-in pipeline and with native Transformers/PyTorch code. - -#### With the zero-shot classification pipeline - -The model can be loaded with the `zero-shot-classification` pipeline like so: - -```python -from transformers import pipeline -classifier = pipeline("zero-shot-classification", - model="facebook/bart-large-mnli") -``` - -You can then use this pipeline to classify sequences into any of the class names you specify. - -```python -sequence_to_classify = "one day I will see the world" -candidate_labels = ['travel', 'cooking', 'dancing'] -classifier(sequence_to_classify, candidate_labels) -#{'labels': ['travel', 'dancing', 'cooking'], -# 'scores': [0.9938651323318481, 0.0032737774308770895, 0.002861034357920289], -# 'sequence': 'one day I will see the world'} -``` - -If more than one candidate label can be correct, pass `multi_class=True` to calculate each class independently: - -```python -candidate_labels = ['travel', 'cooking', 'dancing', 'exploration'] -classifier(sequence_to_classify, candidate_labels, multi_class=True) -#{'labels': ['travel', 'exploration', 'dancing', 'cooking'], -# 'scores': [0.9945111274719238, -# 0.9383890628814697, -# 0.0057061901316046715, -# 0.0018193122232332826], -# 'sequence': 'one day I will see the world'} -``` - - -#### With manual PyTorch - -```python -# pose sequence as a NLI premise and label as a hypothesis -from transformers import AutoModelForSequenceClassification, AutoTokenizer -nli_model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli') -tokenizer = AutoTokenizer.from_pretrained('facebook/bart-large-mnli') - -premise = sequence -hypothesis = f'This example is {label}.' 
- -# run through model pre-trained on MNLI -x = tokenizer.encode(premise, hypothesis, return_tensors='pt', - truncation_strategy='only_first') -logits = nli_model(x.to(device))[0] - -# we throw away "neutral" (dim 1) and take the probability of -# "entailment" (2) as the probability of the label being true -entail_contradiction_logits = logits[:,[0,2]] -probs = entail_contradiction_logits.softmax(dim=1) -prob_label_is_true = probs[:,1] -``` \ No newline at end of file diff --git a/spaces/songdaooi/Swap/assets/pretrained_models/readme.md b/spaces/songdaooi/Swap/assets/pretrained_models/readme.md deleted file mode 100644 index fd26cd784fbfa3af2cebfb6190b0aa55c92b85e5..0000000000000000000000000000000000000000 --- a/spaces/songdaooi/Swap/assets/pretrained_models/readme.md +++ /dev/null @@ -1,4 +0,0 @@ -## Downolad these models here -- [inswapper_128.onnx](https://huggingface.co/deepinsight/inswapper/resolve/main/inswapper_128.onnx) -- [GFPGANv1.4.pth](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth) -- [79999_iter.pth](https://drive.google.com/open?id=154JgKpzCPW82qINcVieuPH3fZ2e0P812) diff --git a/spaces/songwy/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py b/spaces/songwy/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py deleted file mode 100644 index acd00238895d57ba878fd0211d5654250fb10061..0000000000000000000000000000000000000000 --- a/spaces/songwy/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py +++ /dev/null @@ -1,509 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import ONNXVITS_modules as modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - self.w = None - self.reverse = None - self.noise_scale = None - def forward(self, x, x_mask, g=None): - w = self.w - reverse = self.reverse - noise_scale = self.noise_scale - - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - 
kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - self.reverse = None - def forward(self, x, x_mask, g=None): - reverse = self.reverse - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask # x_in : [b, c, t] -> [b, h, t] - x = self.enc(x, x_mask, g=g) # x_in : [b, h, t], g : [b, h, 1], x = x_in + g - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask # z, m, logs : [b, h, t] - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, 
k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - 
y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - - if n_speakers > 0: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, sid=None, noise_scale=.667, length_scale=1, noise_scale_w=.8, max_len=None): - torch.onnx.export( - self.enc_p, - (x, x_lengths), - "ONNX_net/enc_p.onnx", - input_names=["x", "x_lengths"], - output_names=["xout", "m_p", "logs_p", "x_mask"], - dynamic_axes={ - "x" : [1], - "xout" : [2], - "m_p" : [2], - "logs_p" : [2], - "x_mask" : [2] - }, - verbose=True, - ) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - self.dp.reverse = True - self.dp.noise_scale = noise_scale_w - torch.onnx.export( - self.dp, - (x, x_mask, g), - "ONNX_net/dp.onnx", - input_names=["x", "x_mask", "g"], - output_names=["logw"], - dynamic_axes={ - "x" : [2], - "x_mask" : [2], - "logw" : [2] - }, - verbose=True, - ) - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] 
- - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - self.flow.reverse = True - torch.onnx.export( - self.flow, - (z_p, y_mask, g), - "ONNX_net/flow.onnx", - input_names=["z_p", "y_mask", "g"], - output_names=["z"], - dynamic_axes={ - "z_p" : [2], - "y_mask" : [2], - "z" : [2] - }, - verbose=True, - ) - z = self.flow(z_p, y_mask, g=g) - z_in = (z * y_mask)[:,:,:max_len] - - torch.onnx.export( - self.dec, - (z_in, g), - "ONNX_net/dec.onnx", - input_names=["z_in", "g"], - output_names=["o"], - dynamic_axes={ - "z_in" : [2], - "o" : [2] - }, - verbose=True, - ) - o = self.dec(z_in, g=g) - return o diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/README.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/README.md deleted file mode 100644 index 46ff9c351b1030e0729f89f246e0cd86444c1633..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/README.md +++ /dev/null @@ -1,158 +0,0 @@ -# Multilingual Translation - -[[Multilingual Translation with Extensible Multilingual Pretraining and Finetuning, https://arxiv.org/abs/2008.00401]](https://arxiv.org/abs/2008.00401) - -## Introduction - -This work is for training multilingual translation models with multiple bitext datasets. This multilingual translation framework supports (see [[training section]](#Training) and [[finetuning section]](#Finetuning) for examples) - -* temperature based sampling over unbalancing datasets of different translation directions - - --sampling-method' with - choices=['uniform', 'temperature', 'concat'] - - --sampling-temperature -* configurable to automatically add source and/or target language tokens to source/target sentences using data which are prepared in the same way as bilignual training - - --encoder-langtok with choices=['src', 'tgt', None] to specify whether to add source or target language tokens to the source sentences - - --decoder-langtok (binary option) to specify whether to add target language tokens to the target sentences or not -* finetuning mBART pretrained models for multilingual translation - - --finetune-from-model to specify the path from which to load the pretrained model - -## Preprocessing data -Multilingual training requires a joint BPE vocab. Please follow [mBART's preprocessing steps](https://github.com/pytorch/fairseq/tree/main/examples/mbart#bpe-data) to reuse our pretrained sentence-piece model. - -You can also train a joint BPE model on your own dataset and then follow the steps in [[link]](https://github.com/pytorch/fairseq/tree/main/examples/translation#multilingual-translation). 
- -## Training - - -```bash -lang_pairs= -path_2_data= -lang_list= - -fairseq-train $path_2_data \ - --encoder-normalize-before --decoder-normalize-before \ - --arch transformer --layernorm-embedding \ - --task translation_multi_simple_epoch \ - --sampling-method "temperature" \ - --sampling-temperature 1.5 \ - --encoder-langtok "src" \ - --decoder-langtok \ - --lang-dict "$lang_list" \ - --lang-pairs "$lang_pairs" \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.2 \ - --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \ - --lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \ - --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \ - --max-tokens 1024 --update-freq 2 \ - --save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \ - --seed 222 --log-format simple --log-interval 2 -``` - -## Finetuning -We can also finetune multilingual models from a monolingual pretrained models, e.g. [mMBART](https://github.com/pytorch/fairseq/tree/main/examples/mbart). -```bash -lang_pairs= -path_2_data= -lang_list= -pretrained_model= - -fairseq-train $path_2_data \ - --finetune-from-model $pretrained_model \ - --encoder-normalize-before --decoder-normalize-before \ - --arch transformer --layernorm-embedding \ - --task translation_multi_simple_epoch \ - --sampling-method "temperature" \ - --sampling-temperature 1.5 \ - --encoder-langtok "src" \ - --decoder-langtok \ - --lang-dict "$lang_list" \ - --lang-pairs "$lang_pairs" \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.2 \ - --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \ - --lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \ - --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \ - --max-tokens 1024 --update-freq 2 \ - --save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \ - --seed 222 --log-format simple --log-interval 2 -``` -## Generate -The following command uses the multilingual task (translation_multi_simple_epoch) to generate translation from $source_lang to $target_lang on the test dataset. During generaton, the source language tokens are added to source sentences and the target language tokens are added as the starting token to decode target sentences. Options --lang-dict and --lang-pairs are needed to tell the generation process the ordered list of languages and translation directions that the trained model are awared of; they will need to be consistent with the training. - -```bash -model= -source_lang= -target_lang= - -fairseq-generate $path_2_data \ - --path $model \ - --task translation_multi_simple_epoch \ - --gen-subset test \ - --source-lang $source_lang \ - --target-lang $target_lang - --sacrebleu --remove-bpe 'sentencepiece'\ - --batch-size 32 \ - --encoder-langtok "src" \ - --decoder-langtok \ - --lang-dict "$lang_list" \ - --lang-pairs "$lang_pairs" > ${source_lang}_${target_lang}.txt -``` -Fairseq will generate translation into a file {source_lang}_${target_lang}.txt with sacreblue at the end. - -You can also use costomized tokenizer to compare the performance with the literature. 
For example, you get a tokenizer [here](https://github.com/rsennrich/wmt16-scripts) and do the following: -```bash -TOKENIZER= -TOK_CMD=<"$TOKENIZER $target_lang" or cat for sacrebleu> - -cat {source_lang}_${target_lang}.txt | grep -P "^H" |sort -V |cut -f 3- |$TOK_CMD > ${source_lang}_${target_lang}.hyp -cat {source_lang}_${target_lang}.txt | grep -P "^T" |sort -V |cut -f 2- |$TOK_CMD > ${source_lang}_${target_lang}.ref -sacrebleu -tok 'none' -s 'none' ${source_lang}_${target_lang}.ref < ${source_lang}_${target_lang}.hyp -``` - -# mBART50 models - -* [mMBART 50 pretrained model](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.pretrained.tar.gz). -* [mMBART 50 finetuned many-to-one](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.ft.n1.tar.gz). -* [mMBART 50 finetuned one-to-many](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.ft.1n.tar.gz). -* [mMBART 50 finetuned many-to-many](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.ft.nn.tar.gz). - -Please download and extract from the above tarballs. Each tarball contains -* The fairseq model checkpoint: model.pt -* The list of supported languages: ML50_langs.txt -* Sentence piece model: sentence.bpe.model -* Fairseq dictionary of each language: dict.{lang}.txt (please replace lang with a language specified in ML50_langs.txt) - -To use the trained models, -* use the tool [binarize.py](./data_scripts/binarize.py) to binarize your data using sentence.bpe.model and dict.{lang}.txt, and copy the dictionaries to your data path -* then run the generation command: -```bash -path_2_data= -model=/model.pt -lang_list=/ML50_langs.txt -source_lang= -target_lang= - -fairseq-generate $path_2_data \ - --path $model \ - --task translation_multi_simple_epoch \ - --gen-subset test \ - --source-lang $source_lang \ - --target-lang $target_lang - --sacrebleu --remove-bpe 'sentencepiece'\ - --batch-size 32 \ - --encoder-langtok "src" \ - --decoder-langtok \ - --lang-dict "$lang_list" -``` - -## Citation - -```bibtex -@article{tang2020multilingual, - title={Multilingual Translation with Extensible Multilingual Pretraining and Finetuning}, - author={Yuqing Tang and Chau Tran and Xian Li and Peng-Jen Chen and Naman Goyal and Vishrav Chaudhary and Jiatao Gu and Angela Fan}, - year={2020}, - eprint={2008.00401}, - archivePrefix={arXiv}, - primaryClass={cs.CL} -} -``` diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/utils/monotonic_attention.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/utils/monotonic_attention.py deleted file mode 100644 index 61dbb112bfd5ea7b92f2739f046910f486bb0153..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/utils/monotonic_attention.py +++ /dev/null @@ -1,198 +0,0 @@ -from typing import Optional -import torch -from torch import Tensor - -from examples.simultaneous_translation.utils.functions import ( - exclusive_cumprod, - prob_check, - moving_sum, -) - - -def expected_alignment_from_p_choose( - p_choose: Tensor, - padding_mask: Optional[Tensor] = None, - eps: float = 1e-6 -): - """ - Calculating expected alignment for from stepwise probability - - Reference: - Online and Linear-Time Attention by Enforcing Monotonic Alignments - https://arxiv.org/pdf/1704.00784.pdf - - q_ij = (1 − p_{ij−1})q_{ij−1} + a+{i−1j} - a_ij = p_ij q_ij - - Parallel solution: - ai = p_i * cumprod(1 − pi) * cumsum(a_i / 
cumprod(1 − pi)) - - ============================================================ - Expected input size - p_choose: bsz, tgt_len, src_len - """ - prob_check(p_choose) - - # p_choose: bsz, tgt_len, src_len - bsz, tgt_len, src_len = p_choose.size() - dtype = p_choose.dtype - - p_choose = p_choose.float() - - if padding_mask is not None: - p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0.0) - - # cumprod_1mp : bsz, tgt_len, src_len - cumprod_1mp = exclusive_cumprod(1 - p_choose, dim=2, eps=eps) - cumprod_1mp_clamp = torch.clamp(cumprod_1mp, eps, 1.0) - - alpha_0 = p_choose.new_zeros([bsz, 1, src_len]) - alpha_0[:, :, 0] = 1.0 - - previous_alpha = [alpha_0] - - for i in range(tgt_len): - # p_choose: bsz , tgt_len, src_len - # cumprod_1mp_clamp : bsz, tgt_len, src_len - # previous_alpha[i]: bsz, 1, src_len - # alpha_i: bsz, src_len - alpha_i = ( - p_choose[:, i] - * cumprod_1mp[:, i] - * torch.cumsum( - previous_alpha[i][:, 0] / cumprod_1mp_clamp[:, i], dim=1 - ) - ).clamp(0, 1.0) - - previous_alpha.append(alpha_i.unsqueeze(1)) - - # alpha: bsz * num_heads, tgt_len, src_len - alpha = torch.cat(previous_alpha[1:], dim=1) - - # Mix precision to prevent overflow for fp16 - alpha = alpha.type(dtype) - - prob_check(alpha) - - return alpha - - -def expected_soft_attention( - alpha: Tensor, - soft_energy: Tensor, - padding_mask: Optional[Tensor] = None, - chunk_size: Optional[int] = None, - eps: float = 1e-10 -): - """ - Function to compute expected soft attention for - monotonic infinite lookback attention from - expected alignment and soft energy. - - Reference: - Monotonic Chunkwise Attention - https://arxiv.org/abs/1712.05382 - - Monotonic Infinite Lookback Attention for Simultaneous Machine Translation - https://arxiv.org/abs/1906.05218 - - alpha: bsz, tgt_len, src_len - soft_energy: bsz, tgt_len, src_len - padding_mask: bsz, src_len - left_padding: bool - """ - if padding_mask is not None: - alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0) - soft_energy = soft_energy.masked_fill( - padding_mask.unsqueeze(1), -float("inf") - ) - - prob_check(alpha) - - dtype = alpha.dtype - - alpha = alpha.float() - soft_energy = soft_energy.float() - - soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0] - exp_soft_energy = torch.exp(soft_energy) + eps - - if chunk_size is not None: - # Chunkwise - beta = ( - exp_soft_energy - * moving_sum( - alpha / (eps + moving_sum(exp_soft_energy, chunk_size, 1)), - 1, chunk_size - ) - ) - else: - # Infinite lookback - # Notice that infinite lookback is a special case of chunkwise - # where chunksize = inf - inner_items = alpha / (eps + torch.cumsum(exp_soft_energy, dim=2)) - - beta = ( - exp_soft_energy - * torch.cumsum(inner_items.flip(dims=[2]), dim=2) - .flip(dims=[2]) - ) - - if padding_mask is not None: - beta = beta.masked_fill( - padding_mask.unsqueeze(1).to(torch.bool), 0.0) - - # Mix precision to prevent overflow for fp16 - beta = beta.type(dtype) - - beta = beta.clamp(0, 1) - - prob_check(beta) - - return beta - - -def mass_preservation( - alpha: Tensor, - padding_mask: Optional[Tensor] = None, - left_padding: bool = False -): - """ - Function to compute the mass perservation for alpha. - This means that the residual weights of alpha will be assigned - to the last token. 
- - Reference: - Monotonic Infinite Lookback Attention for Simultaneous Machine Translation - https://arxiv.org/abs/1906.05218 - - alpha: bsz, tgt_len, src_len - padding_mask: bsz, src_len - left_padding: bool - """ - - prob_check(alpha) - - if padding_mask is not None: - if not left_padding: - assert not padding_mask[:, 0].any(), ( - "Find padding on the beginning of the sequence." - ) - alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0) - - if left_padding or padding_mask is None: - residuals = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0, 1) - alpha[:, :, -1] = residuals - else: - # right padding - _, tgt_len, src_len = alpha.size() - residuals = 1 - alpha.sum(dim=-1, keepdim=True).clamp(0, 1) - src_lens = src_len - padding_mask.sum(dim=1, keepdim=True) - src_lens = src_lens.expand(-1, tgt_len).contiguous() - # add back the last value - residuals += alpha.gather(2, src_lens.unsqueeze(2) - 1) - alpha = alpha.scatter(2, src_lens.unsqueeze(2) - 1, residuals) - - prob_check(alpha) - - return alpha diff --git a/spaces/stomexserde/gpt4-ui/Examples/Activador Windows 8 Pro (todas Las Versiones) Serial Key.md b/spaces/stomexserde/gpt4-ui/Examples/Activador Windows 8 Pro (todas Las Versiones) Serial Key.md deleted file mode 100644 index 8bc0e32937884fad618e2a98e42113fb77466610..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Activador Windows 8 Pro (todas Las Versiones) Serial Key.md +++ /dev/null @@ -1,29 +0,0 @@ -

          How to Activate Windows 8 Pro (All Versions) with Serial Key

          -

          Windows 8 Pro is a popular operating system that offers many features and benefits for users. However, to enjoy the full functionality of Windows 8 Pro, you need to activate it with a valid serial key. In this article, we will show you how to activate Windows 8 Pro (all versions) with a serial key in a few simple steps.

          -

          Before you start, make sure you have a working internet connection and a serial key for Windows 8 Pro. You can find your serial key on the packaging of your Windows 8 Pro DVD or on the confirmation email that you received after purchasing Windows 8 Pro online. If you don't have a serial key, you can purchase one from the official Microsoft website or from a trusted third-party seller.

          -






          -

          Once you have your serial key, follow these steps to activate Windows 8 Pro:

          -
            -
          1. Open the Start screen and type "PC settings". Click on the PC settings icon that appears on the right side of the screen.
          2. -
          3. Select "Activate Windows" from the left menu. You will see a message that says "Windows isn't activated. Activate Windows now."
          4. -
          5. Click on the "Enter key" button and type your serial key in the box. Click on "Next" to continue.
          6. -
          7. Wait for a few seconds while Windows verifies your serial key and activates your Windows 8 Pro. You will see a message that says "Windows is activated" when the process is complete.
          8. -
          9. Restart your computer to apply the changes.
          10. -
          -

          Congratulations! You have successfully activated Windows 8 Pro (all versions) with a serial key. You can now enjoy all the features and benefits of Windows 8 Pro without any limitations or interruptions.
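If you prefer the command line, Windows also ships with the built-in Software Licensing Management Tool (slmgr.vbs), which can install and activate a product key from an elevated Command Prompt. The snippet below is a minimal sketch: the key shown is only a placeholder, and you must replace it with your own genuine, purchased serial key.

```bat
REM Run these commands in an elevated Command Prompt (Run as administrator).
REM XXXXX-XXXXX-XXXXX-XXXXX-XXXXX is a placeholder - substitute your own purchased key.
slmgr /ipk XXXXX-XXXXX-XXXXX-XXXXX-XXXXX
REM Contact Microsoft's activation servers and activate the installed key.
slmgr /ato
REM Optionally confirm the activation status afterwards.
slmgr /xpr
```

Both routes reach the same activation service, so use whichever you find more convenient.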


          Windows 8 Pro is a versatile operating system that can run on various devices, such as desktops, laptops, tablets, and smartphones. It has a modern and user-friendly interface that allows you to access your apps, files, and settings with ease. It also has many security and performance enhancements that make your device faster and safer.

          -

          Some of the features and benefits of Windows 8 Pro include:

          -
            -
          • BitLocker and BitLocker To Go: These features allow you to encrypt your hard drive and removable drives to protect your data from unauthorized access or theft.
          • -
          • Remote Desktop: This feature allows you to connect to another computer and access its desktop and files from your device.
          • -
          • Windows Media Center: This feature allows you to watch and record live TV, play DVDs, and listen to music on your device.
          • -
          • Windows Store: This feature allows you to download and install thousands of apps for various purposes, such as games, education, entertainment, productivity, and more.
          • -
          • Windows Update: This feature allows you to keep your device up to date with the latest security patches and feature updates from Microsoft.
          • -
          -

          If you want to learn more about Windows 8 Pro and its features, you can visit the official Microsoft website or check out some online tutorials and guides. You can also contact the Microsoft support team if you have any questions or issues with your Windows 8 Pro activation or usage.

          7196e7f11a
          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Battlestations Midway Download Free Full Version Pc Games __HOT__.md b/spaces/stomexserde/gpt4-ui/Examples/Battlestations Midway Download Free Full Version Pc Games __HOT__.md deleted file mode 100644 index ab73c65a3287059d1edb544b78a4fb94e92cc319..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Battlestations Midway Download Free Full Version Pc Games __HOT__.md +++ /dev/null @@ -1,33 +0,0 @@ - -

          How to Download Battlestations Midway for Free on PC

          -

          Battlestations Midway is a thrilling video game that combines action and real-time tactics in the Pacific theater of World War 2. You can command your fleet assets and take control of any one of them at will, from PT boats to aircraft carriers. The game features a single player campaign that spans from Pearl Harbor to the Battle of Midway, as well as multiplayer modes that let you compete with other players online.

          -

          If you want to download Battlestations Midway for free on your PC, you will need a few things:

          -






          -
            -
          • A torrent client, such as uTorrent or BitTorrent.
          • -
          • A VPN service, such as NordVPN or ExpressVPN.
          • -
          • A valid copy of the game, which you can find on various torrent sites.
          • -
          -

          Here are the steps to follow:

          -
            -
          1. Download and install a torrent client on your PC.
          2. -
          3. Download and install a VPN service on your PC.
          4. -
          5. Connect to a VPN server in a country where torrenting is legal and safe.
          6. -
          7. Search for Battlestations Midway on a torrent site, such as The Pirate Bay or RARBG.
          8. -
          9. Download the torrent file and open it with your torrent client.
          10. -
          11. Wait for the download to finish and verify the files.
          12. -
          13. Run the setup.exe file and follow the instructions to install the game.
          14. -
          15. Enjoy playing Battlestations Midway for free on your PC!
          16. -
          -

          Note: This article is for educational purposes only. We do not condone piracy or illegal downloading of any kind. Please support the developers and publishers by buying the game from official sources.

          - -

          Battlestations Midway is a game that will appeal to fans of both action and strategy genres. You can switch between different perspectives and units at any time, giving you a lot of freedom and flexibility in how you approach each mission. You can also customize your fleet and aircraft with different weapons and upgrades, adding more depth and replay value to the game.

          -

          The game also boasts impressive graphics and sound effects that immerse you in the historical setting of the Pacific war. The water and weather effects are realistic and dynamic, and the explosions and gunfire are loud and satisfying. The voice acting and music are also well-done, adding more personality and emotion to the game.

          -

          If you are looking for a game that will challenge your skills and test your tactics, Battlestations Midway is a great choice. You can download it for free on your PC by following the steps above, or you can buy it from official sources if you want to support the developers and publishers. Either way, you will have a lot of fun playing this game.

          - -

          Battlestations Midway is not only a game, but also a history lesson. You can learn about the events and battles that shaped the course of the Pacific war, and the people and machines that fought in it. The game features historical footage and facts that give you more context and background for each mission. You can also access a library of information that contains details and specifications of the ships, planes, and weapons that you can use in the game.

          -

          The game also offers a variety of multiplayer modes that let you play with or against other players online. You can join or host a co-op campaign, where you can team up with other players to complete the missions together. You can also join or host a competitive mode, where you can choose your side and fight for supremacy in different scenarios. You can also create your own custom battles and invite other players to join them.

          -

          -

          Battlestations Midway is a game that will keep you entertained and engaged for hours. You can download it for free on your PC by following the steps above, or you can buy it from official sources if you want to support the developers and publishers. Either way, you will have a blast playing this game.

          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Changeling Crack And Patch File Download.md b/spaces/stomexserde/gpt4-ui/Examples/Changeling Crack And Patch File Download.md deleted file mode 100644 index c9de561082decbfc86896bc98461259b93daa681..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Changeling Crack And Patch File Download.md +++ /dev/null @@ -1,32 +0,0 @@ -
          -

          Changeling Crack and Patch File Download

          -

          Changeling is a romance game visual novel by Steamberry Studio, released on January 30, 2019. It is a dark, fantastical romantic comedy that focuses not just on falling in love, but rediscovering your past and the dangers of a supernatural world. The game has six love interests, over 40 unique endings, 30+ unique CGs, and 30+ unique backgrounds. It is a highly praised game by critics and players alike, with a very positive rating on Steam.

          -

          Changeling crack and patch file download


          Download File ✵✵✵ https://urlgoal.com/2uIbF5



          -

          However, not everyone can afford to buy the game or wants to pay for it. Some people prefer to use crack and patch files to play the game for free or to enhance their gaming experience. Crack files are modified versions of the original game files that bypass the copy protection or activation process. Patch files are updates or fixes that improve the game performance or functionality. Using crack and patch files can have some advantages, such as saving money, accessing more features, or playing offline. But they can also have some disadvantages, such as exposing your PC to viruses, malware, or legal issues.

          -

          In this article, we will show you how to download Changeling crack and patch file safely and easily, how to install them on your PC, and how to play Changeling with them. We will also provide you with some tips and warnings for using crack and patch files responsibly. Read on to find out more.

          -

          How to Download Changeling Crack and Patch File

          -

          The first step to play Changeling with crack and patch file is to download them from reliable sources. There are many websites that offer crack and patch file download for various games, but not all of them are trustworthy or secure. Some of them may contain fake or corrupted files that can harm your PC or steal your personal information. Therefore, you need to be careful when choosing where to download Changeling crack and patch file.

          -

          -

          One way to find reliable sources for crack and patch file download is to use search engines like Google or Bing. You can type keywords like "Changeling crack download" or "Changeling patch download" in the search box and see what results come up. You can also use filters like date, language, or country to narrow down your search. However, you should not rely solely on search engines, as they may not always show you the best or safest results. You should also check the reputation and reviews of the websites that offer crack and patch file download before clicking on them.

          -

          Another way to find reliable sources for crack and patch file download is to use online forums or communities that are dedicated to gaming or cracking. You can join these forums or communities and ask other members for recommendations or feedback on where to download Changeling crack and patch file. You can also browse through existing threads or posts that discuss Changeling or similar games. However, you should not trust everything you read on these forums or communities, as some members may have ulterior motives or malicious intentions. You should also follow the rules and etiquette of these forums or communities before posting or downloading anything.

          -

          Some examples of reliable sources for crack and patch file download for Changeling are: - [CrackWatch]: This is a website that tracks the latest cracks and updates for various games. You can find Changeling crack and patch file download links on this website, as well as information about the game's release date, status, and size. You can also join the CrackWatch community and chat with other users about Changeling or other games. - [SteamUnlocked]: This is a website that offers free download of cracked games from Steam. You can find Changeling crack and patch file download links on this website, as well as instructions on how to install and play the game. You can also browse through other categories of games, such as action, adventure, or simulation. - [IGG Games]: This is a website that provides free download of various games, including Changeling. You can find Changeling crack and patch file download links on this website, as well as screenshots, videos, and reviews of the game. You can also request or suggest games that you want to see on this website. Once you have found a reliable source for Changeling crack and patch file download, you need to check the file for viruses and malware before installing it on your PC. You can use antivirus software or online tools to scan the file and make sure it is safe and clean. Some examples of antivirus software or online tools are: - [VirusTotal]: This is an online tool that analyzes files and URLs for viruses, malware, and other threats. You can upload the Changeling crack and patch file to this tool and see the results from over 70 antivirus engines. You can also see the comments and ratings from other users who have scanned the same file or URL. - [Malwarebytes]: This is a software that protects your PC from viruses, malware, ransomware, and other threats. You can download and install this software on your PC and scan the Changeling crack and patch file with it. You can also use this software to remove any unwanted or harmful programs or files from your PC. - [Avast]: This is another software that protects your PC from viruses, malware, spyware, and other threats. You can download and install this software on your PC and scan the Changeling crack and patch file with it. You can also use this software to block any suspicious or malicious websites or downloads. After checking the file for viruses and malware, you are ready to install the crack and patch file on your PC. To do this, you need to follow these steps: - Step 1: Extract the crack and patch file from the compressed folder using a program like WinRAR or 7-Zip. You should see a folder named "Changeling" or something similar. - Step 2: Copy the contents of the folder to the installation directory of Changeling on your PC. The installation directory is usually located in C:\Program Files (x86)\Steam\steamapps\common\Changeling or something similar. You may need to overwrite some existing files or folders. - Step 3: Run the game as administrator by right-clicking on the game icon or shortcut and selecting "Run as administrator". You should see a message saying that the crack has been activated successfully.

          How to Play Changeling with Crack and Patch File

          -

          Now that you have installed the crack and patch file on your PC, you are ready to play Changeling with them. To do this, you need to follow these steps:

          - - Step 1: Launch the game from your desktop or start menu. You should see the game menu with options like "New Game", "Load Game", "Settings", etc. - Step 2: Select "New Game" if you want to start a new game or "Load Game" if you want to continue a previous game. You should see the game intro with narration and graphics. - Step 3: Follow the game story and make choices that affect your relationships with different characters. You can also access the game features and options by pressing ESC or clicking on the menu icon at the top right corner of the screen. Some of the features and options are: - Save/Load: This allows you to save or load your game progress at any point in the game. - Skip: This allows you to skip through text or scenes that you have already seen or read. - Auto: This allows you to automatically advance through text or scenes without clicking. - Log: This allows you to review previous text or dialogue in the game. - Preferences: This allows you to adjust the game settings such as sound, music, voice, display, language, etc. - Help: This provides you with some tips and instructions on how to play the game. - Quit: This allows you to exit the game.

          Conclusion

          -

          In this article, we have shown you how to download Changeling crack and patch file safely and easily, how to install them on your PC, and how to play Changeling with them. We have also provided you with some tips and warnings for using crack and patch files responsibly. We hope you have found this article helpful and informative.

          -

          Changeling is a romance game visual novel that offers a dark, fantastical romantic comedy with six love interests, over 40 unique endings, 30+ unique CGs, and 30+ unique backgrounds. It is a highly praised game by critics and players alike, with a very positive rating on Steam. However, if you want to play the game for free or to enhance your gaming experience, you can use crack and patch files to bypass the copy protection or activation process and to update the game performance or functionality.

          -

          Using crack and patch files can have some advantages, such as saving money, accessing more features, or playing offline. But they can also have some disadvantages, such as exposing your PC to viruses, malware, or legal issues. Therefore, you need to be careful when choosing where to download Changeling crack and patch file, how to check the file for viruses and malware, and how to install the file on your PC. You also need to follow the game instructions and rules when playing Changeling with crack and patch file.

          -

          We hope you enjoy playing Changeling with crack and patch file and discover the secrets of your past and the dangers of a supernatural world. If you have any feedback or experience with Changeling or crack and patch file download, please feel free to share them with us in the comments section below. Thank you for reading this article.

          -

          FAQs

          -

          Here are some frequently asked questions about Changeling or crack and patch file download:

          -

          What is the difference between crack and patch file?

          -

          A crack file is a modified version of the original game file that bypasses the copy protection or activation process. A patch file is an update or fix that improves the game performance or functionality. Crack files are usually illegal and risky, while patch files are usually legal and safe.

          -

          Is it legal to use crack and patch file for Changeling?

          -

          Using crack file for Changeling is illegal and may violate the game's terms of service or license agreement. It may also infringe the intellectual property rights of the game developer or publisher. Using patch file for Changeling is legal if it is authorized or provided by the game developer or publisher. However, using unauthorized or unofficial patch files may also violate the game's terms of service or license agreement.

          -

          Will using crack and patch file affect my PC performance or security?

          -

          Using crack and patch file may affect your PC performance or security in different ways. Using crack file may expose your PC to viruses, malware, or other threats that can harm your PC or steal your personal information. Using patch file may improve your PC performance or security by fixing bugs, errors, or vulnerabilities in the game. However, using untrusted or corrupted patch files may also harm your PC or compromise your security.

          -

          Can I play Changeling online with crack and patch file?

          -

          Playing Changeling online with crack and patch file may not be possible or advisable. Using crack file may prevent you from accessing the online features or services of the game, such as multiplayer mode, achievements, leaderboards, etc. It may also result in banning or suspension of your game account or IP address. Using patch file may allow you to play Changeling online with other players who have the same version of the game. However, using incompatible or outdated patch files may cause errors, crashes, or disconnections in the online mode.

          -

          Where can I get more information about Changeling game?

          -

          You can get more information about Changeling game from various sources, such as: - [Official Website]: This is the official website of Changeling game where you can find the game description, features, screenshots, videos, reviews, etc. - [Steam Page]: This is the Steam page of Changeling game where you can buy the game, download updates, join discussions, write reviews, etc. - [Wiki Page]: This is the wiki page of Changeling game where you can find detailed information about the game story, characters, endings, choices, etc.

          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Dish Network Dish Cinema.md b/spaces/stomexserde/gpt4-ui/Examples/Dish Network Dish Cinema.md deleted file mode 100644 index ce4bc2cd6c547538401a661bcb75386fccc04f12..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Dish Network Dish Cinema.md +++ /dev/null @@ -1,37 +0,0 @@ -
          -

          How to Enjoy Dish Network Dish Cinema at Home

          -

          If you are looking for a way to watch the latest movies and TV shows at home, you might want to consider Dish Network Dish Cinema. Dish Cinema is an on-demand service that gives you access to thousands of titles, including new releases, classics, and exclusive content, which you can rent or buy across a wide range of genres. Here are the benefits and features of Dish Network Dish Cinema and how you can enjoy it at home.

          -

          Benefits of Dish Network Dish Cinema

          -

          Dish Network Dish Cinema offers many advantages over other options for watching movies and TV shows at home. Some of the benefits are:

          -






          -
            -
          • You can watch movies and TV shows anytime you want, without waiting for schedules or commercials.
          • -
          • You can choose from a large selection of titles, including new releases that are not available on other platforms.
          • -
          • You can enjoy high-quality video and audio, with HD and 4K options available for some titles.
          • -
          • You can save money by renting or buying movies and TV shows at affordable prices, compared to going to the theater or subscribing to multiple streaming services.
          • -
          • You can access exclusive content that is only available on Dish Network Dish Cinema, such as original series, documentaries, and sports events.
          • -
          -

          Features of Dish Network Dish Cinema

          -

          Dish Network Dish Cinema offers many features that make it easy and convenient to watch movies and TV shows at home. Some of the features are:

          -
            -
          • You can browse and search for titles by genre, category, rating, popularity, or keyword.
          • -
          • You can filter and sort titles by price, release date, runtime, or quality.
          • -
          • You can preview titles by watching trailers, reading reviews, or viewing ratings.
          • -
          • You can rent or buy titles with a few clicks using your remote control or online account.
          • -
          • You can watch titles on your TV, computer, smartphone, tablet, or other devices using the Dish Anywhere app or website.
          • -
          • You can resume watching titles from where you left off on any device.
          • -
          • You can download titles to watch offline on your mobile devices.
          • -
          -

          How to Enjoy Dish Network Dish Cinema at Home

          -

          To enjoy Dish Network Dish Cinema at home, you need a Dish Network subscription, a compatible receiver, an internet connection, and a credit card or PayPal account. Here are the steps:

          -
            -
          1. Turn on your TV and receiver and press the "Guide" button on your remote control.
          2. -
          3. Select "Dish Cinema" from the menu and browse or search for the title you want to watch.
          4. -
          5. Select the title and choose whether you want to rent or buy it. You will see the price, rental period, quality, and availability of the title.
          6. -
          7. Enter your payment information and confirm your purchase. You will receive a confirmation message on your screen.
          8. -
          9. Start watching the title on your TV or any other device using the Dish Anywhere app or website. You can pause, rewind, fast-forward, or stop the title as you wish.
          10. -
          -

          Dish Network Dish Cinema is a great way to watch movies and TV shows at home. You can enjoy a wide range of titles on demand, including new releases, classics, and exclusive content. You can also rent or buy titles at reasonable prices and watch them on any device. Try Dish Network Dish Cinema today and see for yourself how it can enhance your home entertainment experience.

          -

          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Dropbox Pro 1.3.241.1 Crack INCL QR Code ((NEW)).md b/spaces/stomexserde/gpt4-ui/Examples/Dropbox Pro 1.3.241.1 Crack INCL QR Code ((NEW)).md deleted file mode 100644 index 0ca7361934c7e2ae1c289bf2e542276ad0162631..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Dropbox Pro 1.3.241.1 Crack INCL QR Code ((NEW)).md +++ /dev/null @@ -1,35 +0,0 @@ - -

          Dropbox Pro 1.3.241.1 Crack INCL QR Code: How to Download and Install

          -

          Dropbox Pro is a cloud storage service that allows you to sync your files across multiple devices and access them from anywhere. With Dropbox Pro, you can also share your files with others, even if they don't have a Dropbox account. Dropbox Pro also offers advanced features such as data encryption, version control, and remote wipe.

          -

          Dropbox Pro 1.3.241.1 Crack INCL QR Code





          -

          If you want to download and install Dropbox Pro 1.3.241.1 Crack INCL QR Code, you need to follow these steps:

          -
            -
          1. Go to the official website of Dropbox and download the desktop app for your operating system[^1^].
          2. -
          3. Run the installer and follow the instructions to complete the installation.
          4. -
          5. Launch the Dropbox app and sign in with your Dropbox account or create a new one if you don't have one.
          6. -
          7. Scan the QR code below with your smartphone camera or a QR code scanner app. This will take you to a webpage where you can download the crack file for Dropbox Pro 1.3.241.1[^4^].
          8. -
          9. Save the crack file to your computer and copy it to the Dropbox folder on your computer.
          10. -
          11. Run the crack file and follow the instructions to activate Dropbox Pro 1.3.241.1 on your computer.
          12. -
          13. Enjoy the benefits of Dropbox Pro 1.3.241.1 Crack INCL QR Code!
          14. -
          -

          QR code for Dropbox Pro 1.3.241.1 Crack

          -

          Note: This is a pirated version of Dropbox Pro 1.3.241.1 and it may not work properly or may contain viruses or malware. We do not recommend using this method to get Dropbox Pro 1.3.241.1 Crack INCL QR Code as it may violate the terms of service of Dropbox and may harm your computer or data. Please use this method at your own risk.

          Dropbox Pro 1.3.241.1 Crack INCL QR Code is a convenient way to get access to the premium features of Dropbox without paying a subscription fee. However, it is not a legal or ethical way to use the service and it may have some drawbacks and risks. Here are some of the pros and cons of using Dropbox Pro 1.3.241.1 Crack INCL QR Code:

          -

          Pros

          -
            -
          • You can enjoy up to 2 TB of cloud storage space and sync unlimited files across your devices.
          • -
          • You can share large files and folders with anyone, even if they don't have a Dropbox account.
          • -
          • You can protect your files with data encryption, password protection, and two-factor authentication.
          • -
          • You can restore previous versions of your files and recover deleted files for up to 180 days.
          • -
          • You can remotely wipe your data from lost or stolen devices.
          • -
          -

          Cons

          -
            -
          • You may violate the terms of service of Dropbox and face legal consequences or account suspension.
          • -
          • You may download a fake or malicious crack file that may infect your computer with viruses or malware.
          • -
          • You may not receive updates or technical support from Dropbox.
          • -
          • You may experience errors or bugs in the functionality of Dropbox Pro 1.3.241.1.
          • -
          • You may lose your data if the crack file stops working or if Dropbox detects and blocks your account.
          • -
          -

          As you can see, using Dropbox Pro 1.3.241.1 Crack INCL QR Code is not a safe or reliable way to use Dropbox. If you want to enjoy the benefits of Dropbox Pro 1.3.241.1 without risking your security or privacy, you should consider purchasing a legitimate subscription from Dropbox or using an alternative cloud storage service that suits your needs and budget.

          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Face2face Starter Teachers Book Free Download Pdf.md b/spaces/stomexserde/gpt4-ui/Examples/Face2face Starter Teachers Book Free Download Pdf.md deleted file mode 100644 index 63b9705b3cd2b3154a4ca181f3338f9495b86e9b..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Face2face Starter Teachers Book Free Download Pdf.md +++ /dev/null @@ -1,18 +0,0 @@ -
          -

          How to Download the face2face Starter Teacher's Book for Free

          -

          If you are looking for a flexible, easy-to-teach, 6-level course (A1 to C1) for busy teachers who want to get their adult and young adult learners to communicate with confidence, you might be interested in the face2face Starter Teacher's Book with DVD[^2^]. This book offers detailed teaching notes for every lesson, keys to exercises and extra teaching notes. It also guides teachers through the Student's Book DVD-ROM, and relates face2face to CEFR levels and English Profile. Additionally, busy teachers can access photocopiable progress tests and communicative class activities[^2^].

          -






          -

          The free DVD in the Second edition Starter Teacher's Book offers classroom videos integrated with the Real World lessons in the Student's Book, as well as the entire content of the Teacher's Book in PDF format[^2^]. This means that you can download the PDF version of the book for free from the DVD. However, if you don't have access to the DVD, you can still find some online sources that offer the PDF version of the book for free. Here are some steps to follow:

          -
            -
          1. Go to this website [^1^] and click on "Download" at the top right corner of the page. You will be asked to create a free account or log in with your existing account. After that, you can download the PDF file of the book.
          2. -
          3. Go to this website [^2^] and click on "Get print book" at the top right corner of the page. You will be redirected to a page where you can choose from different sellers that offer the book. Some of them might have a free shipping option or a discounted price.
          4. -
          5. Go to this website [^3^] and click on "PDF" at the right side of the page. You will be able to download the PDF file of the book directly without creating an account or logging in.
          6. -
          -

          These are some of the ways you can download the face2face Starter Teacher's Book for free. However, please note that these sources might not be authorized by Cambridge University Press, which is the publisher of the book. Therefore, we recommend that you buy the original book from a reputable seller if you can afford it, as it will support the authors and ensure that you get the best quality and updated content.

          - -

          The face2face Starter Teacher's Book is based on the communicative approach and draws on the best of current thinking and practice. It helps teachers to present and practise new language through engaging and motivating real-world contexts[^1^]. The book also provides clear learning outcomes for each lesson, which are mapped to the CEFR and English Profile. This helps teachers to monitor their students' progress and tailor their teaching accordingly[^1^].

          -

          Another benefit of the face2face Starter Teacher's Book is that it includes a free DVD that contains a wealth of extra resources for teachers. The DVD has classroom videos that show how to teach the Real World lessons in the Student's Book, as well as tips and techniques from experienced teachers[^1^]. The DVD also has the entire content of the Teacher's Book in PDF format, which makes it easy to access and print whenever needed[^1^]. Moreover, the DVD has photocopiable worksheets for Class Activities, Vocabulary Plus, Help with Grammar and Progress Tests[^1^]. These worksheets provide additional practice and consolidation for students, as well as useful feedback for teachers.

          -

          -

          In conclusion, the face2face Starter Teacher's Book is an invaluable tool for teachers who want to teach English effectively and enjoyably. It offers detailed guidance and support for every lesson, as well as a variety of extra materials and resources on the DVD. It also aligns with the CEFR and English Profile standards, which ensure that students achieve their learning goals and communicate with confidence. By downloading the face2face Starter Teacher's Book for free, you can benefit from all these features and more.

          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Horror Story 2 Full Movie In Hindi Utorrent _VERIFIED_ Download Hd.md b/spaces/stomexserde/gpt4-ui/Examples/Horror Story 2 Full Movie In Hindi Utorrent _VERIFIED_ Download Hd.md deleted file mode 100644 index b99e3660f8430bc1dfc592a47a3169fde126bf03..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Horror Story 2 Full Movie In Hindi Utorrent _VERIFIED_ Download Hd.md +++ /dev/null @@ -1,36 +0,0 @@ - -

          How to Download Horror Story 2 Full Movie in Hindi Utorrent HD

          -

          If you are a fan of horror movies, you might be interested in watching Horror Story 2, the sequel to the 2013 hit film Horror Story. This movie is a collection of six terrifying stories that will keep you on the edge of your seat. The movie was released in India on October 31, 2023, and received positive reviews from critics and audiences alike.

          -

          But what if you want to watch Horror Story 2 full movie in Hindi Utorrent HD? Utorrent is a popular software that allows you to download and share files over the internet. You can use it to download movies, music, games, and more. However, downloading movies from Utorrent is not always legal or safe. You might face legal issues, malware infections, or poor quality downloads.

          -






          -

          That's why we have prepared this guide to help you download Horror Story 2 full movie in Hindi Utorrent HD safely and legally. Follow these steps and enjoy the movie without any hassle.

          -

          Step 1: Find a Reliable Torrent Site

          -

          The first step to download Horror Story 2 full movie in Hindi Utorrent HD is to find a reliable torrent site that has the movie available. There are many torrent sites on the internet, but not all of them are trustworthy or updated. Some of them might have fake or malicious files that can harm your device or compromise your privacy.

          -

          To avoid such risks, you should use a reputable torrent site that has a large user base and positive feedback. Some of the best torrent sites for downloading movies are:

          -
            -
          • The Pirate Bay
          • -
          • RARBG
          • -
          • 1337x
          • -
          • YTS
          • -
          • LimeTorrents
          • -
          -

          These sites have a wide range of movies in different languages and qualities. You can search for Horror Story 2 full movie in Hindi Utorrent HD on any of these sites and find several results. However, before you click on any link, make sure to check the following details:

          -
            -
          • The file size: The file size should match the quality of the movie. For example, a 720p movie should be around 1 GB, while a 1080p movie should be around 2 GB. If the file size is too small or too large, it might be fake or corrupted.
          • -
          • The seeders and leechers: The seeders are the users who have the complete file and are sharing it with others. The leechers are the users who are downloading the file from the seeders. The more seeders and leechers a file has, the faster and easier it will be to download.
          • -
          • The comments and ratings: The comments and ratings are the feedback from other users who have downloaded the file. They can help you verify the quality and authenticity of the file. You should avoid files that have negative comments or low ratings.
          • -
          -

          Step 2: Download and Install Utorrent

          -

          The next step to download Horror Story 2 full movie in Hindi Utorrent HD is to download and install Utorrent on your device. Utorrent is a free and easy-to-use software that enables you to download torrents. You can download it from its official website: https://www.utorrent.com/.

          -

          Once you have downloaded the Utorrent installer, run it and follow the instructions to install it on your device. You might need to allow some permissions or change some settings during the installation process. After the installation is complete, launch Utorrent and get ready to download Horror Story 2 full movie in Hindi Utorrent HD.

          -

          -

          Step 3: Download Horror Story 2 Full Movie in Hindi Utorrent HD

          -

          The final step to download Horror Story 2 full movie in Hindi Utorrent HD is to download the movie from the torrent site you have chosen. To do this, follow these steps:

          -
            -
          1. Go to the torrent site and search for Horror Story 2 full movie in Hindi Utorrent HD.
          2. -
          3. Select the file that matches your preferences and click on it.
          4. -
          5. On the file page, look for a button or a link that says "Download Torrent", "Magnet Link", or something similar.
          6. -
          7. Click on the button or link and wait for Utorrent to open automatically.
          8. -
          9. In Utorrent

            -
            -
            \ No newline at end of file diff --git a/spaces/subhajitmaji/MusicGen/audiocraft/quantization/vq.py b/spaces/subhajitmaji/MusicGen/audiocraft/quantization/vq.py deleted file mode 100644 index f67c3a0cd30d4b8993a36c587f00dc8a451d926f..0000000000000000000000000000000000000000 --- a/spaces/subhajitmaji/MusicGen/audiocraft/quantization/vq.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import math -import typing as tp - -import torch - -from .base import BaseQuantizer, QuantizedResult -from .core_vq import ResidualVectorQuantization - - -class ResidualVectorQuantizer(BaseQuantizer): - """Residual Vector Quantizer. - - Args: - dimension (int): Dimension of the codebooks. - n_q (int): Number of residual vector quantizers used. - q_dropout (bool): Random quantizer drop out at train time. - bins (int): Codebook size. - decay (float): Decay for exponential moving average over the codebooks. - kmeans_init (bool): Whether to use kmeans to initialize the codebooks. - kmeans_iters (int): Number of iterations used for kmeans initialization. - threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes - that have an exponential moving average cluster size less than the specified threshold with - randomly selected vector from the current batch. - orthogonal_reg_weight (float): Orthogonal regularization weights. - orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes. - orthogonal_reg_max_codes (optional int): Maximum number of codes to consider. - for orthogonal regulariation. - """ - def __init__( - self, - dimension: int = 256, - n_q: int = 8, - q_dropout: bool = False, - bins: int = 1024, - decay: float = 0.99, - kmeans_init: bool = True, - kmeans_iters: int = 10, - threshold_ema_dead_code: int = 2, - orthogonal_reg_weight: float = 0.0, - orthogonal_reg_active_codes_only: bool = False, - orthogonal_reg_max_codes: tp.Optional[int] = None, - ): - super().__init__() - self.max_n_q = n_q - self.n_q = n_q - self.q_dropout = q_dropout - self.dimension = dimension - self.bins = bins - self.decay = decay - self.kmeans_init = kmeans_init - self.kmeans_iters = kmeans_iters - self.threshold_ema_dead_code = threshold_ema_dead_code - self.orthogonal_reg_weight = orthogonal_reg_weight - self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only - self.orthogonal_reg_max_codes = orthogonal_reg_max_codes - self.vq = ResidualVectorQuantization( - dim=self.dimension, - codebook_size=self.bins, - num_quantizers=self.n_q, - decay=self.decay, - kmeans_init=self.kmeans_init, - kmeans_iters=self.kmeans_iters, - threshold_ema_dead_code=self.threshold_ema_dead_code, - orthogonal_reg_weight=self.orthogonal_reg_weight, - orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only, - orthogonal_reg_max_codes=self.orthogonal_reg_max_codes, - channels_last=False - ) - - def forward(self, x: torch.Tensor, frame_rate: int): - n_q = self.n_q - if self.training and self.q_dropout: - n_q = int(torch.randint(1, self.n_q + 1, (1,)).item()) - bw_per_q = math.log2(self.bins) * frame_rate / 1000 - quantized, codes, commit_loss = self.vq(x, n_q=n_q) - codes = codes.transpose(0, 1) - # codes is [B, K, T], with T frames, K nb of codebooks. 
- bw = torch.tensor(n_q * bw_per_q).to(x) - return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss)) - - def encode(self, x: torch.Tensor) -> torch.Tensor: - """Encode a given input tensor with the specified frame rate at the given bandwidth. - The RVQ encode method sets the appropriate number of quantizer to use - and returns indices for each quantizer. - """ - n_q = self.n_q - codes = self.vq.encode(x, n_q=n_q) - codes = codes.transpose(0, 1) - # codes is [B, K, T], with T frames, K nb of codebooks. - return codes - - def decode(self, codes: torch.Tensor) -> torch.Tensor: - """Decode the given codes to the quantized representation. - """ - # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T]. - codes = codes.transpose(0, 1) - quantized = self.vq.decode(codes) - return quantized - - @property - def total_codebooks(self): - return self.max_n_q - - @property - def num_codebooks(self): - return self.n_q - - def set_num_codebooks(self, n: int): - assert n > 0 and n <= self.max_n_q - self.n_q = n diff --git a/spaces/suchun/chatGPT_acdemic/request_llm/bridge_chatglm.py b/spaces/suchun/chatGPT_acdemic/request_llm/bridge_chatglm.py deleted file mode 100644 index 7af283562ce3539de9ac1a44ba45f9266308defa..0000000000000000000000000000000000000000 --- a/spaces/suchun/chatGPT_acdemic/request_llm/bridge_chatglm.py +++ /dev/null @@ -1,140 +0,0 @@ - -from transformers import AutoModel, AutoTokenizer -import time -import importlib -from toolbox import update_ui, get_conf -from multiprocessing import Process, Pipe - -load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" - -################################################################################# -class GetGLMHandle(Process): - def __init__(self): - super().__init__(daemon=True) - self.parent, self.child = Pipe() - self.chatglm_model = None - self.chatglm_tokenizer = None - self.info = "" - self.success = True - self.check_dependency() - self.start() - - def check_dependency(self): - try: - import sentencepiece - self.info = "依赖检测通过" - self.success = True - except: - self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。" - self.success = False - - def ready(self): - return self.chatglm_model is not None - - def run(self): - # 第一次运行,加载参数 - retry = 0 - while True: - try: - if self.chatglm_model is None: - self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) - device, = get_conf('LOCAL_MODEL_DEVICE') - if device=='cpu': - self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() - else: - self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() - self.chatglm_model = self.chatglm_model.eval() - break - else: - break - except: - retry += 1 - if retry > 3: - self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。') - raise RuntimeError("不能正常加载ChatGLM的参数!") - - # 进入任务等待状态 - while True: - kwargs = self.child.recv() - try: - for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs): - self.child.send(response) - except: - self.child.send('[Local Message] Call ChatGLM fail.') - self.child.send('[Finish]') - - def stream_chat(self, **kwargs): - self.parent.send(kwargs) - while True: - res = self.parent.recv() - if res != '[Finish]': - yield res - else: - break - return - -global glm_handle -glm_handle = None 
-################################################################################# -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): - """ - 多线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - global glm_handle - if glm_handle is None: - glm_handle = GetGLMHandle() - observe_window[0] = load_message + "\n\n" + glm_handle.info - if not glm_handle.success: - error = glm_handle.info - glm_handle = None - raise RuntimeError(error) - - # chatglm 没有 sys_prompt 接口,因此把prompt加入 history - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append(["What can I do?", sys_prompt] ) - history_feedin.append([history[2*i], history[2*i+1]] ) - - watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 - response = "" - for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - observe_window[0] = response - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("程序终止。") - return response - - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 单线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - chatbot.append((inputs, "")) - - global glm_handle - if glm_handle is None: - glm_handle = GetGLMHandle() - chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info) - yield from update_ui(chatbot=chatbot, history=[]) - if not glm_handle.success: - glm_handle = None - return - - if additional_fn is not None: - import core_functional - importlib.reload(core_functional) # 热更新prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append(["What can I do?", system_prompt] ) - history_feedin.append([history[2*i], history[2*i+1]] ) - - for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - chatbot[-1] = (inputs, response) - yield from update_ui(chatbot=chatbot, history=history) \ No newline at end of file diff --git a/spaces/suds/blah/app.py b/spaces/suds/blah/app.py deleted file mode 100644 index 343bb21dbe824efe0d3a174d248765024f6b7ef9..0000000000000000000000000000000000000000 --- a/spaces/suds/blah/app.py +++ /dev/null @@ -1,4 +0,0 @@ -import streamlit as st - -x = st.slider("select something") -st.write(x) \ No newline at end of file diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/sd_hijack_inpainting.py b/spaces/supertori/files/stable-diffusion-webui/modules/sd_hijack_inpainting.py deleted file mode 100644 index 55a2ce4d19200acafd79e6fce7e017c4abc50a73..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/sd_hijack_inpainting.py +++ /dev/null @@ -1,103 +0,0 @@ -import os -import torch - -from einops import repeat -from omegaconf import ListConfig - -import ldm.models.diffusion.ddpm -import ldm.models.diffusion.ddim -import ldm.models.diffusion.plms - -from ldm.models.diffusion.ddpm import LatentDiffusion -from ldm.models.diffusion.plms import PLMSSampler -from 
ldm.models.diffusion.ddim import DDIMSampler, noise_like -from ldm.models.diffusion.sampling_util import norm_thresholding - - -@torch.no_grad() -def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, dynamic_threshold=None): - b, *_, device = *x.shape, x.device - - def get_model_output(x, t): - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - - if isinstance(c, dict): - assert isinstance(unconditional_conditioning, dict) - c_in = dict() - for k in c: - if isinstance(c[k], list): - c_in[k] = [ - torch.cat([unconditional_conditioning[k][i], c[k][i]]) - for i in range(len(c[k])) - ] - else: - c_in[k] = torch.cat([unconditional_conditioning[k], c[k]]) - else: - c_in = torch.cat([unconditional_conditioning, c]) - - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - return e_t - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - - def get_x_prev_and_pred_x0(e_t, index): - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - if dynamic_threshold is not None: - pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - e_t = get_model_output(x, t) - if len(old_eps) == 0: - # Pseudo Improved Euler (2nd order) - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) - e_t_next = get_model_output(x_prev, t_next) - e_t_prime = (e_t + e_t_next) / 2 - elif len(old_eps) == 1: - # 2nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (3 * e_t - old_eps[-1]) / 2 - elif len(old_eps) == 2: - # 3nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 - elif len(old_eps) >= 3: - # 4nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 - - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) - - return x_prev, pred_x0, e_t - - -def do_inpainting_hijack(): - # p_sample_plms is needed because PLMS can't work with dicts as conditionings - - ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Blufftitler Dpack 3 PATCHED.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Blufftitler Dpack 3 PATCHED.md deleted file mode 100644 index 964c5902959e88b937b0955ec083a686f73430be..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Blufftitler Dpack 3 PATCHED.md +++ /dev/null @@ -1,38 +0,0 @@ -

            Blufftitler Dpack 3





            -
            -Blufftitler : From the official site of Blufftitler : 'Take the poker world by storm with the hottest game in the industry! Blufftitler is a fun, innovative, and totally unique poker game!'Q: - -How to implement a singleton in C++? - -I am trying to implement a singleton pattern in c++. I am using a static member function to initialize the singleton. - -My question is: is this a good way to implement a singleton, and if not, what is a better way? - -A: - -Is the object a singleton if it is one and only one object? If it's a container, then your implementation is pretty safe. However, in C++, you should be thinking of objects as having multiple states, and being one of many objects with that state, rather than one object with one state. If you want only one object, you could simply make it a global variable, since it has no state (except perhaps the initialization function). However, I would recommend avoiding the singleton pattern, and instead just use static variables or class variables. - -You can also consider using a static registry class. It is very simple and can be used in the standard way by using the registry::instance() - -class Singleton - - static Singleton &getInstance() - - static Singleton *instance = new Singleton; - - return *instance; - - - - // your singleton methods - -; - -Here is a thread that addresses this type of issue: - -The core of his idea is to create a "one and only" instance (by setting an invariant) and then calling a static constructor from the first line of main(). - -;">P 4fefd39f24
            -
            -
            -

            diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Torrent Autodata 338 Ita FREE.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Torrent Autodata 338 Ita FREE.md deleted file mode 100644 index 5dd4ff837577a2a5947177ef3ec76d6b75c46f5d..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Torrent Autodata 338 Ita FREE.md +++ /dev/null @@ -1,8 +0,0 @@ -
            -

            autodata 338 srpski jezik free download torrentz.. autodata 3.38 srpski jezik free download torrentz. autodata 338 srpski jezik free download torrentz.38 srpski jezik free download torrent.38 srpski jezik free torrentz. free: download autodata. auto data.

            -

            adobe flash player is a cross-platform application used for viewing flash-based games, videos and other flash applications, as well as playing online games and watching flash movies.
            the application is available in many windows operating systems.
            fortunately, you do not need to manually download the download the application, since it is freely available for all major operating systems.
            conveniently packaged in a wide range

            -

            Download Torrent Autodata 338 Ita


            Download · https://cinurl.com/2uEYBu



            -

            autodata 3.38 srpski jezik free download torrentz ->->->->.. autodata 3.39.38.38 srpski download free 63f381d098. najbrza i najbolja verzija auto date prevedena na hrvatski jezik 2012.38 srpski jezik free autodata srpski jezik download,.40.40 crack software autodata. autodata 3 38. autodata 3 38 srpski ingyendownload free autodata 3 38 srpski.40 crack and full version download download: autodata 2015 srpski torrent. 7fd0e77640 hindi medium dual audio hindi dubbed movie

            -

            jual flash disk 8 gb autodata 3.45 3.40 3.38 dengan harga rp409.000 dari toko online mirza gallery adinda, kota surabaya. cari produk kunci.. hi. i'm not asking for anything illegal, just need help to change the language to pt pt if it's illegal post i'm sorry and will be deleted. he version i. autodata 3.40 pt.rarautodata 3.41 pt-pt - torrent pautan magnetadcda2/eng/po. download aut0data 3.38 portugues torrent or any additional. we aim to offer autodata in a broad variety of languages to make.

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D Download LINK Pc.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D Download LINK Pc.md deleted file mode 100644 index 84446f5cfb68f4e6e5b771b9953febfa5bbc870d..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D Download LINK Pc.md +++ /dev/null @@ -1,121 +0,0 @@ -
            -

            MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D Download PC

            - -

            If you are looking for a reliable and powerful operating system for your PC, you should consider downloading MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D. This is the official release of Windows 7 Ultimate, build 7600.16385, which was released to manufacturing on 22 July 2009 and was released to the public three months later.

            -






            - -

            MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is the best version of Windows 7 for 64-bit systems. It has many features and benefits that make it stand out from other operating systems. Some of these features are:

            - -
              -
            • It has a user-friendly and customizable interface that allows you to personalize your desktop, taskbar, start menu, and themes.
            • -
            • It has a fast and smooth performance that optimizes your PC's speed, memory, and battery life.
            • -
            • It has a high level of security and privacy that protects your PC from viruses, malware, spyware, and other threats.
            • -
            • It has a wide compatibility with various devices, software, and drivers that enhance your PC's functionality and productivity.
            • -
            • It has a rich multimedia experience that supports various formats of audio, video, and images.
            • -
            • It has a backup and restore feature that allows you to create backups of your files and settings and restore them in case of any problem.
            • -
            - -

            MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is compatible with Windows 10 and later drivers, Windows 10 and later upgrade & servicing drivers, Windows 11 Client, version 22H2 and later, Servicing Drivers, Windows 11 Client, version 22H2 and later, Upgrade & Servicing Drivers.

            - -

            MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is available as an ISO file that you can download from various sources on the internet. You can also download an activator that can help you activate your copy of Windows 7 Ultimate without any hassle.

            -

            - -

            MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is a great choice for anyone who wants a reliable and powerful operating system for their PC. It is free, updated, secure, and versatile. It has a lot of features and benefits that can make your PC experience more enjoyable and effective. Download MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D today and see for yourself!

            -

            How to Download and Install MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D on Your PC

            - -

            If you want to download and install MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D on your PC, you can follow these simple steps:

            - -
              -
            1. Go to any of the sources that offer MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D as an ISO file, such as the Internet Archive or other trusted websites.
            2. -
            3. Click on the download button and choose the location where you want to save the file on your computer.
            4. -
            5. Once the download is complete, you will need a software that can extract the ISO file, such as WinRAR or 7-Zip.
            6. -
            7. Right-click on the ISO file and select Extract Here or Extract to Folder.
            8. -
            9. You will see a folder with the name of the ISO file, which contains all the files and folders of Windows 7 Ultimate.
            10. -
            11. You will need a software that can burn the ISO file to a DVD or a USB flash drive, such as Rufus or ImgBurn.
            12. -
            13. Insert a blank DVD or a USB flash drive into your computer and launch the software.
            14. -
            15. Select the ISO file and the destination drive and click on Start or Burn.
            16. -
            17. Wait for the process to finish and eject the DVD or the USB flash drive.
            18. -
            19. Restart your computer and boot from the DVD or the USB flash drive.
            20. -
            21. Follow the instructions on the screen and complete the installation process of Windows 7 Ultimate.
            22. -
            - -

            You can also watch this video tutorial that shows you how to download and install MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D on your PC:

            - - - -

            Why You Should Choose MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D for Your PC

            - -

            MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is not just another operating system for your PC. It is a reliable and powerful operating system that can meet your needs and expectations. Here are some of the reasons why you should choose MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D for your PC over other operating systems:

            - -
              -
            • It is free. You don't have to pay anything to download and use MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D on your PC. You can access all its features and benefits without any limitations or restrictions.
            • -
            • It is updated. MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is constantly updated with new features, bug fixes, and improvements. You can always get the latest version of MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D from any of the sources that offer it as an ISO file.
            • -
            • It is secure. MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D has a high level of security and privacy that protects your PC from viruses, malware, spyware, and other threats. It also has a backup and restore feature that allows you to create backups of your files and settings and restore them in case of any problem.
            • -
            • It is compatible. MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is compatible with Windows 10 and later drivers, Windows 10 and later upgrade & servicing drivers, Windows 11 Client, version 22H2 and later, Servicing Drivers, Windows 11 Client, version 22H2 and later, Upgrade & Servicing Drivers.
            • -
            • It is versatile.MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is versatile and can be used for various purposes and occasions.Whether you need it for studying, working, gaming, or just for fun,MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D can meet your needs and expectations.
            • -
            - -

            MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is a great choice for anyone who wants a reliable and powerful operating system for their PC.It is free, updated, secure, compatible,and versatile.It has a lot of features and benefits that can make your PC experience more enjoyable and effective.Download MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D today and see for yourself!

            -

            How to Activate MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D on Your PC

            - -

            If you want to activate MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D on your PC, you can use an activator that can help you activate your copy of Windows 7 Ultimate without any hassle. Here are some of the steps you can follow to use an activator:

            - -
              -
            1. Download an activator from any of the sources that offer it, such as the Internet Archive or other trusted websites.
            2. -
            3. Save the file on your computer and run it as an administrator.
            4. -
            5. Select your edition of Windows 7 Ultimate and click on Install.
            6. -
            7. Wait for the process to finish and restart your computer.
            8. -
            9. Check if your copy of Windows 7 Ultimate has been activated successfully.
            10. -
            - -

            You can also watch this video tutorial that shows you how to use an activator to activate MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D on your PC:

            - - - -

            What Are the Differences Between MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D and Other Versions of Windows 7

            - -

            MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is one of the many versions of Windows 7 that are available for download and use. However, it is not the only version of Windows 7 that you can choose from. There are other versions of Windows 7 that have different features and benefits, such as Windows 7 Home Premium, Windows 7 Professional, Windows 7 Enterprise, and Windows 7 Starter.

            - -

            Here are some of the differences between MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D and other versions of Windows 7:

            - -
        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        VersionFeaturesBenefits
        Windows 7 UltimateIt has all the features of Windows 7 Home Premium and Windows 7 Professional, plus some additional features, such as BitLocker, AppLocker, BranchCache, DirectAccess, Virtual Hard Disk Booting, Multilingual User Interface Pack, and Windows XP Mode.It is the most complete and powerful version of Windows 7 for 64-bit systems. It is suitable for anyone who wants a reliable and versatile operating system for their PC.
        Windows 7 Home PremiumIt has most of the features of Windows 7 Ultimate, except for some advanced features, such as BitLocker, AppLocker, BranchCache, DirectAccess, Virtual Hard Disk Booting, Multilingual User Interface Pack, and Windows XP Mode.It is a good version of Windows 7 for home users who want a user-friendly and customizable operating system for their PC.
        Windows 7 ProfessionalIt has most of the features of Windows 7 Home Premium, plus some additional features, such as Domain Join, Remote Desktop Host, Location Aware Printing, Encrypting File System, Presentation Mode, Offline Files, and Windows XP Mode.It is a good version of Windows 7 for business users who want a secure and productive operating system for their PC.
        Windows 7 EnterpriseIt has all the features of Windows 7 Professional, plus some additional features, such as BitLocker, AppLocker, BranchCache, DirectAccess, Virtual Hard Disk Booting, Multilingual User Interface Pack.It is a good version of Windows 7 for large organizations who want a scalable and manageable operating system for their PC.
        Windows 7 StarterIt has only the basic features of Windows 7 Ultimate, such as Aero Snap, Internet Explorer 8, Windows Media Player 12, Windows Defender, and Parental Controls.It is a good version of Windows 7 for low-end PCs or netbooks that have limited hardware resources.
        - -

        MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is one of the best versions of Windows 7 that you can download and use on your PC.However,you should always compare it with other versions of Windows 7 and choose the one that suits your needs and preferences best.

        -

        Conclusion

        - -

        In this article, we have discussed MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D, a reliable and powerful operating system for your PC. We have covered its features and benefits, how to download and install it, how to use it, how to activate it, how to uninstall it, and some frequently asked questions and answers about it. We have also provided some testimonials and reviews from real users who have tried MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D.

        - -

MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D is a great choice for anyone who wants a reliable and powerful operating system for their PC. It is free, updated, secure, compatible, and versatile. It has a lot of features and benefits that can make your PC experience more enjoyable and effective. Download MICROSOFT.WINDOWS.7.ULTIMATE.7600.16385.RTM.X64.RETAIL.ENGLISH.D today and see for yourself!

        \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/All In One Keylogger 4.0 Cracked.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/All In One Keylogger 4.0 Cracked.md deleted file mode 100644 index fbc4345c2128794c3ae3e5c8dccf51cd90b9831d..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/All In One Keylogger 4.0 Cracked.md +++ /dev/null @@ -1,9 +0,0 @@ -
        -

You should also read reviews of the software on independent websites to get an idea of what other users think of its features and reliability. There are plenty of well-regarded, independent review sites on the web, including windowsuser.com, softpedia.com and cnet.com. You can also search for reviews of CCleaner Professional online.

        -

        all in one keylogger 4.0 cracked


        Download Filehttps://urluss.com/2uCFka



        -

When searching for a legitimate CCleaner Pro key, you should look for real licensing key numbers and not a number that simply looks like a legitimate one. Real numbers can be found under the License Key column in the product features description. A key will be displayed like this: cs3271520-c30c-f626-08c1-78a912c8ff6e.
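If you just want a quick sanity check that a string at least has the shape of the key shown above, a short script can do it. The sketch below is a Python illustration only: the pattern is inferred from that single example key (a two-letter, seven-digit prefix followed by hyphen-separated hexadecimal groups) and is not an official format specification.

```python
import re

# Rough shape of the example key above: an alphanumeric prefix, then
# hyphen-separated hexadecimal groups (4-4-4-12 characters).
# The pattern is inferred from one sample string, so treat it as a
# plausibility check rather than an official validation rule.
KEY_PATTERN = re.compile(
    r"^[A-Za-z]{2}\d{7}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
)

def looks_like_license_key(candidate: str) -> bool:
    """Return True if the string matches the rough key shape."""
    return bool(KEY_PATTERN.match(candidate.strip()))

print(looks_like_license_key("cs3271520-c30c-f626-08c1-78a912c8ff6e"))  # True
print(looks_like_license_key("not-a-real-key"))                         # False
```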

        -

Previously, we demonstrated how to check all of the Siemens Building Logistics (SBL) systems using a Raspberry Pi, and what sort of insights you can gain. In this installment, we will take a look at the process of getting a hard drive image of the client servers, and what we can learn from it. In addition, we will continue to be picky about the variety of hashes used in each step, using all available data to make the hashes more unique. After all that, we will once again get back to the RSA account, where we will be able to crack all three accounts and help decrypt them.

        -

In this project, we've used an inexpensive, easy-to-carry laptop to spy on Siemens Building Logistics systems using a Raspberry Pi. To be able to covertly follow a Siemens worker with a Raspberry Pi, we first had to build an inexpensive portable USB device to house the Raspberry Pi and to capture data from the USB port. We will use the Raspbian image created by Influx Industries and Flowtech, made available to us by Sean Cuffe, to install the image onto a small, cheap device. In addition, Sean provided us with a Siemens USB dongle, which will allow for remote access to each and every Siemens Building Logistics system.

        -

        \ No newline at end of file diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/evaluation/__init__.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/evaluation/__init__.py deleted file mode 100644 index f7cc4b23413a0639e9de00eeb0bf600632d2c6cd..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/evaluation/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .class_names import get_classes, get_palette -from .eval_hooks import DistEvalHook, EvalHook -from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou - -__all__ = [ - 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore', - 'eval_metrics', 'get_classes', 'get_palette' -] diff --git a/spaces/szukevin/VISOR-GPT/train/scripts/topn_words_indep.py b/spaces/szukevin/VISOR-GPT/train/scripts/topn_words_indep.py deleted file mode 100644 index d5513f9767b75d99991ea5b3239b875a7b8c9bf6..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/scripts/topn_words_indep.py +++ /dev/null @@ -1,56 +0,0 @@ -import sys -import os -import torch -import argparse - -tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -sys.path.append(tencentpretrain_dir) - -from tencentpretrain.utils.vocab import Vocab - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--load_model_path", default=None, type=str, - help="Path of the input model.") - parser.add_argument("--vocab_path", default=None, type=str, - help="Path of the vocabulary file.") - parser.add_argument("--spm_model_path", default=None, type=str, - help="Path of the sentence piece model.") - parser.add_argument("--test_path", default=None, type=str, - help="Path of the target words file.") - parser.add_argument("--topn", type=int, default=15) - - args = parser.parse_args() - - if args.spm_model_path: - try: - import sentencepiece as spm - except ImportError: - raise ImportError("You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece" - "pip install sentencepiece") - sp_model = spm.SentencePieceProcessor() - sp_model.Load(args.spm_model_path) - vocab = Vocab() - vocab.i2w = {i: sp_model.IdToPiece(i) for i in range(sp_model.GetPieceSize())} - vocab.w2i = {sp_model.IdToPiece(i): i for i in range(sp_model.GetPieceSize())} - else: - vocab = Vocab() - vocab.load(args.vocab_path) - - pretrained_model = torch.load(args.load_model_path) - embedding = pretrained_model["embedding.word.embedding.weight"] - - with open(args.test_path, mode="r", encoding="utf-8") as f: - for line in f: - word = line.strip().split()[0] - if len(word) <= 0: - continue - print("Target word: " + word) - target_embedding = embedding[vocab.w2i[word], :] - - sims = torch.nn.functional.cosine_similarity(target_embedding.view(1, -1), embedding) - sorted_id = torch.argsort(sims, descending=True) - for i in sorted_id[1: args.topn+1]: - print(vocab.i2w[i].strip() + "\t" + str(sims[i].item())) - print() diff --git a/spaces/tennant/MUG_caption/README.md b/spaces/tennant/MUG_caption/README.md deleted file mode 100644 index c0c544b02e638b7978b3589f2509f7503cd94d78..0000000000000000000000000000000000000000 --- a/spaces/tennant/MUG_caption/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: MUG Caption -emoji: 🐢 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at 
https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/terfces0erbo/CollegeProjectV2/Adventnet Manageengine Servicedesk Plus Keygen Torrent.md b/spaces/terfces0erbo/CollegeProjectV2/Adventnet Manageengine Servicedesk Plus Keygen Torrent.md deleted file mode 100644 index fe1ee62a0cfc33e54d83de3b8fd72b3ccece3c61..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Adventnet Manageengine Servicedesk Plus Keygen Torrent.md +++ /dev/null @@ -1,19 +0,0 @@ -

        adventnet manageengine servicedesk plus keygen torrent


        Download Filehttps://bytlly.com/2uGlbF



Enter the search key in the field below the selected column. The AdventNet ManageEngine ServiceDesk Plus asset field needs to be mapped to a field. Enter the search key on the sheet.

- The field must match the field in the list.
- Click .
- A string will be entered in the Search Code field.
- Select a search code on the sheet.
- If no search code has been entered before, select a sheet and press .
- A string will be entered in the Search key field.
- Enter the search key on the sheet.
- Select a search code on the sheet and click .
- The search key will be entered in the Search Code field.
- Click Save.
- The entered value will be saved.

Deleting entries

If an error occurs, the entries will be deleted.
        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/Bibleworks 10 Serial Key BEST Keygenl.md b/spaces/terfces0erbo/CollegeProjectV2/Bibleworks 10 Serial Key BEST Keygenl.md deleted file mode 100644 index d631c244370fb28857ce3212e46948682b476116..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Bibleworks 10 Serial Key BEST Keygenl.md +++ /dev/null @@ -1,18 +0,0 @@ -

        Bibleworks 10 Serial Key Keygenl


        Download >>>>> https://bytlly.com/2uGlX5



        - -Bibleworks 10 Serial Keygenl petrfro. 4 item. DOWNLOAD: 2a1358a15e. Related Links: orientalism bangla pdf Download Finecut ... Download torrent "Orientalism" -Orientalism (2010) | Download torrent without registration . -Title: Orientalism. -Quality: DVDRip. -Screenshots. -Download torrent Orientalism. -Orientalism. -Orientalism (2010) DVDRip | eng | rus | torrent. -Télécharger Orientalism. . -Download Orientalism.torrent -Orientalism (Orientalism) - Orientalism download torrent, Orientalism torrent. . -Orientalism (2010) PC. -Download Orientalism torrent free without registration. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/Fujitsu Ah531 Recovery Disk Download.md b/spaces/terfces0erbo/CollegeProjectV2/Fujitsu Ah531 Recovery Disk Download.md deleted file mode 100644 index 106a664df74d393769c12f181988303526e21946..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Fujitsu Ah531 Recovery Disk Download.md +++ /dev/null @@ -1,47 +0,0 @@ -
        -

        How to Create and Use a Recovery Disk for Fujitsu Ah531 Laptop

        -

        If you own a Fujitsu Ah531 laptop, you may want to create a recovery disk that can help you troubleshoot and fix problems with your PC, even if it won't start. A recovery disk is a USB hard disk or flash drive that contains a copy of your system image, which is the configuration of your C: drive at a particular point of time. You can use the recovery disk to restore your system to its original state or to a previous backup image.

        -

        Fujitsu Ah531 Recovery Disk Download


        DOWNLOAD ✪✪✪ https://bytlly.com/2uGlBf



        -

        In this article, we will show you how to create and use a recovery disk for your Fujitsu Ah531 laptop. Please note that you will need a USB drive with enough capacity to store the system image, and that everything on the drive will be deleted during the process. You will also need to back up your personal files before proceeding with recovery, as Fujitsu is not liable for any data loss.

        -

        Creating a Recovery Disk

        -

        To create a recovery disk for your Fujitsu Ah531 laptop, follow these steps:

        -
          -
1. From the taskbar, search for Create a recovery drive and then select it. You might be asked to enter an admin password or confirm the User Account Control dialog.
2. When the tool opens, make sure Back up system files to the recovery drive is selected, then select Next. This will copy the system image to the recovery drive.
3. Connect a USB drive to your PC, select it, and then click Next > Create. A lot of files need to be copied to the recovery drive, so this might take a while.
4. When it's done, click Finish.
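If you prefer a scriptable route, the sketch below shows one possible way to start a one-time system image backup from Python by calling the built-in wbadmin tool instead of the graphical wizard. This is a generic Windows alternative, not part of the Fujitsu procedure; the E: target drive is an assumed example, and the command needs an elevated (administrator) session.

```python
import subprocess

# One-time system image backup via the built-in Windows wbadmin tool.
# Assumptions: this runs on Windows from an elevated (administrator)
# session, and E: is a drive with enough free space to hold the image.
TARGET_DRIVE = "E:"  # hypothetical backup target, adjust to your setup

cmd = [
    "wbadmin", "start", "backup",
    f"-backupTarget:{TARGET_DRIVE}",
    "-include:C:",
    "-allCritical",   # include all volumes required to restore Windows
    "-quiet",         # do not prompt for confirmation
]

result = subprocess.run(cmd, capture_output=True, text=True)
print(result.stdout)
if result.returncode != 0:
    print("Backup failed:", result.stderr)
```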

        Using a Recovery Disk

        -

        To use a recovery disk for your Fujitsu Ah531 laptop, follow these steps:

        -

        -
          -
1. Turn off your PC and insert the recovery disk into a USB port.
2. Turn on your PC and press the F12 key when the Fujitsu logo appears. This will open the Boot Menu.
3. Select Recovery and Utility and press the Enter key. This will launch the Recovery and Utility program.
4. Select Recover System from USB Memory Device and follow the on-screen instructions. You can choose to restore your system to the factory image or to a backup image that you created earlier.
5. Wait for the recovery process to complete. Your PC will reboot several times during this process.
6. After the recovery is complete, you can remove the recovery disk from your PC and set up your system as usual.

        We hope this article was helpful for you. For more information about Fujitsu Ah531 laptop, please visit https://support.ts.fujitsu.com/Search/SWP1073231.asp.

        - -

        Troubleshooting Tips for Fujitsu Ah531 Laptop

        -

        Sometimes, you may encounter some issues with your Fujitsu Ah531 laptop that can be resolved without using a recovery disk. Here are some common troubleshooting tips that you can try:

        -
          -
• If your PC is running slowly or freezing, you can try to optimize its performance by using the Task Manager to close any unnecessary programs or processes, or by using the Disk Cleanup tool to delete any temporary files or unnecessary data (see the Python sketch after this list for a scripted way to spot memory-hungry processes).
• If your PC is having problems connecting to the internet or other devices, you can try to troubleshoot the network settings by using the Network troubleshooter or by updating the drivers for your network adapter.
• If your PC is having problems with sound or display, you can try to adjust the volume or brightness settings, or update the drivers for your sound card or graphics card.
• If your PC is having problems starting up or shutting down, you can try to use the Startup Repair tool or the Advanced Startup Options to fix any boot issues or to restore your system to a previous point.
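As a rough companion to the first tip above, the following Python sketch prints the five processes using the most memory, which is essentially what Task Manager shows when sorted by the Memory column. It assumes the third-party psutil package is installed (pip install psutil) and is only an illustration, not a Fujitsu utility.

```python
import psutil

# Show the five processes with the largest resident memory (RSS):
# roughly what Task Manager displays when sorted by the Memory column.
procs = []
for p in psutil.process_iter(["pid", "name", "memory_info"], ad_value=None):
    mem = p.info.get("memory_info")
    if mem is None:
        continue  # process exited or access was denied; skip it
    procs.append((mem.rss, p.info["pid"], p.info["name"] or "?"))

for rss, pid, name in sorted(procs, reverse=True)[:5]:
    print(f"{name:<30} pid={pid:<8} {rss / (1024 ** 2):8.1f} MiB")
```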

        If none of these tips work, you may need to use a recovery disk to restore your system. Please refer to the previous section for instructions on how to create and use a recovery disk.

        -

        Frequently Asked Questions about Fujitsu Ah531 Laptop

        -

        In this section, we will answer some of the most frequently asked questions about Fujitsu Ah531 laptop. If you have any other questions, please contact Fujitsu support or visit their website.

        -
        -
Q: How do I update the BIOS for my Fujitsu Ah531 laptop?

A: To update the BIOS for your Fujitsu Ah531 laptop, you need to download the latest BIOS update utility from https://support.ts.fujitsu.com/Search/SWP1073231.asp and follow the instructions on how to run it. Please note that updating the BIOS is a risky process that can cause serious damage to your system if done incorrectly. You should only update the BIOS if you are experiencing problems that cannot be solved by other methods.

Q: How do I reset my Fujitsu Ah531 laptop to its factory settings?

A: To reset your Fujitsu Ah531 laptop to its factory settings, you need to use a recovery disk that contains a copy of the factory image. Please refer to the previous section for instructions on how to create and use a recovery disk. Please note that resetting your system will erase all your personal files and settings, so make sure you back them up before proceeding.

Q: How do I upgrade the memory or hard drive for my Fujitsu Ah531 laptop?

A: To upgrade the memory or hard drive for your Fujitsu Ah531 laptop, you need to open the bottom cover of your laptop and replace the existing modules with compatible ones. Please refer to the user manual for detailed instructions on how to do this. Please note that upgrading the memory or hard drive may void your warranty, so make sure you check with Fujitsu before doing so.

        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Dont Fall for IDM Serial Number Crack and Keygen Free Download - Heres How to Get a Legal and Safe IDM License.md b/spaces/tialenAdioni/chat-gpt-api/logs/Dont Fall for IDM Serial Number Crack and Keygen Free Download - Heres How to Get a Legal and Safe IDM License.md deleted file mode 100644 index ce9584e97cf4daf5af080e4b6ea5537b91b8d7cb..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Dont Fall for IDM Serial Number Crack and Keygen Free Download - Heres How to Get a Legal and Safe IDM License.md +++ /dev/null @@ -1,23 +0,0 @@ -
        -

        How to Get Internet Download Manager Serial Number Crack and Keygen Free Download

        -

Internet Download Manager (IDM) is one of the most popular and powerful download managers and can boost your download speed up to 5 times. It also supports resuming and scheduling downloads, error recovery, and many other features. However, IDM is not free software and you need a valid serial number to activate it. If you don't have one, you may be tempted to look for IDM serial number crack and keygen free download on the internet. But is it safe and legal to do so?

        -

        In this article, we will explain why you should avoid IDM serial number crack and keygen free download and what are the risks and consequences of using them. We will also show you how to get a genuine IDM serial number for free without breaking any laws or harming your computer.

        -

        internet download manager serial number crack and keygen free download


        Download Filehttps://urlcod.com/2uK625



        -

        Why You Should Avoid IDM Serial Number Crack and Keygen Free Download

        -

        IDM serial number crack and keygen are tools that claim to generate a valid serial number for IDM or bypass its activation process. They are usually distributed on various websites or forums that offer pirated software or hacks. However, these tools are not only illegal but also dangerous for your computer and data. Here are some of the reasons why you should avoid IDM serial number crack and keygen free download:

        -
          -
• They are illegal. IDM is copyrighted software that belongs to Tonec Inc., the developer of IDM. Using IDM without a valid license is a violation of their intellectual property rights and can result in legal action against you. You may also face fines or jail time depending on your country's laws.
• They are malware. IDM serial number crack and keygen are often infected with viruses, trojans, worms, ransomware, spyware, adware, or other malicious programs that can damage your computer or steal your personal information. This malware can also compromise your online security and privacy by exposing your browsing history, passwords, bank accounts, credit cards, or other sensitive data to hackers or cybercriminals.
• They are unreliable. IDM serial number crack and keygen are not guaranteed to work properly or at all. They may fail to activate IDM or cause it to malfunction or crash. They may also contain hidden backdoors or scripts that can remotely control your computer or install unwanted software without your consent. They may also be detected and blocked by IDM's update system or antivirus software.
• They are unethical. IDM serial number crack and keygen are unfair to the developers of IDM, who have invested their time, money, and effort to create high-quality software that benefits millions of users around the world. By using these tools, you are depriving them of their rightful income and discouraging them from improving their product or creating new ones.

        How to Get a Genuine IDM Serial Number for Free

        -

        If you want to use IDM legally and safely, you need to get a genuine IDM serial number from the official website of Tonec Inc. There are two ways to do this:

        -
          -
• Buy a license. This is the best and most recommended way to get a genuine IDM serial number. You can buy a license for one year or lifetime from the official website of Tonec Inc. The price is reasonable and affordable considering the features and benefits of IDM. You will also get free updates and technical support from the developers. You can pay using various methods such as credit card, PayPal, bank transfer, etc.
• Get a trial. This is another way to get a genuine IDM serial number for free, but only for a limited time. You can get a 30-day trial version of IDM from the official website of Tonec Inc. The trial version has all the features and functions of the full version except that it expires after 30 days. You can use the trial version to test IDM's performance and compatibility with your system before buying a license. You can also extend the trial period by using some tricks such as changing your system date or using a third-party tool (not recommended).

        Conclusion

        -

IDM is a great piece of software that can enhance your download experience and productivity. However, you should not use IDM serial number crack and keygen free downloads, as they are illegal, malware-ridden, unreliable, and unethical. Instead, you should get a genuine IDM serial number from the official website of Tonec Inc., either by buying a license or getting a trial. This way, you can use IDM safely and legally while supporting the developers who maintain it.

        \ No newline at end of file diff --git a/spaces/timqian/like-history/static/css/main.be5d5885.css b/spaces/timqian/like-history/static/css/main.be5d5885.css deleted file mode 100644 index 5b741609184b6e42028b1e460fb7e372a0d7b2df..0000000000000000000000000000000000000000 --- a/spaces/timqian/like-history/static/css/main.be5d5885.css +++ /dev/null @@ -1,4 +0,0 @@ -/* -! tailwindcss v3.3.3 | MIT License | https://tailwindcss.com -*/*,:after,:before{border:0 solid #e5e7eb;box-sizing:border-box}:after,:before{--tw-content:""}html{-webkit-text-size-adjust:100%;-webkit-font-feature-settings:normal;font-feature-settings:normal;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;font-variation-settings:normal;line-height:1.5;tab-size:4}body{line-height:inherit;margin:0}hr{border-top-width:1px;color:inherit;height:0}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,pre,samp{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:initial}sub{bottom:-.25em}sup{top:-.5em}table{border-collapse:collapse;border-color:inherit;text-indent:0}button,input,optgroup,select,textarea{-webkit-font-feature-settings:inherit;font-feature-settings:inherit;color:inherit;font-family:inherit;font-size:100%;font-variation-settings:inherit;font-weight:inherit;line-height:inherit;margin:0;padding:0}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button;background-color:initial;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:initial}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dd,dl,figure,h1,h2,h3,h4,h5,h6,hr,p,pre{margin:0}fieldset{margin:0}fieldset,legend{padding:0}menu,ol,ul{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-webkit-input-placeholder,textarea::-webkit-input-placeholder{color:#9ca3af}input::placeholder,textarea::placeholder{color:#9ca3af}[role=button],button{cursor:pointer}:disabled{cursor:default}audio,canvas,embed,iframe,img,object,svg,video{display:block;vertical-align:middle}img,video{height:auto;max-width:100%}[hidden]{display:none}[multiple],[type=date],[type=datetime-local],[type=email],[type=month],[type=number],[type=password],[type=search],[type=tel],[type=text],[type=time],[type=url],[type=week],input:where(:not([type])),select,textarea{--tw-shadow:0 0 #0000;-webkit-appearance:none;appearance:none;background-color:#fff;border-color:#6b7280;border-radius:0;border-width:1px;font-size:1rem;line-height:1.5rem;padding:.5rem 
.75rem}[multiple]:focus,[type=date]:focus,[type=datetime-local]:focus,[type=email]:focus,[type=month]:focus,[type=number]:focus,[type=password]:focus,[type=search]:focus,[type=tel]:focus,[type=text]:focus,[type=time]:focus,[type=url]:focus,[type=week]:focus,input:where(:not([type])):focus,select:focus,textarea:focus{--tw-ring-inset:var(--tw-empty,/*!*/ /*!*/);--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:#2563eb;--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color);border-color:#2563eb;box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow);outline:2px solid transparent;outline-offset:2px}input::-webkit-input-placeholder,textarea::-webkit-input-placeholder{color:#6b7280;opacity:1}input::placeholder,textarea::placeholder{color:#6b7280;opacity:1}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-date-and-time-value{min-height:1.5em;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit,::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-meridiem-field,::-webkit-datetime-edit-millisecond-field,::-webkit-datetime-edit-minute-field,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-second-field,::-webkit-datetime-edit-year-field{padding-bottom:0;padding-top:0}select{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3E%3Cpath stroke='%236b7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='m6 8 4 4 4-4'/%3E%3C/svg%3E");background-position:right .5rem center;background-repeat:no-repeat;background-size:1.5em 1.5em;padding-right:2.5rem;-webkit-print-color-adjust:exact;print-color-adjust:exact}[multiple],[size]:where(select:not([size="1"])){background-image:none;background-position:0 0;background-repeat:repeat;background-size:initial;padding-right:.75rem;-webkit-print-color-adjust:inherit;print-color-adjust:inherit}[type=checkbox],[type=radio]{--tw-shadow:0 0 #0000;-webkit-appearance:none;appearance:none;background-color:#fff;background-origin:border-box;border-color:#6b7280;border-width:1px;color:#2563eb;display:inline-block;flex-shrink:0;height:1rem;padding:0;-webkit-print-color-adjust:exact;print-color-adjust:exact;-webkit-user-select:none;user-select:none;vertical-align:middle;width:1rem}[type=checkbox]{border-radius:0}[type=radio]{border-radius:100%}[type=checkbox]:focus,[type=radio]:focus{--tw-ring-inset:var(--tw-empty,/*!*/ /*!*/);--tw-ring-offset-width:2px;--tw-ring-offset-color:#fff;--tw-ring-color:#2563eb;--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow);outline:2px solid transparent;outline-offset:2px}[type=checkbox]:checked,[type=radio]:checked{background-color:currentColor;background-position:50%;background-repeat:no-repeat;background-size:100% 100%;border-color:transparent}[type=checkbox]:checked{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 16 16' fill='%23fff' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.207 4.793a1 1 0 0 1 0 1.414l-5 5a1 1 0 0 1-1.414 0l-2-2a1 1 0 0 1 1.414-1.414L6.5 9.086l4.293-4.293a1 1 0 0 1 1.414 
0z'/%3E%3C/svg%3E")}[type=radio]:checked{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 16 16' fill='%23fff' xmlns='http://www.w3.org/2000/svg'%3E%3Ccircle cx='8' cy='8' r='3'/%3E%3C/svg%3E")}[type=checkbox]:checked:focus,[type=checkbox]:checked:hover,[type=radio]:checked:focus,[type=radio]:checked:hover{background-color:currentColor;border-color:transparent}[type=checkbox]:indeterminate{background-color:currentColor;background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 16'%3E%3Cpath stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M4 8h8'/%3E%3C/svg%3E");background-position:50%;background-repeat:no-repeat;background-size:100% 100%;border-color:transparent}[type=checkbox]:indeterminate:focus,[type=checkbox]:indeterminate:hover{background-color:currentColor;border-color:transparent}[type=file]{background:transparent none repeat 0 0/auto auto padding-box border-box scroll;background:initial;border-color:inherit;border-radius:0;border-width:0;font-size:inherit;line-height:inherit;padding:0}[type=file]:focus{outline:1px solid ButtonText;outline:1px auto -webkit-focus-ring-color}*,:after,:before{--tw-border-spacing-x:0;--tw-border-spacing-y:0;--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness:proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:rgba(59,130,246,.5);--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::-webkit-backdrop{--tw-border-spacing-x:0;--tw-border-spacing-y:0;--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness:proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:rgba(59,130,246,.5);--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x:0;--tw-border-spacing-y:0;--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: 
;--tw-scroll-snap-strictness:proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:rgba(59,130,246,.5);--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.sr-only{clip:rect(0,0,0,0);border-width:0;height:1px;margin:-1px;overflow:hidden;padding:0;white-space:nowrap;width:1px}.absolute,.sr-only{position:absolute}.relative{position:relative}.inset-y-0{bottom:0;top:0}.left-0{left:0}.right-0{right:0}.mx-auto{margin-left:auto;margin-right:auto}.mt-2{margin-top:.5rem}.block{display:block}.flex{display:flex}.h-full{height:100%}.w-full{width:100%}.max-w-3xl{max-width:48rem}.max-w-7xl{max-width:80rem}.items-center{align-items:center}.rounded-md{border-radius:.375rem}.border-0{border-width:0}.bg-transparent{background-color:initial}.px-4{padding-left:1rem;padding-right:1rem}.py-0{padding-bottom:0;padding-top:0}.py-1{padding-bottom:.25rem;padding-top:.25rem}.py-1\.5{padding-bottom:.375rem;padding-top:.375rem}.py-16{padding-bottom:4rem;padding-top:4rem}.pl-24{padding-left:6rem}.pl-3{padding-left:.75rem}.pr-7{padding-right:1.75rem}.text-right{text-align:right}.text-lg{font-size:1.125rem;line-height:1.75rem}.text-sm{font-size:.875rem;line-height:1.25rem}.font-light{font-weight:300}.text-gray-500{--tw-text-opacity:1;color:rgb(107 114 128/var(--tw-text-opacity))}.text-gray-600{--tw-text-opacity:1;color:rgb(75 85 99/var(--tw-text-opacity))}.text-gray-900{--tw-text-opacity:1;color:rgb(17 24 39/var(--tw-text-opacity))}.shadow-sm{--tw-shadow:0 1px 2px 0 rgba(0,0,0,.05);--tw-shadow-colored:0 1px 2px 0 var(--tw-shadow-color);box-shadow:0 0 #0000,0 0 #0000,var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.ring-1{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),0 0 #0000;box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000)}.ring-inset{--tw-ring-inset:inset}.ring-gray-300{--tw-ring-opacity:1;--tw-ring-color:rgb(209 213 219/var(--tw-ring-opacity))}.placeholder\:text-gray-400::-webkit-input-placeholder{--tw-text-opacity:1;color:rgb(156 163 175/var(--tw-text-opacity))}.placeholder\:text-gray-400::placeholder{--tw-text-opacity:1;color:rgb(156 163 175/var(--tw-text-opacity))}.focus\:outline-none:focus{outline:2px solid transparent;outline-offset:2px}.focus\:ring-2:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),0 0 #0000;box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 
#0000)}.focus\:ring-inset:focus{--tw-ring-inset:inset}.focus\:ring-indigo-600:focus{--tw-ring-opacity:1;--tw-ring-color:rgb(79 70 229/var(--tw-ring-opacity))}@media (min-width:640px){.sm\:px-6{padding-left:1.5rem;padding-right:1.5rem}.sm\:text-sm{font-size:.875rem;line-height:1.25rem}.sm\:leading-6{line-height:1.5rem}}@media (min-width:1024px){.lg\:px-8{padding-left:2rem;padding-right:2rem}} -/*# sourceMappingURL=main.be5d5885.css.map*/ \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/Autodata340nl-Crack.md b/spaces/tioseFevbu/cartoon-converter/Autodata340nl-Crack.md deleted file mode 100644 index b82e9395c857ec79e7b7488794d1da5fc032d0f1..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/Autodata340nl-Crack.md +++ /dev/null @@ -1,112 +0,0 @@ -## Autodata-3-40-nl Crack - - - - - - ![Autodata-3-40-nl Crack](https://ist1-2.filesor.com/pimpandhost.com/6/8/3/6/68364/1/2/Y/k/12Ykt/autodata-3-40.jpg) - - - - - -**Download File >> [https://urluso.com/2tyQv3](https://urluso.com/2tyQv3)** - - - - - - - - - - - - - -# How to Download and Install Autodata 3.40 NL Crack - - - -Autodata 3.40 NL is a comprehensive software that provides technical information and diagnostics for various vehicles. It covers over 17,000 models from 80 manufacturers worldwide. It includes wiring diagrams, service schedules, repair times, fault codes, and more. - - - -If you are looking for a way to download and install Autodata 3.40 NL crack, you have come to the right place. In this article, we will show you how to get this software for free and use it without any limitations. - - - -## Step 1: Download Autodata 3.40 NL Crack - - - -The first step is to download Autodata 3.40 NL crack from a reliable source. You can find it on various websites that offer software downloads, such as [Motorcarsoft.com](https://www.motorcarsoft.com/viewtopic.php?t=12514) [^1^], [Archive.org](https://archive.org/details/Autodata3.40_201412) [^2^], or [Archive.org](https://archive.org/details/autodata3.45crackfulltechtools.net) [^3^]. Make sure you choose the correct version for your operating system (Windows XP, Vista, 7, 8, or 10). - - - -The file size is about 1.4 GB, so it may take some time to download depending on your internet speed. Once the download is complete, you will have an ISO file named "Autodata 3.40 + crack.iso". You will need to mount this file to a virtual drive using a software like Daemon Tools or Power Iso. - - - -## Step 2: Install Autodata 3.40 NL Crack - - - -The second step is to install Autodata 3.40 NL crack on your computer. To do this, follow these instructions: - - - -1. Run "install.cmd" and wait until the installation is finished. - -2. Run "crack.bat" as administrator. - -3. Go to Windows 7\_64 folder and run all registry files. (You may not need this step if you have a 32-bit system). - -4. Find the C:\ADCDA2 folder and click ADBCD.exe. - -5. Enjoy! - - - -You have successfully installed Autodata 3.40 NL crack on your computer. You can now use it without any restrictions or limitations. - - - -## How to Change Autodata 3.40 NL Language to English - - - -If you want to change the language of Autodata 3.40 NL from Dutch to English, you can do so easily by following these steps: - - - -1. Download the English language pack for Autodata 3.40 from [here](https://drive.google.com/file/d/0Bw8B2a85Qa1jUkZvT0RvTnNwZzg/view). - -2. Go to Autodata 3.40 installation directory (C:\ADCDA2) and rename the folder named ENG to ENG.old. - -3. 
Extract the downloaded language pack to C:\ADCDA2. - -4. Run Autodata 3.40 and see if the language has changed to English. - - - -You have successfully changed the language of Autodata 3.40 NL to English. - - - -## Conclusion - - - -Autodata 3.40 NL is a useful software for anyone who works with vehicles. It provides detailed information and diagnostics for various models and makes of cars, trucks, motorcycles, and more. With Autodata 3.40 NL crack, you can get this software for free and use it without any limitations. - - - -In this article, we showed you how to download and install Autodata 3.40 NL crack on your computer, as well as how to change its language to English if you prefer. We hope you found this article helpful and informative. - - 145887f19f - - - - - diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Acronis True Image Wd Edition Software Crack Download __FULL__.md b/spaces/tioseFevbu/cartoon-converter/scripts/Acronis True Image Wd Edition Software Crack Download __FULL__.md deleted file mode 100644 index 7ac60f7ce3d7eef2669cf88190bab2d94fddcc51..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Acronis True Image Wd Edition Software Crack Download __FULL__.md +++ /dev/null @@ -1,25 +0,0 @@ -
        -

        How to Download and Install Acronis True Image WD Edition for Free

        -

        Acronis True Image WD Edition is a powerful backup and recovery software that allows you to clone, migrate, and restore your system and data on a new WD hard drive. It is based on the award-winning Acronis True Image program and supports WD Advanced Format Drives. In this article, we will show you how to download and install Acronis True Image WD Edition for free.

        -

        Acronis True Image Wd Edition Software Crack Download


        Download Ziphttps://urlcod.com/2uHxyx



        -

        Step 1: Download Acronis True Image WD Edition

        -

        To download Acronis True Image WD Edition, you need to have a WD hard drive connected to your computer. You can use any WD drive, such as My Book, My Passport, WD Blue, WD Black, WD Red, or WD Gold. Then, you need to open the WD Discovery app and go to Apps. There, you will see a button that says OPEN next to Acronis True Image for Western Digital[^2^]. Clicking on it will redirect you to the Acronis True Image for Western Digital knowledge base article[^2^], where you can find the download links for Windows and macOS.

        -

        Step 2: Install Acronis True Image WD Edition

        -

        Once you have downloaded the installer file, you need to run it and follow the installation prompts provided by Acronis True Image WD Edition. The installation process may take a few minutes, depending on your system configuration. Once complete, the WD Discovery app will still display OPEN next to Acronis True Image for Western Digital[^2^]. To launch the program, you need to go to your OS application menu and find Acronis True Image WD Edition[^2^].

        -

        Step 3: Use Acronis True Image WD Edition

        -

        Acronis True Image WD Edition offers various features that help you protect your system and data on your new WD hard drive. You can use it to:

        -
          -
• Drive Cloning. Copies the source drive to a new WD drive, leaving all data on the source drive[^1^].
• Drive Migration. Erases the source drive after the new WD drive has been imaged[^1^].
• Drive Deployment. Prepares a WD drive so that selected folders or partitions can be excluded from the destination image[^1^].
• Drive Image Backup. Creates full drive image backups of your source drive at any time and stores them as a file on any direct attached drive, network attached storage drive or optical media (CD/DVD)[^1^].
• Drive Image Recovery. Restores a drive image from a direct or network attached drive or a CD/DVD using the Recovery Manager[^1^].
• Create a Bootable Standalone Recovery Manager. Creates a bootable ISO image file that contains the WD Drive Kit Standalone Recovery Manager software[^1^].
• Recover a Drive Image from the Bootable Recovery Manager. Recovers a drive image from either a CD/DVD drive, a direct attached drive, or a network attached drive using the bootable Standalone Recovery Manager[^1^].

        To learn more about how to use these features, you can refer to the user guide and video tutorials available on the Acronis website[^3^].

        -

        -

        Conclusion

        -

        Acronis True Image WD Edition is a free and reliable backup and recovery software that works with any WD hard drive. It allows you to clone, migrate, and restore your system and data on a new WD hard drive with ease and confidence. To download and install Acronis True Image WD Edition for free, you just need to follow the steps described in this article. We hope you found this article helpful and informative.

        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Chandni Chowk To China 720p Hd.md b/spaces/tioseFevbu/cartoon-converter/scripts/Chandni Chowk To China 720p Hd.md deleted file mode 100644 index f94cd03540d51500da82aeeb2f85e9f49f8f6714..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Chandni Chowk To China 720p Hd.md +++ /dev/null @@ -1,14 +0,0 @@ - -

        Chandni Chowk To China: A Bollywood Action-Comedy with a Chinese Twist

        -

        Chandni Chowk To China is a 2009 Indian film that blends action, comedy and martial arts in a cross-cultural adventure. The film stars Akshay Kumar as Sidhu, a simple cook from Delhi's Chandni Chowk who is mistaken for the reincarnation of an ancient Chinese warrior by a group of oppressed villagers. Along with his friend Chopstick (Ranvir Shorey) and his love interest Sakhi (Deepika Padukone), Sidhu travels to China to face the evil warlord Hojo (Gordon Liu) and his army of kung fu fighters.

        -

        Chandni Chowk To China 720p Hd


        Download Zip »»» https://urlcod.com/2uHwEb



        -

        The film is directed by Nikkhil Advani and produced by Ramesh Sippy, Mukesh Talreja and Rohan Sippy. The film features music by Shankar-Ehsaan-Loy and lyrics by Rajat Arora and Bohemia. The film also features cameo appearances by Mithun Chakraborty, Roger Yuan and Conan Stevens. The film was shot in various locations in India, Thailand and China, including the Great Wall of China.

        -

        Chandni Chowk To China was released on 16 January 2009 and received mixed reviews from critics and audiences. The film was praised for its action sequences, cinematography and performances, but criticized for its weak script, editing and humor. The film was also accused of stereotyping Chinese culture and people. The film was a moderate success at the box office, earning about Rs. 650 million worldwide.

        -

        If you are looking for a fun-filled Bollywood masala movie with a touch of Chinese flavor, you can watch Chandni Chowk To China online in HD quality on various streaming platforms. You can also download Chandni Chowk To China in 720p HD format from various websites that offer free or paid downloads.

        - -

        Chandni Chowk To China is not only a Bollywood film, but also a cultural exchange between India and China. The film showcases the similarities and differences between the two countries, their cuisines, languages, traditions and histories. The film also pays tribute to the legendary martial arts films of China, especially those of Bruce Lee and Jackie Chan. The film features several references and homages to these films, such as the nunchaku scene, the drunken boxing scene and the Shaolin temple scene.

        -

        -

        The film also explores the theme of identity and destiny, as Sidhu struggles to find his true self and his purpose in life. He goes from being a lowly vegetable cutter to a hero who saves a village from tyranny. He also learns the value of friendship, love and courage along the way. The film also has a message of peace and harmony between India and China, as Sidhu and Sakhi unite the two nations with their love.

        -

        Chandni Chowk To China is a film that offers entertainment, action, comedy and romance in equal measure. It is a film that celebrates the diversity and richness of both Indian and Chinese cultures. It is a film that will make you laugh, cry and cheer for the underdog. It is a film that you can watch with your family and friends and enjoy a roller-coaster ride of emotions.

        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Elysium English Sub Download UPDATED.md b/spaces/tioseFevbu/cartoon-converter/scripts/Elysium English Sub Download UPDATED.md deleted file mode 100644 index fd3865c8984e1bc77313d56150cd5166ea2c11ed..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Elysium English Sub Download UPDATED.md +++ /dev/null @@ -1,23 +0,0 @@ -
        -``` -

        How to Download Elysium English Subtitles for Free

        -

        Elysium is a 2013 sci-fi action film directed by Neill Blomkamp and starring Matt Damon, Jodie Foster, and Sharlto Copley. The film is set in the year 2154, where two classes of people exist: the wealthy who live on a luxurious space station called Elysium, and the poor who live on an overpopulated and polluted Earth. The film follows Max, a former car thief who agrees to take on a dangerous mission that could bring equality to these polarized worlds.

        -

        Elysium English Sub Download


        Downloadhttps://urlcod.com/2uHv5N



        -

        If you want to watch Elysium with English subtitles, you have several options to download them for free. Here are some of them:

        -
          -
• OpenSubtitles: This is one of the most popular websites for downloading subtitles in various languages. You can find Elysium subtitles in English and other languages here. You can also rate and comment on the subtitles, as well as request translations or corrections.
• YIFY Subtitles: This website provides subtitles for movies released by YIFY, a well-known torrent group. You can download Elysium subtitles in English and other languages here. You can also browse subtitles by genre, rating, or year.
• SUBDL: This website allows you to download subtitles in different languages with one click. You can find Elysium subtitles in English and other languages here. You can also search subtitles by keyword, IMDB ID, or hash.

        Before downloading any subtitle file, make sure it matches the format and quality of your video file. You can use a media player like VLC or MPC-HC to load the subtitle file and sync it with the video. Enjoy watching Elysium with English subtitles!
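One simple way to make VLC or MPC-HC load the subtitles automatically is to give the downloaded .srt file exactly the same base name as the video file and keep both in the same folder. Below is a minimal Python sketch of that rename; the file names are placeholders for whatever you actually downloaded.

```python
from pathlib import Path

# Rename a downloaded subtitle so it sits next to the video with the
# same base name; most players (VLC, MPC-HC) then load it automatically.
video = Path("Elysium.2013.720p.mkv")          # placeholder video file name
subtitle = Path("elysium-english-subs.srt")    # placeholder downloaded subtitle

target = video.with_suffix(".srt")             # -> Elysium.2013.720p.srt
subtitle.rename(target)
print(f"Subtitle saved as {target}")
```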

        -``` - -``` -

        Elysium is not only a thrilling sci-fi action film, but also a thought-provoking commentary on the social and political issues of our time. The film explores themes such as immigration, health care, class conflict, environmental degradation, and human rights. The film also raises questions about the role of technology, violence, and morality in shaping our future.

        -

        The film has received mixed reviews from critics and audiences alike. Some praised the film for its stunning visuals, intense action sequences, and compelling performances by Damon and Copley. Others criticized the film for its heavy-handed message, simplistic plot, and lack of originality. The film has also been compared to Blomkamp's previous film, District 9, which was widely acclaimed for its innovative and nuanced approach to sci-fi and social commentary.

        -

        Whether you agree or disagree with the film's vision of the future, Elysium is a film that will make you think and feel. It is a film that challenges you to imagine what kind of world you want to live in, and what kind of actions you are willing to take to make it happen.

        -

        If you are interested in watching Elysium with English subtitles, you can download them for free from the websites mentioned above. You can also check out other sources of subtitles online, such as Subscene, Podnapisi, or Addic7ed. Just make sure you have a compatible video player and a reliable internet connection. Enjoy watching Elysium with English subtitles!

        -

        -```

        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Free YouTube Download Premium 4.2.21.1001 WORK Crack With Premium Key Download 2019.md b/spaces/tioseFevbu/cartoon-converter/scripts/Free YouTube Download Premium 4.2.21.1001 WORK Crack With Premium Key Download 2019.md deleted file mode 100644 index c69317613ae4c65f1958e04838acab2c0892eef4..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Free YouTube Download Premium 4.2.21.1001 WORK Crack With Premium Key Download 2019.md +++ /dev/null @@ -1,28 +0,0 @@ -
        -``` -

        How to Download and Crack Free YouTube Download Premium 4.2.21.1001

        -

        Free YouTube Download Premium is a popular software that allows you to download and convert YouTube videos to various formats, such as MP4, MKV, WEBM, and MP3. You can also download entire playlists, channels, and subtitles from YouTube with this program. In this article, we will show you how to download and crack Free YouTube Download Premium 4.2.21.1001 with a premium key.

        -

        Free YouTube Download Premium 4.2.21.1001 Crack With Premium Key Download 2019


        Download Filehttps://urlcod.com/2uHxao



        -

        Step 1: Download the software

        -

        You can download the latest version of Free YouTube Download Premium from the official website of DVDVideoSoft or from other trusted sources, such as FileCR or HaxPC. The file size is about 70 MB and it supports Windows 11, Windows 10, Windows 8.1, and Windows 7. Make sure you have enough free space on your hard drive and a stable internet connection before downloading.
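If you want to check the available disk space programmatically before downloading, a small Python sketch like the one below will do; the C: drive path and the safety margin are assumptions, not requirements of the installer.

```python
import shutil

# Check that the system drive has room for the roughly 70 MB installer
# plus some headroom before starting the download.
REQUIRED_BYTES = 200 * 1024 * 1024   # assumed safety margin, not an official figure

usage = shutil.disk_usage("C:\\")    # assumes Windows with a C: system drive
print(f"Free space: {usage.free / (1024 ** 3):.1f} GiB")

if usage.free < REQUIRED_BYTES:
    print("Not enough free space for the download.")
```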

        -

        Step 2: Install the software

        -

        After downloading the software, run the setup file and follow the instructions on the screen. You can choose the installation folder and the language of the program. You can also opt to install additional programs from DVDVideoSoft, such as Free Studio or Free Video Editor. However, you can skip them if you don't need them.

        -

        Step 3: Crack the software

        -

        To crack the software and activate the premium features, you need a premium key that is usually provided by the crack file or the patch file. You can find these files on various websites that offer cracked software, such as Peatix or CrackzSoft. However, be careful when downloading these files as they may contain viruses or malware that can harm your computer.

        -

        Once you have the crack file or the patch file, copy it to the installation folder of Free YouTube Download Premium and run it as administrator. It will automatically replace the original file and generate a premium key for you. You can then launch the program and enter the premium key when prompted.

        -

        -

        Step 4: Enjoy the software

        -

        Congratulations! You have successfully downloaded and cracked Free YouTube Download Premium 4.2.21.1001 with a premium key. You can now enjoy all the benefits of this software, such as fast downloading, high-quality conversion, multi-stream support, iTunes integration, and more. You can also download videos from other websites besides YouTube, such as Facebook, Vimeo, Dailymotion, etc.

        -

        However, please note that cracking software is illegal and unethical and may violate the terms of service of DVDVideoSoft and YouTube. We do not endorse or encourage cracking software in any way and we are not responsible for any consequences that may arise from doing so. This article is for educational purposes only and we recommend you to buy the original software from DVDVideoSoft if you like it.

        -``` - -``` -

        Step 5: Update the software

        -

        Free YouTube Download Premium is constantly updated by DVDVideoSoft to fix bugs, improve performance, and add new features. You can check for updates from the program's menu or from the official website. However, updating the software may overwrite the crack file and deactivate the premium key. Therefore, you may need to repeat the cracking process every time you update the software.

        -

        Alternatively, you can disable the automatic updates from the program's settings and only update the software when necessary. However, this may cause some compatibility issues or security risks if you use an outdated version of the software. Therefore, we advise you to always use the latest version of the software and buy the original license from DVDVideoSoft if you can afford it.

        -

        Step 6: Uninstall the software

        -

        If you want to uninstall Free YouTube Download Premium from your computer, you can do so easily from the Windows Control Panel or from the program's menu. You can also use a third-party uninstaller tool, such as Revo Uninstaller or IObit Uninstaller, to remove all traces of the software from your system. However, make sure you backup your downloaded videos and converted files before uninstalling the software.

        -

        Uninstalling the software will also remove the crack file and the premium key from your computer. Therefore, you will not be able to use the premium features of the software anymore. If you want to reinstall the software in the future, you will need to download and crack it again using the same or a different method.

        -```

        \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/abc.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/abc.py deleted file mode 100644 index e6e498efabfab0dcf31cd7731f8f821cc423bc4f..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/abc.py +++ /dev/null @@ -1,33 +0,0 @@ -from abc import ABC - - -class RichRenderable(ABC): - """An abstract base class for Rich renderables. - - Note that there is no need to extend this class, the intended use is to check if an - object supports the Rich renderable protocol. For example:: - - if isinstance(my_object, RichRenderable): - console.print(my_object) - - """ - - @classmethod - def __subclasshook__(cls, other: type) -> bool: - """Check if this class supports the rich render protocol.""" - return hasattr(other, "__rich_console__") or hasattr(other, "__rich__") - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich.text import Text - - t = Text() - print(isinstance(Text, RichRenderable)) - print(isinstance(t, RichRenderable)) - - class Foo: - pass - - f = Foo() - print(isinstance(f, RichRenderable)) - print(isinstance("", RichRenderable)) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/rule.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/rule.py deleted file mode 100644 index 0b78f7a4ec4a111e35d7fdc7f9744afb696df20e..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/rule.py +++ /dev/null @@ -1,134 +0,0 @@ -from typing import Union - -from .align import AlignMethod -from .cells import cell_len, set_cell_size -from .console import Console, ConsoleOptions, RenderResult -from .jupyter import JupyterMixin -from .measure import Measurement -from .style import Style -from .text import Text - - -class Rule(JupyterMixin): - """A console renderable to draw a horizontal rule (line). - - Args: - title (Union[str, Text], optional): Text to render in the rule. Defaults to "". - characters (str, optional): Character(s) used to draw the line. Defaults to "─". - style (StyleType, optional): Style of Rule. Defaults to "rule.line". - end (str, optional): Character at end of Rule. defaults to "\\\\n" - align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center". 
- """ - - def __init__( - self, - title: Union[str, Text] = "", - *, - characters: str = "─", - style: Union[str, Style] = "rule.line", - end: str = "\n", - align: AlignMethod = "center", - ) -> None: - if cell_len(characters) < 1: - raise ValueError( - "'characters' argument must have a cell width of at least 1" - ) - if align not in ("left", "center", "right"): - raise ValueError( - f'invalid value for align, expected "left", "center", "right" (not {align!r})' - ) - self.title = title - self.characters = characters - self.style = style - self.end = end - self.align = align - - def __repr__(self) -> str: - return f"Rule({self.title!r}, {self.characters!r})" - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - width = options.max_width - - # Python3.6 doesn't have an isascii method on str - isascii = getattr(str, "isascii", None) or ( - lambda s: all(ord(c) < 128 for c in s) - ) - characters = ( - "-" - if (options.ascii_only and not isascii(self.characters)) - else self.characters - ) - - chars_len = cell_len(characters) - if not self.title: - yield self._rule_line(chars_len, width) - return - - if isinstance(self.title, Text): - title_text = self.title - else: - title_text = console.render_str(self.title, style="rule.text") - - title_text.plain = title_text.plain.replace("\n", " ") - title_text.expand_tabs() - - required_space = 4 if self.align == "center" else 2 - truncate_width = max(0, width - required_space) - if not truncate_width: - yield self._rule_line(chars_len, width) - return - - rule_text = Text(end=self.end) - if self.align == "center": - title_text.truncate(truncate_width, overflow="ellipsis") - side_width = (width - cell_len(title_text.plain)) // 2 - left = Text(characters * (side_width // chars_len + 1)) - left.truncate(side_width - 1) - right_length = width - cell_len(left.plain) - cell_len(title_text.plain) - right = Text(characters * (side_width // chars_len + 1)) - right.truncate(right_length) - rule_text.append(left.plain + " ", self.style) - rule_text.append(title_text) - rule_text.append(" " + right.plain, self.style) - elif self.align == "left": - title_text.truncate(truncate_width, overflow="ellipsis") - rule_text.append(title_text) - rule_text.append(" ") - rule_text.append(characters * (width - rule_text.cell_len), self.style) - elif self.align == "right": - title_text.truncate(truncate_width, overflow="ellipsis") - rule_text.append(characters * (width - title_text.cell_len - 1), self.style) - rule_text.append(" ") - rule_text.append(title_text) - - rule_text.plain = set_cell_size(rule_text.plain, width) - yield rule_text - - def _rule_line(self, chars_len: int, width: int) -> Text: - rule_text = Text(self.characters * ((width // chars_len) + 1), self.style) - rule_text.truncate(width) - rule_text.plain = set_cell_size(rule_text.plain, width) - return rule_text - - def __rich_measure__( - self, console: Console, options: ConsoleOptions - ) -> Measurement: - return Measurement(1, 1) - - -if __name__ == "__main__": # pragma: no cover - import sys - - from pip._vendor.rich.console import Console - - try: - text = sys.argv[1] - except IndexError: - text = "Hello, World" - console = Console() - console.print(Rule(title=text)) - - console = Console() - console.print(Rule("foo"), width=4) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/importlib_resources/_itertools.py 
b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/importlib_resources/_itertools.py deleted file mode 100644 index cce05582ffc6fe6d72027194f4ccc44ee42f1fcd..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/importlib_resources/_itertools.py +++ /dev/null @@ -1,35 +0,0 @@ -from itertools import filterfalse - -from typing import ( - Callable, - Iterable, - Iterator, - Optional, - Set, - TypeVar, - Union, -) - -# Type and type variable definitions -_T = TypeVar('_T') -_U = TypeVar('_U') - - -def unique_everseen( - iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None -) -> Iterator[_T]: - "List unique elements, preserving order. Remember all elements ever seen." - # unique_everseen('AAAABBBCCDAABBB') --> A B C D - # unique_everseen('ABBCcAD', str.lower) --> A B C D - seen: Set[Union[_T, _U]] = set() - seen_add = seen.add - if key is None: - for element in filterfalse(seen.__contains__, iterable): - seen_add(element) - yield element - else: - for element in iterable: - k = key(element) - if k not in seen: - seen_add(k) - yield element diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/version.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/version.py deleted file mode 100644 index de9a09a4ed3b078b37e7490a6686f660ae935aca..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/version.py +++ /dev/null @@ -1,504 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import collections -import itertools -import re -import warnings -from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union - -from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType - -__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] - -InfiniteTypes = Union[InfinityType, NegativeInfinityType] -PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] -SubLocalType = Union[InfiniteTypes, int, str] -LocalType = Union[ - NegativeInfinityType, - Tuple[ - Union[ - SubLocalType, - Tuple[SubLocalType, str], - Tuple[NegativeInfinityType, SubLocalType], - ], - ..., - ], -] -CmpKey = Tuple[ - int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType -] -LegacyCmpKey = Tuple[int, Tuple[str, ...]] -VersionComparisonMethod = Callable[ - [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool -] - -_Version = collections.namedtuple( - "_Version", ["epoch", "release", "dev", "pre", "post", "local"] -) - - -def parse(version: str) -> Union["LegacyVersion", "Version"]: - """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. - """ - try: - return Version(version) - except InvalidVersion: - return LegacyVersion(version) - - -class InvalidVersion(ValueError): - """ - An invalid version was found, users should refer to PEP 440. 
- """ - - -class _BaseVersion: - _key: Union[CmpKey, LegacyCmpKey] - - def __hash__(self) -> int: - return hash(self._key) - - # Please keep the duplicated `isinstance` check - # in the six comparisons hereunder - # unless you find a way to avoid adding overhead function calls. - def __lt__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key < other._key - - def __le__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key <= other._key - - def __eq__(self, other: object) -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key == other._key - - def __ge__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key >= other._key - - def __gt__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key > other._key - - def __ne__(self, other: object) -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key != other._key - - -class LegacyVersion(_BaseVersion): - def __init__(self, version: str) -> None: - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def __str__(self) -> str: - return self._version - - def __repr__(self) -> str: - return f"" - - @property - def public(self) -> str: - return self._version - - @property - def base_version(self) -> str: - return self._version - - @property - def epoch(self) -> int: - return -1 - - @property - def release(self) -> None: - return None - - @property - def pre(self) -> None: - return None - - @property - def post(self) -> None: - return None - - @property - def dev(self) -> None: - return None - - @property - def local(self) -> None: - return None - - @property - def is_prerelease(self) -> bool: - return False - - @property - def is_postrelease(self) -> bool: - return False - - @property - def is_devrelease(self) -> bool: - return False - - -_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) - -_legacy_version_replacement_map = { - "pre": "c", - "preview": "c", - "-": "final-", - "rc": "c", - "dev": "@", -} - - -def _parse_version_parts(s: str) -> Iterator[str]: - for part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version: str) -> LegacyCmpKey: - - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. 
        -    parts: List[str] = []
        -    for part in _parse_version_parts(version.lower()):
        -        if part.startswith("*"):
        -            # remove "-" before a prerelease tag
        -            if part < "*final":
        -                while parts and parts[-1] == "*final-":
        -                    parts.pop()
        -
        -            # remove trailing zeros from each series of numeric parts
        -            while parts and parts[-1] == "00000000":
        -                parts.pop()
        -
        -        parts.append(part)
        -
        -    return epoch, tuple(parts)
        -
        -
        -# Deliberately not anchored to the start and end of the string, to make it
        -# easier for 3rd party code to reuse
        -VERSION_PATTERN = r"""
        -    v?
        -    (?:
        -        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        -        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        -        (?P<pre>                                          # pre-release
        -            [-_\.]?
        -            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
        -            [-_\.]?
        -            (?P<pre_n>[0-9]+)?
        -        )?
        -        (?P<post>                                         # post release
        -            (?:-(?P<post_n1>[0-9]+))
        -            |
        -            (?:
        -                [-_\.]?
        -                (?P<post_l>post|rev|r)
        -                [-_\.]?
        -                (?P<post_n2>[0-9]+)?
        -            )
        -        )?
        -        (?P<dev>                                          # dev release
        -            [-_\.]?
        -            (?P<dev_l>dev)
        -            [-_\.]?
        -            (?P<dev_n>[0-9]+)?
        -        )?
        -    )
        -    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
        -"""
        -
        -
        -class Version(_BaseVersion):
        -
        -    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
        -
        -    def __init__(self, version: str) -> None:
        -
        -        # Validate the version and parse it into pieces
        -        match = self._regex.search(version)
        -        if not match:
        -            raise InvalidVersion(f"Invalid version: '{version}'")
        -
        -        # Store the parsed out pieces of the version
        -        self._version = _Version(
        -            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
        -            release=tuple(int(i) for i in match.group("release").split(".")),
        -            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
        -            post=_parse_letter_version(
        -                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
        -            ),
        -            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
        -            local=_parse_local_version(match.group("local")),
        -        )
        -
        -        # Generate a key which will be used for sorting
        -        self._key = _cmpkey(
        -            self._version.epoch,
        -            self._version.release,
        -            self._version.pre,
        -            self._version.post,
        -            self._version.dev,
        -            self._version.local,
        -        )
        -
        -    def __repr__(self) -> str:
        -        return f"<Version('{self}')>"
        -
        -    def __str__(self) -> str:
        -        parts = []
        -
        -        # Epoch
        -        if self.epoch != 0:
        -            parts.append(f"{self.epoch}!")
        -
        -        # Release segment
        -        parts.append(".".join(str(x) for x in self.release))
        -
        -        # Pre-release
        -        if self.pre is not None:
        -            parts.append("".join(str(x) for x in self.pre))
        -
        -        # Post-release
        -        if self.post is not None:
        -            parts.append(f".post{self.post}")
        -
        -        # Development release
        -        if self.dev is not None:
        -            parts.append(f".dev{self.dev}")
        -
        -        # Local version segment
        -        if self.local is not None:
        -            parts.append(f"+{self.local}")
        -
        -        return "".join(parts)
        -
        -    @property
        -    def epoch(self) -> int:
        -        _epoch: int = self._version.epoch
        -        return _epoch
        -
        -    @property
        -    def release(self) -> Tuple[int, ...]:
        -        _release: Tuple[int, ...] = self._version.release
        -        return _release
        -
        -    @property
        -    def pre(self) -> Optional[Tuple[str, int]]:
        -        _pre: Optional[Tuple[str, int]] = self._version.pre
        -        return _pre
        -
        -    @property
        -    def post(self) -> Optional[int]:
        -        return self._version.post[1] if self._version.post else None
        -
        -    @property
        -    def dev(self) -> Optional[int]:
        -        return self._version.dev[1] if self._version.dev else None
        -
        -    @property
        -    def local(self) -> Optional[str]:
        -        if self._version.local:
        -            return ".".join(str(x) for x in self._version.local)
        -        else:
        -            return None
        -
        -    @property
        -    def public(self) -> str:
        -        return str(self).split("+", 1)[0]
        -
        -    @property
        -    def base_version(self) -> str:
        -        parts = []
        -
        -        # Epoch
        -        if self.epoch != 0:
        -            parts.append(f"{self.epoch}!")
        -
        -        # Release segment
        -        parts.append(".".join(str(x) for x in self.release))
        -
        -        return "".join(parts)
        -
        -    @property
        -    def is_prerelease(self) -> bool:
        -        return self.dev is not None or self.pre is not None
        -
        -    @property
        -    def is_postrelease(self) -> bool:
        -        return self.post is not None
        -
        -    @property
        -    def is_devrelease(self) -> bool:
        -        return self.dev is not None
        -
        -    @property
        -    def major(self) -> int:
        -        return self.release[0] if len(self.release) >= 1 else 0
        -
        -    @property
        -    def minor(self) -> int:
        -        return self.release[1] if len(self.release) >= 2 else 0
        -
        -    @property
        -    def micro(self) -> int:
        -        return self.release[2] if len(self.release) >= 3 else 0
        -
        -
        -def _parse_letter_version(
        -    letter: str, number: Union[str, bytes, SupportsInt]
        -) -> Optional[Tuple[str, int]]:
        -
        -    if letter:
        -        # We consider there to be an implicit 0 in a pre-release if there is
        -        # not a numeral associated with it.
        -        if number is None:
        -            number = 0
        -
        -        # We normalize any letters to their lower case form
        -        letter = letter.lower()
        -
        -        # We consider some words to be alternate spellings of other words and
        -        # in those cases we want to normalize the spellings to our preferred
        -        # spelling.
        -        if letter == "alpha":
        -            letter = "a"
        -        elif letter == "beta":
        -            letter = "b"
        -        elif letter in ["c", "pre", "preview"]:
        -            letter = "rc"
        -        elif letter in ["rev", "r"]:
        -            letter = "post"
        -
        -        return letter, int(number)
        -    if not letter and number:
        -        # We assume if we are given a number, but we are not given a letter
        -        # then this is using the implicit post release syntax (e.g. 1.0-1)
        -        letter = "post"
        -
        -        return letter, int(number)
        -
        -    return None
        -
        -
        -_local_version_separators = re.compile(r"[\._-]")
        -
        -
        -def _parse_local_version(local: str) -> Optional[LocalType]:
        -    """
        -    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
        -    """
        -    if local is not None:
        -        return tuple(
        -            part.lower() if not part.isdigit() else int(part)
        -            for part in _local_version_separators.split(local)
        -        )
        -    return None
        -
        -
        -def _cmpkey(
        -    epoch: int,
        -    release: Tuple[int, ...],
        -    pre: Optional[Tuple[str, int]],
        -    post: Optional[Tuple[str, int]],
        -    dev: Optional[Tuple[str, int]],
        -    local: Optional[Tuple[SubLocalType]],
        -) -> CmpKey:
        -
        -    # When we compare a release version, we want to compare it with all of the
        -    # trailing zeros removed. So we'll use a reverse the list, drop all the now
        -    # leading zeros until we come to something non zero, then take the rest
        -    # re-reverse it back into the correct order and make it a tuple and use
        -    # that for our sorting key.
        -    _release = tuple(
        -        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
        -    )
        -
        -    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
        -    # We'll do this by abusing the pre segment, but we _only_ want to do this
        -    # if there is not a pre or a post segment. If we have one of those then
        -    # the normal sorting rules will handle this case correctly.
        -    if pre is None and post is None and dev is not None:
        -        _pre: PrePostDevType = NegativeInfinity
        -    # Versions without a pre-release (except as noted above) should sort after
        -    # those with one.
        -    elif pre is None:
        -        _pre = Infinity
        -    else:
        -        _pre = pre
        -
        -    # Versions without a post segment should sort before those with one.
        -    if post is None:
        -        _post: PrePostDevType = NegativeInfinity
        -
        -    else:
        -        _post = post
        -
        -    # Versions without a development segment should sort after those with one.
        -    if dev is None:
        -        _dev: PrePostDevType = Infinity
        -
        -    else:
        -        _dev = dev
        -
        -    if local is None:
        -        # Versions without a local segment should sort before those with one.
        -        _local: LocalType = NegativeInfinity
        -    else:
        -        # Versions with a local segment need that segment parsed to implement
        -        # the sorting rules in PEP440.
        -        # - Alpha numeric segments sort before numeric segments
        -        # - Alpha numeric segments sort lexicographically
        -        # - Numeric segments sort numerically
        -        # - Shorter versions sort before longer versions when the prefixes
        -        #   match exactly
        -        _local = tuple(
        -            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
        -        )
        -
        -    return epoch, _release, _pre, _post, _dev, _local
        diff --git a/spaces/tomaseo2022/Traductor-Voz-de-Video/client.py b/spaces/tomaseo2022/Traductor-Voz-de-Video/client.py
        deleted file mode 100644
        index 746cd520b13bd5468dbf23ceaa29579c375f8a92..0000000000000000000000000000000000000000
        --- a/spaces/tomaseo2022/Traductor-Voz-de-Video/client.py
        +++ /dev/null
        @@ -1,271 +0,0 @@
        -# -*- coding: utf-8 -*-
        -"""
        -A Translation module.
        -
        -You can translate text using this module.
        -"""
        -import random
        -import typing
        -
        -import httpcore
        -import httpx
        -from httpx import Timeout
        -
        -import urls, utils
        -import gtoken 
        -from gtoken import TokenAcquirer
        -import constants
        -from constants import (
        -    DEFAULT_USER_AGENT, LANGCODES, LANGUAGES, SPECIAL_CASES,
        -    DEFAULT_RAISE_EXCEPTION, DUMMY_DATA
        -)
        -import models
        -from models import Translated, Detected
        -
        -EXCLUDES = ('en', 'ca', 'fr')
        -
        -
        -class Translator:
        -    """Google Translate ajax API implementation class
        -
        -    You have to create an instance of Translator to use this API
        -
        -    :param service_urls: google translate url list. URLs will be used randomly.
        -                         For example ``['translate.google.com', 'translate.google.co.kr']``
        -    :type service_urls: a sequence of strings
        -
        -    :param user_agent: the User-Agent header to send when making requests.
        -    :type user_agent: :class:`str`
        -
        -    :param proxies: proxies configuration.
        -                    Dictionary mapping protocol or protocol and host to the URL of the proxy
        -                    For example ``{'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}``
        -    :type proxies: dictionary
        -
        -    :param timeout: Definition of timeout for httpx library.
        -                    Will be used for every request.
        -    :type timeout: number or a double of numbers
        -    :param raise_exception: if `True` then raise exception if something goes wrong
        -    :type raise_exception: boolean
        -    """
        -
        -    def __init__(self, service_urls=None, user_agent=DEFAULT_USER_AGENT,
        -                 raise_exception=DEFAULT_RAISE_EXCEPTION,
        -                 proxies: typing.Dict[str, httpcore.AsyncHTTPProxy] = None, timeout: Timeout = None):
        -
        -        self.client = httpx.Client()
        -        if proxies is not None:  # pragma: nocover
        -            self.client.proxies = proxies
        -
        -        self.client.headers.update({
        -            'User-Agent': user_agent,
        -        })
        -
        -        if timeout is not None:
        -            self.client.timeout = timeout
        -
        -        self.service_urls = service_urls or ['translate.google.com']
        -        self.token_acquirer = TokenAcquirer(client=self.client, host=self.service_urls[0])
        -        self.raise_exception = raise_exception
        -
        -    def _pick_service_url(self):
        -        if len(self.service_urls) == 1:
        -            return self.service_urls[0]
        -        return random.choice(self.service_urls)
        -
        -    def _translate(self, text, dest, src, override):
        -        token = self.token_acquirer.do(text)
        -        params = utils.build_params(query=text, src=src, dest=dest,
        -                                    token=token, override=override)
        -
        -        url = urls.TRANSLATE.format(host=self._pick_service_url())
        -        r = self.client.get(url, params=params)
        -
        -        if r.status_code == 200:
        -            data = utils.format_json(r.text)
        -            return data
        -        else:
        -            if self.raise_exception:
        -                raise Exception('Unexpected status code "{}" from {}'.format(r.status_code, self.service_urls))
        -            DUMMY_DATA[0][0][0] = text
        -            return DUMMY_DATA
        -
        -    def _parse_extra_data(self, data):
        -        response_parts_name_mapping = {
        -            0: 'translation',
        -            1: 'all-translations',
        -            2: 'original-language',
        -            5: 'possible-translations',
        -            6: 'confidence',
        -            7: 'possible-mistakes',
        -            8: 'language',
        -            11: 'synonyms',
        -            12: 'definitions',
        -            13: 'examples',
        -            14: 'see-also',
        -        }
        -
        -        extra = {}
        -
        -        for index, category in response_parts_name_mapping.items():
        -            extra[category] = data[index] if (index < len(data) and data[index]) else None
        -
        -        return extra
        -
        -    def translate(self, text, dest='en', src='auto', **kwargs):
        -        """Translate text from source language to destination language
        -
        -        :param text: The source text(s) to be translated. Batch translation is supported via sequence input.
        -        :type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
        -
        -        :param dest: The language to translate the source text into.
        -                     The value should be one of the language codes listed in :const:`googletrans.LANGUAGES`
        -                     or one of the language names listed in :const:`googletrans.LANGCODES`.
        -        :param dest: :class:`str`; :class:`unicode`
        -
        -        :param src: The language of the source text.
        -                    The value should be one of the language codes listed in :const:`googletrans.LANGUAGES`
        -                    or one of the language names listed in :const:`googletrans.LANGCODES`.
        -                    If a language is not specified,
        -                    the system will attempt to identify the source language automatically.
        -        :param src: :class:`str`; :class:`unicode`
        -
        -        :rtype: Translated
        -        :rtype: :class:`list` (when a list is passed)
        -
        -        Basic usage:
        -            >>> from googletrans import Translator
        -            >>> translator = Translator()
        -            >>> translator.translate('안녕하세요.')
        -            
        -            >>> translator.translate('안녕하세요.', dest='ja')
        -            
        -            >>> translator.translate('veritas lux mea', src='la')
        -            
        -
        -        Advanced usage:
        -            >>> translations = translator.translate(['The quick brown fox', 'jumps over', 'the lazy dog'], dest='ko')
        -            >>> for translation in translations:
        -            ...    print(translation.origin, ' -> ', translation.text)
        -            The quick brown fox  ->  빠른 갈색 여우
        -            jumps over  ->  이상 점프
        -            the lazy dog  ->  게으른 개
        -        """
        -        dest = dest.lower().split('_', 1)[0]
        -        src = src.lower().split('_', 1)[0]
        -
        -        if src != 'auto' and src not in LANGUAGES:
        -            if src in SPECIAL_CASES:
        -                src = SPECIAL_CASES[src]
        -            elif src in LANGCODES:
        -                src = LANGCODES[src]
        -            else:
        -                raise ValueError('invalid source language')
        -
        -        if dest not in LANGUAGES:
        -            if dest in SPECIAL_CASES:
        -                dest = SPECIAL_CASES[dest]
        -            elif dest in LANGCODES:
        -                dest = LANGCODES[dest]
        -            else:
        -                raise ValueError('invalid destination language')
        -
        -        if isinstance(text, list):
        -            result = []
        -            for item in text:
        -                translated = self.translate(item, dest=dest, src=src, **kwargs)
        -                result.append(translated)
        -            return result
        -
        -        origin = text
        -        data = self._translate(text, dest, src, kwargs)
        -
        -        # this code will be updated when the format is changed.
        -        translated = ''.join([d[0] if d[0] else '' for d in data[0]])
        -
        -        extra_data = self._parse_extra_data(data)
        -
        -        # actual source language that will be recognized by Google Translator when the
        -        # src passed is equal to auto.
        -        try:
        -            src = data[2]
        -        except Exception:  # pragma: nocover
        -            pass
        -
        -        pron = origin
        -        try:
        -            pron = data[0][1][-2]
        -        except Exception:  # pragma: nocover
        -            pass
        -
        -        if pron is None:
        -            try:
        -                pron = data[0][1][2]
        -            except:  # pragma: nocover
        -                pass
        -
        -        if dest in EXCLUDES and pron == origin:
        -            pron = translated
        -
        -        # put final values into a new Translated object
        -        result = Translated(src=src, dest=dest, origin=origin,
        -                            text=translated, pronunciation=pron, extra_data=extra_data)
        -
        -        return result
        -
        -    def detect(self, text, **kwargs):
        -        """Detect language of the input text
        -
        -        :param text: The source text(s) whose language you want to identify.
        -                     Batch detection is supported via sequence input.
        -        :type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
        -
        -        :rtype: Detected
        -        :rtype: :class:`list` (when a list is passed)
        -
        -        Basic usage:
        -            >>> from googletrans import Translator
        -            >>> translator = Translator()
        -            >>> translator.detect('이 문장은 한글로 쓰여졌습니다.')
        -            
        -            >>> translator.detect('この文章は日本語で書かれました。')
        -            
        -            >>> translator.detect('This sentence is written in English.')
        -            
        -            >>> translator.detect('Tiu frazo estas skribita en Esperanto.')
        -            
        -
        -        Advanced usage:
        -            >>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
        -            >>> for lang in langs:
        -            ...    print(lang.lang, lang.confidence)
        -            ko 1
        -            ja 0.92929292
        -            en 0.96954316
        -            fr 0.043500196
        -        """
        -        if isinstance(text, list):
        -            result = []
        -            for item in text:
        -                lang = self.detect(item)
        -                result.append(lang)
        -            return result
        -
        -        data = self._translate(text, 'en', 'auto', kwargs)
        -
        -        # actual source language that will be recognized by Google Translator when the
        -        # src passed is equal to auto.
        -        src = ''
        -        confidence = 0.0
        -        try:
        -            src = ''.join(data[8][0])
        -            confidence = data[8][-2][0]
        -        except Exception:  # pragma: nocover
        -            pass
        -        result = Detected(lang=src, confidence=confidence)
        -
        -        return result
        diff --git a/spaces/tomofi/MMOCR/configs/_base_/det_pipelines/psenet_pipeline.py b/spaces/tomofi/MMOCR/configs/_base_/det_pipelines/psenet_pipeline.py
        deleted file mode 100644
        index 004dd63ade93b3d3f1cbb80672fb1bd7db7fd276..0000000000000000000000000000000000000000
        --- a/spaces/tomofi/MMOCR/configs/_base_/det_pipelines/psenet_pipeline.py
        +++ /dev/null
        @@ -1,70 +0,0 @@
        -img_norm_cfg = dict(
        -    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
        -
        -train_pipeline = [
        -    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
        -    dict(
        -        type='LoadTextAnnotations',
        -        with_bbox=True,
        -        with_mask=True,
        -        poly2mask=False),
        -    dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
        -    dict(type='Normalize', **img_norm_cfg),
        -    dict(
        -        type='ScaleAspectJitter',
        -        img_scale=[(3000, 736)],
        -        ratio_range=(0.5, 3),
        -        aspect_ratio_range=(1, 1),
        -        multiscale_mode='value',
        -        long_size_bound=1280,
        -        short_size_bound=640,
        -        resize_type='long_short_bound',
        -        keep_ratio=False),
        -    dict(type='PSENetTargets'),
        -    dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'),
        -    dict(type='RandomRotateTextDet'),
        -    dict(
        -        type='RandomCropInstances',
        -        target_size=(640, 640),
        -        instance_key='gt_kernels'),
        -    dict(type='Pad', size_divisor=32),
        -    dict(
        -        type='CustomFormatBundle',
        -        keys=['gt_kernels', 'gt_mask'],
        -        visualize=dict(flag=False, boundary_key='gt_kernels')),
        -    dict(type='Collect', keys=['img', 'gt_kernels', 'gt_mask'])
        -]
        -
        -# for ctw1500
        -img_scale_test_ctw1500 = (1280, 1280)
        -test_pipeline_ctw1500 = [
        -    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
        -    dict(
        -        type='MultiScaleFlipAug',
        -        img_scale=img_scale_test_ctw1500,
        -        flip=False,
        -        transforms=[
        -            dict(type='Resize', img_scale=(1280, 1280), keep_ratio=True),
        -            dict(type='Normalize', **img_norm_cfg),
        -            dict(type='Pad', size_divisor=32),
        -            dict(type='ImageToTensor', keys=['img']),
        -            dict(type='Collect', keys=['img']),
        -        ])
        -]
        -
        -# for icdar2015
        -img_scale_test_icdar2015 = (2240, 2240)
        -test_pipeline_icdar2015 = [
        -    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
        -    dict(
        -        type='MultiScaleFlipAug',
        -        img_scale=img_scale_test_icdar2015,
        -        flip=False,
        -        transforms=[
        -            dict(type='Resize', img_scale=(1280, 1280), keep_ratio=True),
        -            dict(type='Normalize', **img_norm_cfg),
        -            dict(type='Pad', size_divisor=32),
        -            dict(type='ImageToTensor', keys=['img']),
        -            dict(type='Collect', keys=['img']),
        -        ])
        -]
        diff --git a/spaces/tomofi/MMOCR/mmocr/models/textdet/__init__.py b/spaces/tomofi/MMOCR/mmocr/models/textdet/__init__.py
        deleted file mode 100644
        index 027e812f790d9572ec5d83b78ee9ce33a5ed415a..0000000000000000000000000000000000000000
        --- a/spaces/tomofi/MMOCR/mmocr/models/textdet/__init__.py
        +++ /dev/null
        @@ -1,11 +0,0 @@
        -# Copyright (c) OpenMMLab. All rights reserved.
        -from . import dense_heads, detectors, losses, necks, postprocess
        -from .dense_heads import *  # NOQA
        -from .detectors import *  # NOQA
        -from .losses import *  # NOQA
        -from .necks import *  # NOQA
        -from .postprocess import *  # NOQA
        -
        -__all__ = (
        -    dense_heads.__all__ + detectors.__all__ + losses.__all__ + necks.__all__ +
        -    postprocess.__all__)
        diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/losses/__init__.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/losses/__init__.py
        deleted file mode 100644
        index afab422263462b1f1d3311f0b6632df2d172a6ea..0000000000000000000000000000000000000000
        --- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/losses/__init__.py
        +++ /dev/null
        @@ -1,7 +0,0 @@
        -# Copyright (c) OpenMMLab. All rights reserved.
        -from .ce_loss import CELoss, SARLoss, TFLoss
        -from .ctc_loss import CTCLoss
        -from .mix_loss import ABILoss
        -from .seg_loss import SegLoss
        -
        -__all__ = ['CELoss', 'SARLoss', 'CTCLoss', 'TFLoss', 'SegLoss', 'ABILoss']
        diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py
        deleted file mode 100644
        index 8f483a17ace5c101548f640b95cc94030f37a0b3..0000000000000000000000000000000000000000
        --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py
        +++ /dev/null
        @@ -1,58 +0,0 @@
        -_base_ = [
        -    '../_base_/models/retinanet_r50_fpn.py',
        -    '../_base_/datasets/coco_detection.py',
        -    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
        -]
        -model = dict(
        -    pretrained='open-mmlab://regnetx_3.2gf',
        -    backbone=dict(
        -        _delete_=True,
        -        type='RegNet',
        -        arch='regnetx_3.2gf',
        -        out_indices=(0, 1, 2, 3),
        -        frozen_stages=1,
        -        norm_cfg=dict(type='BN', requires_grad=True),
        -        norm_eval=True,
        -        style='pytorch'),
        -    neck=dict(
        -        type='FPN',
        -        in_channels=[96, 192, 432, 1008],
        -        out_channels=256,
        -        num_outs=5))
        -img_norm_cfg = dict(
        -    # The mean and std are used in PyCls when training RegNets
        -    mean=[103.53, 116.28, 123.675],
        -    std=[57.375, 57.12, 58.395],
        -    to_rgb=False)
        -train_pipeline = [
        -    dict(type='LoadImageFromFile'),
        -    dict(type='LoadAnnotations', with_bbox=True),
        -    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        -    dict(type='RandomFlip', flip_ratio=0.5),
        -    dict(type='Normalize', **img_norm_cfg),
        -    dict(type='Pad', size_divisor=32),
        -    dict(type='DefaultFormatBundle'),
        -    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
        -]
        -test_pipeline = [
        -    dict(type='LoadImageFromFile'),
        -    dict(
        -        type='MultiScaleFlipAug',
        -        img_scale=(1333, 800),
        -        flip=False,
        -        transforms=[
        -            dict(type='Resize', keep_ratio=True),
        -            dict(type='RandomFlip'),
        -            dict(type='Normalize', **img_norm_cfg),
        -            dict(type='Pad', size_divisor=32),
        -            dict(type='ImageToTensor', keys=['img']),
        -            dict(type='Collect', keys=['img']),
        -        ])
        -]
        -data = dict(
        -    train=dict(pipeline=train_pipeline),
        -    val=dict(pipeline=test_pipeline),
        -    test=dict(pipeline=test_pipeline))
        -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
        -optimizer_config = dict(
        -    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
        diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/evaluation/class_names.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/evaluation/class_names.py
        deleted file mode 100644
        index c2487c2ee2d010c40db0e1c2b51c91b194e84dc7..0000000000000000000000000000000000000000
        --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/evaluation/class_names.py
        +++ /dev/null
        @@ -1,116 +0,0 @@
        -import mmcv
        -
        -
        -def wider_face_classes():
        -    return ['face']
        -
        -
        -def voc_classes():
        -    return [
        -        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
        -        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
        -        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
        -    ]
        -
        -
        -def imagenet_det_classes():
        -    return [
        -        'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
        -        'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
        -        'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
        -        'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
        -        'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
        -        'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
        -        'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
        -        'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
        -        'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
        -        'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
        -        'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
        -        'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
        -        'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
        -        'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
        -        'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
        -        'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
        -        'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
        -        'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
        -        'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
        -        'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
        -        'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
        -        'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
        -        'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
        -        'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
        -        'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
        -        'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
        -        'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
        -        'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
        -        'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
        -        'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
        -        'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
        -        'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
        -        'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
        -        'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
        -        'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
        -        'whale', 'wine_bottle', 'zebra'
        -    ]
        -
        -
        -def imagenet_vid_classes():
        -    return [
        -        'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
        -        'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
        -        'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
        -        'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',
        -        'watercraft', 'whale', 'zebra'
        -    ]
        -
        -
        -def coco_classes():
        -    return [
        -        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        -        'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
        -        'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        -        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        -        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        -        'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
        -        'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
        -        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
        -        'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
        -        'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',
        -        'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
        -        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        -        'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'
        -    ]
        -
        -
        -def cityscapes_classes():
        -    return [
        -        'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
        -        'bicycle'
        -    ]
        -
        -
        -dataset_aliases = {
        -    'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
        -    'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
        -    'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
        -    'coco': ['coco', 'mscoco', 'ms_coco'],
        -    'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
        -    'cityscapes': ['cityscapes']
        -}
        -
        -
        -def get_classes(dataset):
        -    """Get class names of a dataset."""
        -    alias2name = {}
        -    for name, aliases in dataset_aliases.items():
        -        for alias in aliases:
        -            alias2name[alias] = name
        -
        -    if mmcv.is_str(dataset):
        -        if dataset in alias2name:
        -            labels = eval(alias2name[dataset] + '_classes()')
        -        else:
        -            raise ValueError(f'Unrecognized dataset: {dataset}')
        -    else:
        -        raise TypeError(f'dataset must a str, but got {type(dataset)}')
        -    return labels
        diff --git a/spaces/tomzhang1019/ChatGPT/modules/models/modeling_moss.py b/spaces/tomzhang1019/ChatGPT/modules/models/modeling_moss.py
        deleted file mode 100644
        index b7adea5bca857f7fdd6399dde7ce359f8f8cecfe..0000000000000000000000000000000000000000
        --- a/spaces/tomzhang1019/ChatGPT/modules/models/modeling_moss.py
        +++ /dev/null
        @@ -1,711 +0,0 @@
        -""" PyTorch Moss model."""
        -
        -from typing import Optional, Tuple, Union
        -
        -import torch
        -import torch.utils.checkpoint
        -from torch import nn
        -from torch.nn import CrossEntropyLoss
        -
        -from transformers.activations import ACT2FN
        -from transformers.modeling_utils import PreTrainedModel
        -from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
        -from transformers.utils import (
        -    add_code_sample_docstrings,
        -    add_start_docstrings,
        -    add_start_docstrings_to_model_forward,
        -    logging
        -)
        -
        -from .configuration_moss import MossConfig
        -
        -
        -logger = logging.get_logger(__name__)
        -
        -_CHECKPOINT_FOR_DOC = "fnlp/moss-moon-003-base"
        -_CONFIG_FOR_DOC = "MossConfig"
        -
        -
        -MOSS_PRETRAINED_MODEL_ARCHIVE_LIST = [
        -    "fnlp/moss-moon-003-base",
        -    "fnlp/moss-moon-003-sft",
        -    "fnlp/moss-moon-003-sft-plugin",
        -]
        -
        -
        -# Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions
        -def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
        -    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
        -    sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.float), inv_freq).float()
        -    return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
        -
        -
        -# Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
        -def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
        -    x1 = x[:, :, :, ::2]
        -    x2 = x[:, :, :, 1::2]
        -    x = torch.stack((-x2, x1), dim=-1)
        -    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
        -
        -
        -# Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
        -def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
        -    sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
        -    cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
        -    return (tensor * cos) + (rotate_every_two(tensor) * sin)
        -
        -
        -class MossAttention(nn.Module):
        -    def __init__(self, config):
        -        super().__init__()
        -
        -        max_positions = config.max_position_embeddings
        -        self.register_buffer(
        -            "causal_mask",
        -            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
        -                1, 1, max_positions, max_positions
        -            ),
        -        )
        -
        -        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        -        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        -
        -        self.embed_dim = config.hidden_size
        -        self.num_attention_heads = config.num_attention_heads
        -        self.head_dim = self.embed_dim // self.num_attention_heads
        -        if self.head_dim * self.num_attention_heads != self.embed_dim:
        -            raise ValueError(
        -                f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
        -                f" `num_attention_heads`: {self.num_attention_heads})."
        -            )
        -        self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
        -        self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
        -
        -        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        -        self.rotary_dim = config.rotary_dim
        -        pos_embd_dim = self.rotary_dim or self.embed_dim
        -        self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
        -
        -    def _split_heads(self, x, n_head, dim_head, mp_num):
        -        reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
        -        reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
        -        return reshaped
        -
        -    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
        -        """
        -        Merges attn_head_size dim and num_attn_heads dim into n_ctx
        -        """
        -        if len(tensor.shape) == 5:
        -            tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
        -        elif len(tensor.shape) == 4:
        -            tensor = tensor.permute(0, 2, 1, 3).contiguous()
        -        else:
        -            raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
        -        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
        -        return tensor.view(new_shape)
        -
        -    def _attn(
        -        self,
        -        query,
        -        key,
        -        value,
        -        attention_mask=None,
        -        head_mask=None,
        -    ):
        -        # compute causal mask from causal mask buffer
        -        query_length, key_length = query.size(-2), key.size(-2)
        -        causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]
        -
        -        # Keep the attention weights computation in fp32 to avoid overflow issues
        -        query = query.to(torch.float32)
        -        key = key.to(torch.float32)
        -
        -        attn_weights = torch.matmul(query, key.transpose(-1, -2))
        -
        -        attn_weights = attn_weights / self.scale_attn
        -        mask_value = torch.finfo(attn_weights.dtype).min
        -        # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
        -        # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
        -        mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
        -        attn_weights = torch.where(causal_mask, attn_weights, mask_value)
        -
        -        if attention_mask is not None:
        -            # Apply the attention mask
        -            attn_weights = attn_weights + attention_mask
        -
        -        attn_weights = nn.Softmax(dim=-1)(attn_weights)
        -        attn_weights = attn_weights.to(value.dtype)
        -        attn_weights = self.attn_dropout(attn_weights)
        -
        -        # Mask heads if we want to
        -        if head_mask is not None:
        -            attn_weights = attn_weights * head_mask
        -
        -        attn_output = torch.matmul(attn_weights, value)
        -
        -        return attn_output, attn_weights
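The slicing of `self.causal_mask` at the top of `_attn` is what lets the same buffer serve both a full prompt pass and single-token incremental decoding. A self-contained sketch of the two cases (illustrative sizes only, not part of the original file):

```python
import torch

max_positions = 6
causal = torch.tril(torch.ones(max_positions, max_positions, dtype=torch.bool)).view(
    1, 1, max_positions, max_positions
)

# Full prompt pass: query_length == key_length == 4 -> lower-triangular 4x4 block.
print(causal[:, :, 4 - 4:4, :4][0, 0].int())

# Incremental decoding: 1 new query attending to 5 cached keys -> a single all-ones row.
print(causal[:, :, 5 - 1:5, :5][0, 0].int())
```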
        -
        -    def forward(
        -        self,
        -        hidden_states: Optional[torch.FloatTensor],
        -        layer_past: Optional[Tuple[torch.Tensor]] = None,
        -        attention_mask: Optional[torch.FloatTensor] = None,
        -        position_ids: Optional[torch.LongTensor] = None,
        -        head_mask: Optional[torch.FloatTensor] = None,
        -        use_cache: Optional[bool] = False,
        -        output_attentions: Optional[bool] = False,
        -    ) -> Union[
        -        Tuple[torch.Tensor, Tuple[torch.Tensor]],
        -        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
        -    ]:
        -        qkv = self.qkv_proj(hidden_states)
        -        # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
        -        mp_num = 4
        -        qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
        -
        -        local_dim = self.head_dim * self.num_attention_heads // mp_num
        -        query, value, key = torch.split(qkv_split, local_dim, dim=-1)
        -        query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        -        key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        -
        -        value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        -        value = value.permute(0, 2, 1, 3)
        -
        -        embed_positions = self.embed_positions
        -        if embed_positions.device != position_ids.device:
        -            embed_positions = embed_positions.to(position_ids.device)
        -            self.embed_positions = embed_positions
        -
        -        sincos = embed_positions[position_ids]
        -        sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
        -
        -        if self.rotary_dim is not None:
        -            k_rot = key[:, :, :, : self.rotary_dim]
        -            k_pass = key[:, :, :, self.rotary_dim :]
        -
        -            q_rot = query[:, :, :, : self.rotary_dim]
        -            q_pass = query[:, :, :, self.rotary_dim :]
        -
        -            k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
        -            q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
        -
        -            key = torch.cat([k_rot, k_pass], dim=-1)
        -            query = torch.cat([q_rot, q_pass], dim=-1)
        -        else:
        -            key = apply_rotary_pos_emb(key, sin, cos)
        -            query = apply_rotary_pos_emb(query, sin, cos)
        -
        -        key = key.permute(0, 2, 1, 3)
        -        query = query.permute(0, 2, 1, 3)
        -
        -        if layer_past is not None:
        -            past_key = layer_past[0]
        -            past_value = layer_past[1]
        -            key = torch.cat((past_key, key), dim=-2)
        -            value = torch.cat((past_value, value), dim=-2)
        -
        -        if use_cache is True:
        -            present = (key, value)
        -        else:
        -            present = None
        -
        -        # compute self-attention: V x Softmax(QK^T)
        -        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
        -
        -        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
        -        attn_output = self.out_proj(attn_output)
        -        attn_output = self.resid_dropout(attn_output)
        -
        -        outputs = (attn_output, present)
        -        if output_attentions:
        -            outputs += (attn_weights,)
        -
        -        return outputs  # a, present, (attentions)
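The `mp_num = 4` layout above mirrors how the original checkpoint shards the fused qkv projection across logical cores. A standalone shape walk-through under assumed toy dimensions (`embed_dim=32`, `num_heads=8`; values are arbitrary) may help follow the reshapes in `forward` and `_split_heads`:

```python
import torch

batch, seq, embed_dim, num_heads, mp_num = 2, 5, 32, 8, 4
head_dim = embed_dim // num_heads                          # 4
qkv = torch.randn(batch, seq, embed_dim * 3)

qkv_split = qkv.reshape(batch, seq, mp_num, -1)            # (2, 5, 4, 24): one shard per "core"
local_dim = head_dim * num_heads // mp_num                 # 8
query, value, key = torch.split(qkv_split, local_dim, dim=-1)   # each (2, 5, 4, 8)

# _split_heads regroups each shard into per-head features: (batch, seq, num_heads, head_dim)
reshaped = query.reshape(query.shape[:-1] + (num_heads // mp_num, head_dim))  # (2, 5, 4, 2, 4)
query = reshaped.reshape(query.shape[:-2] + (-1,) + reshaped.shape[-1:])      # (2, 5, 8, 4)
print(query.shape)
```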
        -
        -
        -# Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->Moss
        -class MossMLP(nn.Module):
        -    def __init__(self, intermediate_size, config):  # in MLP: intermediate_size= 4 * embed_dim
        -        super().__init__()
        -        embed_dim = config.n_embd
        -
        -        self.fc_in = nn.Linear(embed_dim, intermediate_size)
        -        self.fc_out = nn.Linear(intermediate_size, embed_dim)
        -
        -        self.act = ACT2FN[config.activation_function]
        -        self.dropout = nn.Dropout(config.resid_pdrop)
        -
        -    def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
        -        hidden_states = self.fc_in(hidden_states)
        -        hidden_states = self.act(hidden_states)
        -        hidden_states = self.fc_out(hidden_states)
        -        hidden_states = self.dropout(hidden_states)
        -        return hidden_states
        -
        -
        -# Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->Moss
        -class MossBlock(nn.Module):
        -    def __init__(self, config):
        -        super().__init__()
        -        inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
        -        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        -        self.attn = MossAttention(config)
        -        self.mlp = MossMLP(inner_dim, config)
        -
        -    def forward(
        -        self,
        -        hidden_states: Optional[torch.FloatTensor],
        -        layer_past: Optional[Tuple[torch.Tensor]] = None,
        -        attention_mask: Optional[torch.FloatTensor] = None,
        -        position_ids: Optional[torch.LongTensor] = None,
        -        head_mask: Optional[torch.FloatTensor] = None,
        -        use_cache: Optional[bool] = False,
        -        output_attentions: Optional[bool] = False,
        -    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        -        residual = hidden_states
        -        hidden_states = self.ln_1(hidden_states)
        -        attn_outputs = self.attn(
        -            hidden_states=hidden_states,
        -            layer_past=layer_past,
        -            attention_mask=attention_mask,
        -            position_ids=position_ids,
        -            head_mask=head_mask,
        -            use_cache=use_cache,
        -            output_attentions=output_attentions,
        -        )
        -        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        -        outputs = attn_outputs[1:]
        -
        -        feed_forward_hidden_states = self.mlp(hidden_states)
        -        hidden_states = attn_output + feed_forward_hidden_states + residual
        -
        -        if use_cache:
        -            outputs = (hidden_states,) + outputs
        -        else:
        -            outputs = (hidden_states,) + outputs[1:]
        -
        -        return outputs  # hidden_states, present, (attentions)
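Note that `MossBlock` uses the GPT-J-style *parallel* residual: the attention branch and the MLP branch both read the same `ln_1` output, and both are added to the residual in one step. A minimal sketch with stand-in modules (illustrative only, not the real sub-layers):

```python
import torch
from torch import nn

# Toy stand-ins for MossAttention / MossMLP, just to show the dataflow of the block.
ln, attn, mlp = nn.LayerNorm(4), nn.Linear(4, 4), nn.Linear(4, 4)
x = torch.randn(2, 3, 4)

h = ln(x)
y = x + attn(h) + mlp(h)   # both branches see the same normalized input, then join the residual
print(y.shape)
```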
        -
        -
        -class MossPreTrainedModel(PreTrainedModel):
        -    """
        -    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
        -    models.
        -    """
        -
        -    config_class = MossConfig
        -    base_model_prefix = "transformer"
        -    supports_gradient_checkpointing = True
        -    _no_split_modules = ["MossBlock"]
        -
        -    def __init__(self, *inputs, **kwargs):
        -        super().__init__(*inputs, **kwargs)
        -
        -    def _init_weights(self, module):
        -        """Initialize the weights."""
        -        if isinstance(module, (nn.Linear,)):
        -            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
        -            # cf https://github.com/pytorch/pytorch/pull/5617
        -            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        -            if module.bias is not None:
        -                module.bias.data.zero_()
        -        elif isinstance(module, nn.Embedding):
        -            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        -            if module.padding_idx is not None:
        -                module.weight.data[module.padding_idx].zero_()
        -        elif isinstance(module, nn.LayerNorm):
        -            module.bias.data.zero_()
        -            module.weight.data.fill_(1.0)
        -
        -    def _set_gradient_checkpointing(self, module, value=False):
        -        if isinstance(module, MossModel):
        -            module.gradient_checkpointing = value
        -
        -
        -MOSS_START_DOCSTRING = r"""
        -    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
        -    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
        -    behavior.
        -
        -    Parameters:
        -        config ([`MossConfig`]): Model configuration class with all the parameters of the model.
        -            Initializing with a config file does not load the weights associated with the model, only the
        -            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
        -"""
        -
        -MOSS_INPUTS_DOCSTRING = r"""
        -    Args:
        -        input_ids (`torch.LongTensor` of shape `({0})`):
        -            Indices of input sequence tokens in the vocabulary.
        -
-            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
        -            [`PreTrainedTokenizer.__call__`] for details.
        -
        -            [What are input IDs?](../glossary#input-ids)
        -        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
        -            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
        -
        -            - 1 for tokens that are **not masked**,
        -            - 0 for tokens that are **masked**.
        -
        -            [What are attention masks?](../glossary#attention-mask)
        -        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
        -            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
        -            1]`:
        -
        -            - 0 corresponds to a *sentence A* token,
        -            - 1 corresponds to a *sentence B* token.
        -
        -            [What are token type IDs?](../glossary#token-type-ids)
        -        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
        -            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
        -            config.n_positions - 1]`.
        -
        -            [What are position IDs?](../glossary#position-ids)
        -        head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
        -            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
        -
        -            - 1 indicates the head is **not masked**,
        -            - 0 indicates the head is **masked**.
        -
        -        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
        -            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
        -            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
        -            model's internal embedding lookup matrix.
        -        output_attentions (`bool`, *optional*):
        -            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
        -            tensors for more detail.
        -        output_hidden_states (`bool`, *optional*):
        -            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
        -            more detail.
        -        return_dict (`bool`, *optional*):
        -            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        -"""
        -
        -
        -@add_start_docstrings(
        -    "The bare Moss Model transformer outputting raw hidden-states without any specific head on top.",
        -    MOSS_START_DOCSTRING,
        -)
        -class MossModel(MossPreTrainedModel):
        -    def __init__(self, config):
        -        super().__init__(config)
        -
        -        self.embed_dim = config.n_embd
        -        self.vocab_size = config.vocab_size
        -        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        -        self.drop = nn.Dropout(config.embd_pdrop)
        -        self.h = nn.ModuleList([MossBlock(config) for _ in range(config.n_layer)])
        -        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        -        self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
        -
        -        self.gradient_checkpointing = False
        -
        -        # Initialize weights and apply final processing
        -        self.post_init()
        -
        -    def get_input_embeddings(self):
        -        return self.wte
        -
        -    def set_input_embeddings(self, new_embeddings):
        -        self.wte = new_embeddings
        -
        -    @add_start_docstrings_to_model_forward(MOSS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
        -    @add_code_sample_docstrings(
        -        checkpoint=_CHECKPOINT_FOR_DOC,
        -        output_type=BaseModelOutputWithPast,
        -        config_class=_CONFIG_FOR_DOC,
        -    )
        -    def forward(
        -        self,
        -        input_ids: Optional[torch.LongTensor] = None,
        -        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        -        attention_mask: Optional[torch.FloatTensor] = None,
        -        token_type_ids: Optional[torch.LongTensor] = None,
        -        position_ids: Optional[torch.LongTensor] = None,
        -        head_mask: Optional[torch.FloatTensor] = None,
        -        inputs_embeds: Optional[torch.FloatTensor] = None,
        -        use_cache: Optional[bool] = None,
        -        output_attentions: Optional[bool] = None,
        -        output_hidden_states: Optional[bool] = None,
        -        return_dict: Optional[bool] = None,
        -    ) -> Union[Tuple, BaseModelOutputWithPast]:
        -        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        -        output_hidden_states = (
        -            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        -        )
        -        use_cache = use_cache if use_cache is not None else self.config.use_cache
        -        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        -
        -        if input_ids is not None and inputs_embeds is not None:
        -            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        -        elif input_ids is not None:
        -            input_shape = input_ids.size()
        -            input_ids = input_ids.view(-1, input_shape[-1])
        -            batch_size = input_ids.shape[0]
        -        elif inputs_embeds is not None:
        -            input_shape = inputs_embeds.size()[:-1]
        -            batch_size = inputs_embeds.shape[0]
        -        else:
        -            raise ValueError("You have to specify either input_ids or inputs_embeds")
        -
        -        device = input_ids.device if input_ids is not None else inputs_embeds.device
        -
        -        if token_type_ids is not None:
        -            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        -
        -        if position_ids is not None:
        -            position_ids = position_ids.view(-1, input_shape[-1]).long()
        -
        -        if past_key_values is None:
        -            past_length = 0
        -            past_key_values = tuple([None] * len(self.h))
        -        else:
        -            past_length = past_key_values[0][0].size(-2)
        -
        -        if position_ids is None:
        -            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
        -            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        -
        -        # Attention mask.
        -        if attention_mask is not None:
        -            if batch_size <= 0:
        -                raise ValueError("batch_size has to be defined and > 0")
        -            attention_mask = attention_mask.view(batch_size, -1)
        -            # We create a 3D attention mask from a 2D tensor mask.
        -            # Sizes are [batch_size, 1, 1, to_seq_length]
        -            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        -            # this attention mask is more simple than the triangular masking of causal attention
        -            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        -            attention_mask = attention_mask[:, None, None, :]
        -
        -            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        -            # masked positions, this operation will create a tensor which is 0.0 for
        -            # positions we want to attend and the dtype's smallest value for masked positions.
        -            # Since we are adding it to the raw scores before the softmax, this is
        -            # effectively the same as removing these entirely.
        -            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        -            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
        -
        -        # Prepare head mask if needed
        -        # 1.0 in head_mask indicate we keep the head
        -        # attention_probs has shape bsz x num_attention_heads x N x N
        -        # head_mask has shape n_layer x batch x num_attention_heads x N x N
        -        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
        -
        -        if inputs_embeds is None:
        -            inputs_embeds = self.wte(input_ids)
        -
        -        hidden_states = inputs_embeds
        -
        -        if token_type_ids is not None:
        -            token_type_embeds = self.wte(token_type_ids)
        -            hidden_states = hidden_states + token_type_embeds
        -
        -        hidden_states = self.drop(hidden_states)
        -
        -        output_shape = input_shape + (hidden_states.size(-1),)
        -
        -        if self.gradient_checkpointing and self.training:
        -            if use_cache:
        -                logger.warning_once(
        -                    "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
        -                    "`use_cache=False`..."
        -                )
        -                use_cache = False
        -
        -        presents = () if use_cache else None
        -        all_self_attentions = () if output_attentions else None
        -        all_hidden_states = () if output_hidden_states else None
        -        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
        -            if output_hidden_states:
        -                all_hidden_states = all_hidden_states + (hidden_states,)
        -
        -            if self.gradient_checkpointing and self.training:
        -
        -                def create_custom_forward(module):
        -                    def custom_forward(*inputs):
        -                        # None for past_key_value
        -                        return module(*inputs, use_cache, output_attentions)
        -
        -                    return custom_forward
        -
        -                outputs = torch.utils.checkpoint.checkpoint(
        -                    create_custom_forward(block),
        -                    hidden_states,
        -                    None,
        -                    attention_mask,
        -                    position_ids,
        -                    head_mask[i],
        -                )
        -            else:
        -                outputs = block(
        -                    hidden_states=hidden_states,
        -                    layer_past=layer_past,
        -                    attention_mask=attention_mask,
        -                    position_ids=position_ids,
        -                    head_mask=head_mask[i],
        -                    use_cache=use_cache,
        -                    output_attentions=output_attentions,
        -                )
        -
        -            hidden_states = outputs[0]
        -            if use_cache is True:
        -                presents = presents + (outputs[1],)
        -
        -            if output_attentions:
        -                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
        -
        -        hidden_states = self.ln_f(hidden_states)
        -
        -        hidden_states = hidden_states.view(output_shape)
        -        # Add last hidden state
        -        if output_hidden_states:
        -            all_hidden_states = all_hidden_states + (hidden_states,)
        -
        -        if not return_dict:
        -            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
        -
        -        return BaseModelOutputWithPast(
        -            last_hidden_state=hidden_states,
        -            past_key_values=presents,
        -            hidden_states=all_hidden_states,
        -            attentions=all_self_attentions,
        -        )
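For reference, the attention-mask preparation inside `MossModel.forward` above turns a 2-D padding mask into an additive bias on the attention scores. A small standalone illustration (a hypothetical 5-token batch in fp16, not part of the original file):

```python
import torch

# 1 = real token, 0 = padding.
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
dtype = torch.float16

extended = attention_mask[:, None, None, :].to(dtype)        # (batch, 1, 1, to_seq_length)
extended = (1.0 - extended) * torch.finfo(dtype).min
print(extended[0, 0, 0])
# zeros for real tokens, a very large negative value for padded positions,
# so adding it to the pre-softmax scores effectively masks the padding out.
```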
        -
        -
        -@add_start_docstrings(
        -    """
        -    The Moss Model transformer with a language modeling head on top.
        -    """,
        -    MOSS_START_DOCSTRING,
        -)
        -class MossForCausalLM(MossPreTrainedModel):
        -    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.causal_mask"]
        -
        -    def __init__(self, config):
        -        super().__init__(config)
        -        self.transformer = MossModel(config)
        -        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
        -
        -        # Initialize weights and apply final processing
        -        self.post_init()
        -
        -    def get_output_embeddings(self):
        -        return self.lm_head
        -
        -    def set_output_embeddings(self, new_embeddings):
        -        self.lm_head = new_embeddings
        -
        -    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
        -        token_type_ids = kwargs.get("token_type_ids", None)
        -        # only last token for inputs_ids if past is defined in kwargs
        -        if past_key_values:
        -            input_ids = input_ids[:, -1].unsqueeze(-1)
        -            if token_type_ids is not None:
        -                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
        -
        -        attention_mask = kwargs.get("attention_mask", None)
        -        position_ids = kwargs.get("position_ids", None)
        -
        -        if attention_mask is not None and position_ids is None:
        -            # create position_ids on the fly for batch generation
        -            position_ids = attention_mask.long().cumsum(-1) - 1
        -            position_ids.masked_fill_(attention_mask == 0, 1)
        -            if past_key_values:
        -                position_ids = position_ids[:, -1].unsqueeze(-1)
        -
        -        return {
        -            "input_ids": input_ids,
        -            "past_key_values": past_key_values,
        -            "use_cache": kwargs.get("use_cache"),
        -            "position_ids": position_ids,
        -            "attention_mask": attention_mask,
        -            "token_type_ids": token_type_ids,
        -        }
        -
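The `position_ids` fallback in `prepare_inputs_for_generation` derives positions from the attention mask so that left-padded batches still start counting from position 0. A quick worked example (illustrative mask only):

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                               [1, 1, 1, 1, 1]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])  -> the left-padded row restarts at 0 on its first real token
```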
        -    @add_start_docstrings_to_model_forward(MOSS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
        -    @add_code_sample_docstrings(
        -        checkpoint=_CHECKPOINT_FOR_DOC,
        -        output_type=CausalLMOutputWithPast,
        -        config_class=_CONFIG_FOR_DOC,
        -    )
        -    def forward(
        -        self,
        -        input_ids: Optional[torch.LongTensor] = None,
        -        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        -        attention_mask: Optional[torch.FloatTensor] = None,
        -        token_type_ids: Optional[torch.LongTensor] = None,
        -        position_ids: Optional[torch.LongTensor] = None,
        -        head_mask: Optional[torch.FloatTensor] = None,
        -        inputs_embeds: Optional[torch.FloatTensor] = None,
        -        labels: Optional[torch.LongTensor] = None,
        -        use_cache: Optional[bool] = None,
        -        output_attentions: Optional[bool] = None,
        -        output_hidden_states: Optional[bool] = None,
        -        return_dict: Optional[bool] = None,
        -    ) -> Union[Tuple, CausalLMOutputWithPast]:
        -        r"""
        -        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
        -            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
-            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
-            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        -        """
        -        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        -
        -        transformer_outputs = self.transformer(
        -            input_ids,
        -            past_key_values=past_key_values,
        -            attention_mask=attention_mask,
        -            token_type_ids=token_type_ids,
        -            position_ids=position_ids,
        -            head_mask=head_mask,
        -            inputs_embeds=inputs_embeds,
        -            use_cache=use_cache,
        -            output_attentions=output_attentions,
        -            output_hidden_states=output_hidden_states,
        -            return_dict=return_dict,
        -        )
        -        hidden_states = transformer_outputs[0]
        -
        -        # make sure sampling in fp16 works correctly and
        -        # compute loss in fp32 to match with mesh-tf version
        -        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
        -        lm_logits = self.lm_head(hidden_states).to(torch.float32)
        -
        -        loss = None
        -        if labels is not None:
        -            # Shift so that tokens < n predict n
        -            shift_logits = lm_logits[..., :-1, :].contiguous()
        -            shift_labels = labels[..., 1:].contiguous()
        -            # Flatten the tokens
        -            loss_fct = CrossEntropyLoss()
        -            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        -
        -            loss = loss.to(hidden_states.dtype)
        -
        -        if not return_dict:
        -            output = (lm_logits,) + transformer_outputs[1:]
        -            return ((loss,) + output) if loss is not None else output
        -
        -        return CausalLMOutputWithPast(
        -            loss=loss,
        -            logits=lm_logits,
        -            past_key_values=transformer_outputs.past_key_values,
        -            hidden_states=transformer_outputs.hidden_states,
        -            attentions=transformer_outputs.attentions,
        -        )
        -
        -    @staticmethod
        -    def _reorder_cache(
        -        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
        -    ) -> Tuple[Tuple[torch.Tensor]]:
        -        """
-        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
-        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        -        beam_idx at every generation step.
        -        """
        -        return tuple(
        -            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
        -            for layer_past in past_key_values
        -        )
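A minimal smoke test of the classes defined in this file, assuming `MossConfig` is importable from the same package and accepts the usual CodeGen-style keyword arguments (the tiny hyper-parameters below are made up for illustration only, not a real checkpoint):

```python
import torch
# from .configuration_moss import MossConfig   # import path depends on how the module is packaged
# from .modeling_moss import MossForCausalLM

config = MossConfig(
    vocab_size=128, n_positions=64, n_ctx=64, n_embd=32,
    n_layer=2, n_head=4, rotary_dim=8,
)
model = MossForCausalLM(config)

input_ids = torch.randint(0, 128, (1, 10))
out = model(input_ids, labels=input_ids)
print(out.loss, out.logits.shape)   # scalar LM loss, logits of shape (1, 10, 128)
```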
        diff --git a/spaces/tracinginsights/F1-analysis/app.py b/spaces/tracinginsights/F1-analysis/app.py
        deleted file mode 100644
        index ae84eacdbeb20c0d695322f4ee319923219a76dc..0000000000000000000000000000000000000000
        --- a/spaces/tracinginsights/F1-analysis/app.py
        +++ /dev/null
        @@ -1,68 +0,0 @@
        -from git import Repo
        -import os
        -import streamlit as st    
        -#import streamlit_analytics
        -from PIL import Image
        -
        -st.set_page_config(
        -    page_title="TRACING INSIGHTS",
        -    page_icon=None,
        -    layout="wide",
        -    initial_sidebar_state="expanded",
        -    # menu_items={
        -    #     'Get Help': 'https://www.extremelycoolapp.com/help',
        -    #     'Report a bug': "https://www.extremelycoolapp.com/bug",
        -    #     'About': "# This is a header. This is an *extremely* cool app!"
        -    # }
        -)
        -
        -GITHUB_PAT = os.environ['GITHUB']
        -
        -if not os.path.exists('repo_directory'):  
        -    Repo.clone_from(f'https://tracinginsights:{GITHUB_PAT}@github.com/TracingInsights/F1_analysis.git', 'repo_directory'  )
        -
        -from repo_directory.st_functions import st_button, load_css
        -
        -import fastf1
        -
        -fastf1.Cache.enable_cache('cache')
        -
        -
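For context, the fastf1 cache enabled above is what downstream analysis pages rely on when they fetch timing data. A hedged sketch of typical fastf1 usage (the event and session below are illustrative, not taken from this app):

```python
import fastf1

fastf1.Cache.enable_cache('cache')                   # same cache directory as above
session = fastf1.get_session(2023, 'Bahrain', 'R')   # illustrative year/event/session
session.load()                                       # downloads timing data into the cache
fastest = session.laps.pick_fastest()
print(fastest['Driver'], fastest['LapTime'])
```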
        -#streamlit_analytics.start_tracking()
        -#with streamlit_analytics.track():
        -load_css()
        -
        -col1, col2, col3 = st.columns(3)
        -col2.image(Image.open('Logo.png'))
        -
        -
        -st.header('Tracing Insights')
        -
        -st.info("Home for F1 data-driven analytics, pre-race predictions, post-race analysis, dank memes you won't find anywhere else")
        -
        -icon_size = 20
        -
        -
        -st_button('twitter', 'https://twitter.com/tracinginsights', 'Follow me on Twitter', icon_size)
        -st_button('newsletter', 'https://tracinginsights.substack.com/', 'Sign up for my Newsletter', icon_size)
        -st_button('', 'https://instagram.com/tracinginsights', 'Follow me on Instagram', icon_size)
        -st_button('', 'https://www.reddit.com/r/TracingInsights/', 'Join r/TracingInsights sub-reddit', icon_size)
        -st_button('', 'https://github.com/sponsors/TracingInsights', 'Best way to support me', icon_size)
        -st_button('', 'https://tracinginsights.creator-spring.com/', 'Shop Merch', icon_size)
        -st_button('', 'https://discord.gg/ZabvWFNQFz', 'Join Discord', icon_size)
        -st_button('youtube', 'https://www.youtube.com/@TracingInsights', 'Tracing Insights YouTube channel', icon_size)
        -st_button('cup', 'https://www.buymeacoffee.com/tracinginsights', 'Buy me a Coffee', icon_size)
        -st_button('', 'https://www.facebook.com/TracingInsights', 'Follow me on Facebook', icon_size)
        -st_button('', 'https://www.linkedin.com/company/tracinginsights', 'Follow me on LinkedIn', icon_size)
        -st_button('', 'https://www.patreon.com/tracinginsights', 'Patreon for exclusive content', icon_size)
        -st_button('', 'https://www.twitch.tv/tracinginsights', 'Follow me on Twitch', icon_size)
        -
        -
        -# st_button('', 'https://clubhouse.com/@tracinginsights', 'Follow me on ClubHouse', icon_size)
        -
        -st_button('', 'https://paypal.me/TracingInsights?country.x=IN&locale.x=en_GB', 'PayPal', icon_size)
        -
        -
        -st_button('', 'https://amazon.com/dp/B0BF2XK7Q6', 'Buy my Childrens book', icon_size)
        -
        -#streamlit_analytics.stop_tracking()
        \ No newline at end of file
        diff --git a/spaces/trl-lib/trl-text-environment/share_btn.py b/spaces/trl-lib/trl-text-environment/share_btn.py
        deleted file mode 100644
        index 2587a360a189c4cc488d23b48c3cf1ca7151b04c..0000000000000000000000000000000000000000
        --- a/spaces/trl-lib/trl-text-environment/share_btn.py
        +++ /dev/null
        @@ -1,112 +0,0 @@
        -community_icon_html = """"""
        -
        -loading_icon_html = """"""
        -
        -share_js = """async () => {
        -	async function uploadFile(file){
        -		const UPLOAD_URL = 'https://huggingface.co/uploads';
        -		const response = await fetch(UPLOAD_URL, {
        -			method: 'POST',
        -			headers: {
        -				'Content-Type': file.type,
        -				'X-Requested-With': 'XMLHttpRequest',
        -			},
        -			body: file, /// <- File inherits from Blob
        -		});
        -		const url = await response.text();
        -		return url;
        -	}
        -
        -	async function getInputImgFile(imgEl){
        -        const res = await fetch(imgEl.src);
        -        const blob = await res.blob();
        -        const imgId = Date.now() % 200;
        -        const isPng = imgEl.src.startsWith(`data:image/png`);
        -        if(isPng){
        -            const fileName = `sd-perception-${{imgId}}.png`;
        -            return new File([blob], fileName, { type: 'image/png' });
        -        }else{
        -            const fileName = `sd-perception-${{imgId}}.jpg`;
        -            return new File([blob], fileName, { type: 'image/jpeg' });
        -        }
        -	}
        -
        -    // const gradioEl = document.querySelector('body > gradio-app');
        -    const gradioEl = document.querySelector("gradio-app");
        -    const inputTxt = gradioEl.querySelector('#q-input textarea').value;
        -    let outputTxt = gradioEl.querySelector('#q-output .codemirror-wrapper .cm-scroller > div:nth-of-type(2)').innerText;
        -    outputTxt = `
        ${outputTxt}
        ` - - const titleLength = 150; - let titleTxt = inputTxt; - if(titleTxt.length > titleLength){ - titleTxt = titleTxt.slice(0, titleLength) + ' ...'; - } - - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - - if(!inputTxt || !outputTxt){ - return; - }; - - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - const descriptionMd = `### Question: -${inputTxt} - -### Answer: - -${outputTxt}`; - - const params = { - title: titleTxt, - description: descriptionMd, - }; - - const paramsStr = Object.entries(params) - .map(([key, value]) => `${encodeURIComponent(key)}=${encodeURIComponent(value)}`) - .join('&'); - - window.open(`https://huggingface.co/spaces/bigcode/bigcode-playground/discussions/new?${paramsStr}`, '_blank'); - - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" - -share_btn_css = """ -a {text-decoration-line: underline; font-weight: 600;} -.animate-spin { - animation: spin 1s linear infinite; -} -@keyframes spin { - from { transform: rotate(0deg); } - to { transform: rotate(360deg); } -} -#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; -} -#share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} -""" \ No newline at end of file diff --git a/spaces/tsi-org/Faceswapper/roop/core.py b/spaces/tsi-org/Faceswapper/roop/core.py deleted file mode 100644 index aeb4c2a370942266f46c60938f8bc425460519f6..0000000000000000000000000000000000000000 --- a/spaces/tsi-org/Faceswapper/roop/core.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -# os.environ["CUDA_VISIBLE_DEVICES"] = "" -# single thread doubles cuda performance - needs to be set before torch import -if any(arg.startswith('--execution-provider') for arg in sys.argv): - os.environ['OMP_NUM_THREADS'] = '1' -# reduce tensorflow log level -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -import warnings -from typing import List -import platform -import signal -import shutil -import argparse -import torch -import onnxruntime -import tensorflow - -import roop.globals -import roop.metadata -import roop.ui as ui -from roop.predicter import predict_image, predict_video -from roop.processors.frame.core import get_frame_processors_modules -from roop.utilities import has_image_extension, is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path - -if 'ROCMExecutionProvider' in roop.globals.execution_providers: - del torch - -warnings.filterwarnings('ignore', category=FutureWarning, module='insightface') -warnings.filterwarnings('ignore', category=UserWarning, module='torchvision') - - -def parse_args() -> None: - signal.signal(signal.SIGINT, lambda 
signal_number, frame: destroy())
-    program = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=100))
-    program.add_argument('-s', '--source', help='select an source image', dest='source_path')
-    program.add_argument('-t', '--target', help='select an target image or video', dest='target_path')
-    program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
-    program.add_argument('--frame-processor', help='frame processors (choices: face_swapper, face_enhancer, ...)', dest='frame_processor', default=['face_swapper'], nargs='+')
-    program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=True)
-    program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True)
-    program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False)
-    program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False)
-    program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx265', choices=['libx264', 'libx265', 'libvpx-vp9'])
-    program.add_argument('--video-quality', help='adjust output video quality', dest='video_quality', type=int, default=3, choices=range(52), metavar='[0-51]')
-    program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory())
-    program.add_argument('--execution-provider', help='available execution provider (choices: cpu, ...)', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
-    program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads())
-    program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}')
-
-    args = program.parse_args()
-
-    roop.globals.source_path = args.source_path
-    roop.globals.target_path = args.target_path
-    roop.globals.output_path = normalize_output_path(roop.globals.source_path, roop.globals.target_path, args.output_path)
-    roop.globals.frame_processors = args.frame_processor
-    roop.globals.headless = args.source_path or args.target_path or args.output_path
-    roop.globals.keep_fps = args.keep_fps
-    roop.globals.keep_audio = args.keep_audio
-    roop.globals.keep_frames = args.keep_frames
-    roop.globals.many_faces = args.many_faces
-    roop.globals.video_encoder = args.video_encoder
-    roop.globals.video_quality = args.video_quality
-    roop.globals.max_memory = args.max_memory
-    roop.globals.execution_providers = decode_execution_providers(args.execution_provider)
-    roop.globals.execution_threads = args.execution_threads
-
-
-def encode_execution_providers(execution_providers: List[str]) -> List[str]:
-    return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]
-
-
-def decode_execution_providers(execution_providers: List[str]) -> List[str]:
-    return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers()))
-            if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)]
-
-
-def suggest_max_memory() -> int:
-    if platform.system().lower() == 'darwin':
-        return 4
-    return 16
-
-
-def suggest_execution_providers() -> List[str]:
-    return encode_execution_providers(onnxruntime.get_available_providers())
-
-
-def suggest_execution_threads() -> int:
-    if 'DmlExecutionProvider' in roop.globals.execution_providers:
-        return 1
-    if 'ROCMExecutionProvider' in roop.globals.execution_providers:
-        return 1
-    return 8
-
-
-def limit_resources() -> None:
-    # prevent tensorflow memory leak
-    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
-    for gpu in gpus:
-        tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
-            tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)
-        ])
-    # limit memory usage
-    if roop.globals.max_memory:
-        memory = roop.globals.max_memory * 1024 ** 3
-        if platform.system().lower() == 'darwin':
-            memory = roop.globals.max_memory * 1024 ** 6
-        if platform.system().lower() == 'windows':
-            import ctypes
-            kernel32 = ctypes.windll.kernel32
-            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
-        else:
-            import resource
-            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
-
-
-def release_resources() -> None:
-    if 'CUDAExecutionProvider' in roop.globals.execution_providers:
-        torch.cuda.empty_cache()
-
-
-def pre_check() -> bool:
-    if sys.version_info < (3, 9):
-        update_status('Python version is not supported - please upgrade to 3.9 or higher.')
-        return False
-    if not shutil.which('ffmpeg'):
-        update_status('ffmpeg is not installed.')
-        return False
-    return True
-
-
-def update_status(message: str, scope: str = 'ROOP.CORE') -> None:
-    print(f'[{scope}] {message}')
-    if not roop.globals.headless:
-        ui.update_status(message)
-
-
-def start() -> None:
-    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
-        if not frame_processor.pre_start():
-            return
-    # process image to image
-    if has_image_extension(roop.globals.target_path):
-        if predict_image(roop.globals.target_path):
-            destroy()
-        shutil.copy2(roop.globals.target_path, roop.globals.output_path)
-        for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
-            update_status('Progressing...', frame_processor.NAME)
-            frame_processor.process_image(roop.globals.source_path, roop.globals.output_path, roop.globals.output_path)
-            frame_processor.post_process()
-            release_resources()
-        if is_image(roop.globals.target_path):
-            update_status('Processing to image succeed!')
-        else:
-            update_status('Processing to image failed!')
-        return
-    # process image to videos
-    if predict_video(roop.globals.target_path):
-        destroy()
-    update_status('Creating temp resources...')
-    create_temp(roop.globals.target_path)
-    update_status('Extracting frames...')
-    extract_frames(roop.globals.target_path)
-    temp_frame_paths = get_temp_frame_paths(roop.globals.target_path)
-    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
-        update_status('Progressing...', frame_processor.NAME)
-        frame_processor.process_video(roop.globals.source_path, temp_frame_paths)
-        frame_processor.post_process()
-        release_resources()
-    # handles fps
-    if roop.globals.keep_fps:
-        update_status('Detecting fps...')
-        fps = detect_fps(roop.globals.target_path)
-        update_status(f'Creating video with {fps} fps...')
-        create_video(roop.globals.target_path, fps)
-    else:
-        update_status('Creating video with 30.0 fps...')
-        create_video(roop.globals.target_path)
-    # handle audio
-    if roop.globals.keep_audio:
-        if roop.globals.keep_fps:
-            update_status('Restoring audio...')
-        else:
-            
update_status('Restoring audio might cause issues as fps are not kept...') - restore_audio(roop.globals.target_path, roop.globals.output_path) - else: - move_temp(roop.globals.target_path, roop.globals.output_path) - # clean and validate - clean_temp(roop.globals.target_path) - if is_video(roop.globals.target_path): - update_status('Processing to video succeed!') - else: - update_status('Processing to video failed!') - - -def destroy() -> None: - if roop.globals.target_path: - clean_temp(roop.globals.target_path) - quit() - - -def run() -> None: - parse_args() - if not pre_check(): - return - for frame_processor in get_frame_processors_modules(roop.globals.frame_processors): - if not frame_processor.pre_check(): - return - limit_resources() - if roop.globals.headless: - start() - else: - window = ui.init(start, destroy) - window.mainloop() diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/__init__.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/__init__.py deleted file mode 100644 index 142ff464d390527ec98d815757ee96dc867e3add..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -''' -v1.5 -T5_RelativePositionEmbedding 允许调整的起始位置和缩放 -增加新的损失函数 weighted_and_neg_topk_cross_entropy -新学习率调整器 AlterCosineLrScheduler -新函数 get_optim_cur_lr - -破坏性更改 -修复算子名称 gen_sinusoidal_position_embedding 到 make_sinusoidal_position_embedding -修复算子名称 gen_nlp_self_attn_mask,gen_nlp_cross_attn_mask 到 make_nlp_self_attn_mask,make_nlp_cross_attn_mask -增加 make_sinusoidal_position_embedding 参数,改变固定周期1000到可调节的周期,默认是10000 - -v1.4.1 -新算子 make_sinusoidal_position_embedding,apply_rotary_position_embedding - -v1.4 -新层 FlashQuadSelfAttention,FlashQuadCrossAttention,T5_RelativePositionEmbedding -新算子 make_nlp_self_attn_mask,make_nlp_cross_attn_mask,find_closest_vector_by_L2,find_closest_vector_by_cos -新优化器 Adan - -v1.3.1 -引入losses包 - -v1.3 -改变optim的结构 -新增Adan优化器 - -v1.2 -对模块进行重构 - -v1.1 -过去的 -''' - -import torch -assert torch.__version__ >= '1.8.1' - -from . import acts -from . import ops -from . import layers -from . import blocks -from . import utils -from . import optim -from . import losses -from . import scheduler - -from .acts import * -from .ops import * -from .layers import * -from .blocks import * -from .utils import * -from .optim import * -from .losses import * -from .scheduler import * -from . import image -from . import rev - - -__version__ = '1.5' diff --git a/spaces/usbethFlerru/sovits-modelsV2/Download-Knjige-Na-Srpskom-Pdf-Free.md b/spaces/usbethFlerru/sovits-modelsV2/Download-Knjige-Na-Srpskom-Pdf-Free.md deleted file mode 100644 index b593446ffe5132595c0d6d9c41c0985442c277f6..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/Download-Knjige-Na-Srpskom-Pdf-Free.md +++ /dev/null @@ -1,100 +0,0 @@ -## download knjige na srpskom pdf free - - - - - - ![Download Knjige Na Srpskom Pdf Free](https://cdn.eso.org/images/thumb350x/eso1725a.jpg) - - - - - -**Click Here === [https://lomasmavi.blogspot.com/?c=2txT2V](https://lomasmavi.blogspot.com/?c=2txT2V)** - - - - - - - - - - - - - -# Download knjige na srpskom pdf free: kako pronaći i uživati u besplatnim elektronskim knjigama na srpskom jeziku - - - -Ako volite čitati knjige, ali nemate dovoljno vremena ili novca da ih kupujete, možda ste zainteresovani za mogućnost da ih preuzmete besplatno u pdf formatu. 
PDF je popularan format za elektronske knjige, jer je kompatibilan sa većinom uređaja i čitača, a omogućava i očuvanje izgleda i kvaliteta originalne knjige. - - - -Međutim, nije uvek lako pronaći knjige na srpskom jeziku u pdf formatu, pogotovo ako tražite neka specifična dela ili žanrove. Zato smo vam pripremili ovaj vodič koji će vam pomoći da otkrijete kako i gde možete download knjige na srpskom pdf free. - - - -## Gde možete download knjige na srpskom pdf free? - - - -Postoji nekoliko izvora na internetu koji nude besplatne elektronske knjige na srpskom jeziku u pdf formatu. Neki od njih su: - - - -- [Besplatne-knjige.com](https://www.besplatne-knjige.com/): Ovo je jedan od najvećih i najbolje organizovanih sajtova za besplatno preuzimanje knjiga na srpskom jeziku. Ovde možete pronaći preko 5000 naslova različitih žanrova i autora, od klasika do savremenih dela. Možete pretraživati knjige po abecednom redu, kategorijama, popularnosti ili oceni korisnika. Takođe možete čitati recenzije i komentare drugih čitalaca, kao i ostaviti svoje miÅ¡ljenje o pročitanim knjigama. - -- [Elektronskeknjige.com](https://www.elektronskeknjige.com/): Ovo je joÅ¡ jedan odličan sajt za download knjige na srpskom pdf free. Ovde možete pronaći preko 3000 naslova raznih domaćih i stranih autora, kao i nekoliko stotina časopisa i članaka. Možete pretraživati knjige po žanrovima, autorima, godini izdanja ili ključnim rečima. Takođe možete ocenjivati i komentarisati knjige, kao i pratiti novosti i akcije na sajtu. - -- [Scribd.com](https://www.scribd.com/): Ovo je jedan od najpoznatijih i najpopularnijih sajtova za deljenje dokumenata na svetu. Ovde možete pronaći milione dokumenata različitih vrsta i tema, uključujući i elektronske knjige na srpskom jeziku. Možete pretraživati dokumenata po ključnim rečima, kategorijama, jezicima ili autorima. Takođe možete postavljati svoje dokumente, pratiti druge korisnike, ocenjivati i komentarisati dokumente, kao i čitati ih online ili ih preuzeti u pdf formatu. - - - -## Kako uživati u besplatnim elektronskim knjigama na srpskom jeziku? - - - -Nakon Å¡to ste pronaÅ¡li i preuzeli željene knj - - - -ke na srpskom jeziku u pdf formatu, možete ih čitati na različitim uređajima i čitačima. Neki od najčešćih su: - - - -- Kompjuter: Možete koristiti bilo koji program koji podržava pdf format, kao Å¡to su Adobe Reader, Foxit Reader, Sumatra PDF ili Google Chrome. Samo otvorite fajl i uživajte u čitanju. - -- Telefon ili tablet: Možete koristiti bilo koju aplikaciju koja podržava pdf format, kao Å¡to su Adobe Acrobat Reader, Google Play Books, FBReader ili Moon+ Reader. Samo prebacite fajl na svoj uređaj i otvorite ga u aplikaciji. - -- E-čitač: Možete koristiti bilo koji e-čitač koji podržava pdf format, kao Å¡to su Kindle, Kobo, Nook ili PocketBook. Samo prebacite fajl na svoj e-čitač i otvorite ga u meniju. - - - -Ako želite da poboljÅ¡ate svoje iskustvo čitanja elektronskih knjiga na srpskom jeziku u pdf formatu, možete primeniti neke od ovih saveta: - - - -- Podesite veličinu i boju fonta, osvetljenje ekrana i orijentaciju stranice prema svojim potrebama i preferencijama. - -- Koristite opcije za zumiranje, skrolovanje, pretraživanje i obeležavanje teksta da biste lakÅ¡e pratili i pamtili sadržaj knjige. - -- Koristite opcije za dodavanje beleÅ¡ki, komentara, citata i oznaka da biste personalizovali i obogatili svoje čitanje. - -- Koristite opcije za sinhronizaciju, deljenje i preporučivanje knjiga da biste se povezali sa drugim čitaocima i otkrili nove knjige. 
- - - -## Zaključak - - - -Download knjige na srpskom pdf free je odličan način da proÅ¡irite svoju biblioteku i uživate u čitanju bez troÅ¡enja novca. Postoji mnogo sajtova koji nude besplatne elektronske knjige na srpskom jeziku u pdf formatu, a mi smo vam predstavili neke od najboljih. Takođe smo vam dali neke savete kako da pronađete i čitate knjige na različitim uređajima i čitačima. Nadamo se da vam je ovaj članak bio koristan i da ćete uskoro pronaći svoju sledeću omiljenu knjigu. - - dfd1c89656 - - - - - diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/vit/sam/model.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/vit/sam/model.py deleted file mode 100644 index 83861f4b9cafaf323a1a690b3443ffeb6c891fbd..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/vit/sam/model.py +++ /dev/null @@ -1,59 +0,0 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license -""" -SAM model interface -""" - -from ultralytics.yolo.cfg import get_cfg - -from ...yolo.utils.torch_utils import model_info -from .build import build_sam -from .predict import Predictor - - -class SAM: - - def __init__(self, model='sam_b.pt') -> None: - if model and not model.endswith('.pt') and not model.endswith('.pth'): - # Should raise AssertionError instead? - raise NotImplementedError('Segment anything prediction requires pre-trained checkpoint') - self.model = build_sam(model) - self.task = 'segment' # required - self.predictor = None # reuse predictor - - def predict(self, source, stream=False, **kwargs): - """Predicts and returns segmentation masks for given image or video source.""" - overrides = dict(conf=0.25, task='segment', mode='predict') - overrides.update(kwargs) # prefer kwargs - if not self.predictor: - self.predictor = Predictor(overrides=overrides) - self.predictor.setup_model(model=self.model) - else: # only update args if predictor is already setup - self.predictor.args = get_cfg(self.predictor.args, overrides) - return self.predictor(source, stream=stream) - - def train(self, **kwargs): - """Function trains models but raises an error as SAM models do not support training.""" - raise NotImplementedError("SAM models don't support training") - - def val(self, **kwargs): - """Run validation given dataset.""" - raise NotImplementedError("SAM models don't support validation") - - def __call__(self, source=None, stream=False, **kwargs): - """Calls the 'predict' function with given arguments to perform object detection.""" - return self.predict(source, stream, **kwargs) - - def __getattr__(self, attr): - """Raises error if object has no requested attribute.""" - name = self.__class__.__name__ - raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}") - - def info(self, detailed=False, verbose=True): - """ - Logs model info. - - Args: - detailed (bool): Show detailed information about model. - verbose (bool): Controls verbosity. - """ - return model_info(self.model, detailed=detailed, verbose=verbose) diff --git a/spaces/victor/tailwind-static-space/index.html b/spaces/victor/tailwind-static-space/index.html deleted file mode 100644 index 01be8428337be0dc5f7acf6bd427f769805c8196..0000000000000000000000000000000000000000 --- a/spaces/victor/tailwind-static-space/index.html +++ /dev/null @@ -1,71 +0,0 @@ - - - - - - Static Space - - - - -
-        An advanced online playground for Tailwind CSS, including support for things like:
-        • Customizing your tailwind.config.js file
-        • Extracting classes with @apply
-        • Code completion with instant preview
-        Perfect for learning how the framework works, prototyping a new idea, or creating a demo to share online.
-        Want to dig deeper into Tailwind?
-        Read the docs →
        - - \ No newline at end of file diff --git a/spaces/vjain/AudioChat/README.md b/spaces/vjain/AudioChat/README.md deleted file mode 100644 index 2779e51be0d2819cd5b00ee9a4c2360e8863c8b9..0000000000000000000000000000000000000000 --- a/spaces/vjain/AudioChat/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AudioChat -emoji: 🌍 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false -license: cc ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/pointrend_r50.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/pointrend_r50.py deleted file mode 100644 index 9d323dbf9466d41e0800aa57ef84045f3d874bdf..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/pointrend_r50.py +++ /dev/null @@ -1,56 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='CascadeEncoderDecoder', - num_stages=2, - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 1, 1), - strides=(1, 2, 2, 2), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=4), - decode_head=[ - dict( - type='FPNHead', - in_channels=[256, 256, 256, 256], - in_index=[0, 1, 2, 3], - feature_strides=[4, 8, 16, 32], - channels=128, - dropout_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - dict( - type='PointHead', - in_channels=[256], - in_index=[0], - channels=256, - num_fcs=3, - coarse_pred_each_layer=True, - dropout_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ], - # model training and testing settings - train_cfg=dict( - num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75), - test_cfg=dict( - mode='whole', - subdivision_steps=2, - subdivision_num_points=8196, - scale_factor=2)) diff --git a/spaces/walisonhs/stabilityai-stable-diffusion-2/README.md b/spaces/walisonhs/stabilityai-stable-diffusion-2/README.md deleted file mode 100644 index ae4c6e5e4e8829d1b4f6498bb7c61e0bc4450719..0000000000000000000000000000000000000000 --- a/spaces/walisonhs/stabilityai-stable-diffusion-2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stabilityai Stable Diffusion 2 -emoji: 🏢 -colorFrom: pink -colorTo: green -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/wendys-llc/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py b/spaces/wendys-llc/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py deleted file mode 100644 index 052df6220595a1b39b7e2aea37ca4872d113dfd2..0000000000000000000000000000000000000000 --- a/spaces/wendys-llc/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: 
https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Conditional DETR model and criterion classes. -# Copyright (c) 2021 Microsoft. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# ------------------------------------------------------------------------ -# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# ------------------------------------------------------------------------ -import copy -from typing import List - -import torch -import torch.nn.functional as F -from torch import nn -from torchvision.ops.boxes import nms -from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast - -from groundingdino.util import box_ops, get_tokenlizer -from groundingdino.util.misc import ( - NestedTensor, - accuracy, - get_world_size, - interpolate, - inverse_sigmoid, - is_dist_avail_and_initialized, - nested_tensor_from_tensor_list, -) -from groundingdino.util.utils import get_phrases_from_posmap -from groundingdino.util.visualizer import COCOVisualizer -from groundingdino.util.vl_utils import create_positive_map_from_span - -from ..registry import MODULE_BUILD_FUNCS -from .backbone import build_backbone -from .bertwarper import ( - BertModelWarper, - generate_masks_with_special_tokens, - generate_masks_with_special_tokens_and_transfer_map, -) -from .transformer import build_transformer -from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss - - -class GroundingDINO(nn.Module): - """This is the Cross-Attention Detector module that performs object detection""" - - def __init__( - self, - backbone, - transformer, - num_queries, - aux_loss=False, - iter_update=False, - query_dim=2, - num_feature_levels=1, - nheads=8, - # two stage - two_stage_type="no", # ['no', 'standard'] - dec_pred_bbox_embed_share=True, - two_stage_class_embed_share=True, - two_stage_bbox_embed_share=True, - num_patterns=0, - dn_number=100, - dn_box_noise_scale=0.4, - dn_label_noise_ratio=0.5, - dn_labelbook_size=100, - text_encoder_type="bert-base-uncased", - sub_sentence_present=True, - max_text_len=256, - ): - """Initializes the model. - Parameters: - backbone: torch module of the backbone to be used. See backbone.py - transformer: torch module of the transformer architecture. See transformer.py - num_queries: number of object queries, ie detection slot. This is the maximal number of objects - Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. - aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
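            query_dim: dimensionality of each query's reference; must be 4 (cx, cy, w, h box coordinates).
            num_feature_levels: number of feature maps taken from the backbone and fed to the transformer.
            two_stage_type: "no" or "standard"; "standard" enables the encoder-side (two-stage) proposal path.
            text_encoder_type: name of the HuggingFace text backbone used to embed the caption (e.g. "bert-base-uncased").
            max_text_len: maximum number of caption tokens kept after tokenization.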
- """ - super().__init__() - self.num_queries = num_queries - self.transformer = transformer - self.hidden_dim = hidden_dim = transformer.d_model - self.num_feature_levels = num_feature_levels - self.nheads = nheads - self.max_text_len = 256 - self.sub_sentence_present = sub_sentence_present - - # setting query dim - self.query_dim = query_dim - assert query_dim == 4 - - # for dn training - self.num_patterns = num_patterns - self.dn_number = dn_number - self.dn_box_noise_scale = dn_box_noise_scale - self.dn_label_noise_ratio = dn_label_noise_ratio - self.dn_labelbook_size = dn_labelbook_size - - # bert - self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) - self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) - self.bert.pooler.dense.weight.requires_grad_(False) - self.bert.pooler.dense.bias.requires_grad_(False) - self.bert = BertModelWarper(bert_model=self.bert) - - self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) - nn.init.constant_(self.feat_map.bias.data, 0) - nn.init.xavier_uniform_(self.feat_map.weight.data) - # freeze - - # special tokens - self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) - - # prepare input projection layers - if num_feature_levels > 1: - num_backbone_outs = len(backbone.num_channels) - input_proj_list = [] - for _ in range(num_backbone_outs): - in_channels = backbone.num_channels[_] - input_proj_list.append( - nn.Sequential( - nn.Conv2d(in_channels, hidden_dim, kernel_size=1), - nn.GroupNorm(32, hidden_dim), - ) - ) - for _ in range(num_feature_levels - num_backbone_outs): - input_proj_list.append( - nn.Sequential( - nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), - nn.GroupNorm(32, hidden_dim), - ) - ) - in_channels = hidden_dim - self.input_proj = nn.ModuleList(input_proj_list) - else: - assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" - self.input_proj = nn.ModuleList( - [ - nn.Sequential( - nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), - nn.GroupNorm(32, hidden_dim), - ) - ] - ) - - self.backbone = backbone - self.aux_loss = aux_loss - self.box_pred_damping = box_pred_damping = None - - self.iter_update = iter_update - assert iter_update, "Why not iter_update?" 
- - # prepare pred layers - self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share - # prepare class & box embed - _class_embed = ContrastiveEmbed() - - _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) - nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) - nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) - - if dec_pred_bbox_embed_share: - box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] - else: - box_embed_layerlist = [ - copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) - ] - class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] - self.bbox_embed = nn.ModuleList(box_embed_layerlist) - self.class_embed = nn.ModuleList(class_embed_layerlist) - self.transformer.decoder.bbox_embed = self.bbox_embed - self.transformer.decoder.class_embed = self.class_embed - - # two stage - self.two_stage_type = two_stage_type - assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( - two_stage_type - ) - if two_stage_type != "no": - if two_stage_bbox_embed_share: - assert dec_pred_bbox_embed_share - self.transformer.enc_out_bbox_embed = _bbox_embed - else: - self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) - - if two_stage_class_embed_share: - assert dec_pred_bbox_embed_share - self.transformer.enc_out_class_embed = _class_embed - else: - self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) - - self.refpoint_embed = None - - self._reset_parameters() - - def _reset_parameters(self): - # init input_proj - for proj in self.input_proj: - nn.init.xavier_uniform_(proj[0].weight, gain=1) - nn.init.constant_(proj[0].bias, 0) - - def init_ref_points(self, use_num_queries): - self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) - - def forward(self, samples: NestedTensor, targets: List = None, **kw): - """The forward expects a NestedTensor, which consists of: - - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels - - It returns a dict with the following elements: - - "pred_logits": the classification logits (including no-object) for all queries. - Shape= [batch_size x num_queries x num_classes] - - "pred_boxes": The normalized boxes coordinates for all queries, represented as - (center_x, center_y, width, height). These values are normalized in [0, 1], - relative to the size of each individual image (disregarding possible padding). - See PostProcess for information on how to retrieve the unnormalized bounding box. - - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of - dictionnaries containing the two above keys for each decoder layer. 
- """ - if targets is None: - captions = kw["captions"] - else: - captions = [t["caption"] for t in targets] - len(captions) - - # encoder texts - tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to( - samples.device - ) - ( - text_self_attention_masks, - position_ids, - cate_to_token_mask_list, - ) = generate_masks_with_special_tokens_and_transfer_map( - tokenized, self.specical_tokens, self.tokenizer - ) - - if text_self_attention_masks.shape[1] > self.max_text_len: - text_self_attention_masks = text_self_attention_masks[ - :, : self.max_text_len, : self.max_text_len - ] - position_ids = position_ids[:, : self.max_text_len] - tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len] - tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len] - tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len] - - # extract text embeddings - if self.sub_sentence_present: - tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"} - tokenized_for_encoder["attention_mask"] = text_self_attention_masks - tokenized_for_encoder["position_ids"] = position_ids - else: - # import ipdb; ipdb.set_trace() - tokenized_for_encoder = tokenized - - bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768 - - encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model - text_token_mask = tokenized.attention_mask.bool() # bs, 195 - # text_token_mask: True for nomask, False for mask - # text_self_attention_masks: True for nomask, False for mask - - if encoded_text.shape[1] > self.max_text_len: - encoded_text = encoded_text[:, : self.max_text_len, :] - text_token_mask = text_token_mask[:, : self.max_text_len] - position_ids = position_ids[:, : self.max_text_len] - text_self_attention_masks = text_self_attention_masks[ - :, : self.max_text_len, : self.max_text_len - ] - - text_dict = { - "encoded_text": encoded_text, # bs, 195, d_model - "text_token_mask": text_token_mask, # bs, 195 - "position_ids": position_ids, # bs, 195 - "text_self_attention_masks": text_self_attention_masks, # bs, 195,195 - } - - # import ipdb; ipdb.set_trace() - - if isinstance(samples, (list, torch.Tensor)): - samples = nested_tensor_from_tensor_list(samples) - features, poss = self.backbone(samples) - - srcs = [] - masks = [] - for l, feat in enumerate(features): - src, mask = feat.decompose() - srcs.append(self.input_proj[l](src)) - masks.append(mask) - assert mask is not None - if self.num_feature_levels > len(srcs): - _len_srcs = len(srcs) - for l in range(_len_srcs, self.num_feature_levels): - if l == _len_srcs: - src = self.input_proj[l](features[-1].tensors) - else: - src = self.input_proj[l](srcs[-1]) - m = samples.mask - mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] - pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) - srcs.append(src) - masks.append(mask) - poss.append(pos_l) - - input_query_bbox = input_query_label = attn_mask = dn_meta = None - hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer( - srcs, masks, input_query_bbox, poss, input_query_label, attn_mask, text_dict - ) - - # deformable-detr-like anchor update - outputs_coord_list = [] - for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate( - zip(reference[:-1], self.bbox_embed, hs) - ): - layer_delta_unsig = layer_bbox_embed(layer_hs) - layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) - layer_outputs_unsig = 
layer_outputs_unsig.sigmoid() - outputs_coord_list.append(layer_outputs_unsig) - outputs_coord_list = torch.stack(outputs_coord_list) - - # output - outputs_class = torch.stack( - [ - layer_cls_embed(layer_hs, text_dict) - for layer_cls_embed, layer_hs in zip(self.class_embed, hs) - ] - ) - out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord_list[-1]} - - # # for intermediate outputs - # if self.aux_loss: - # out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) - - # # for encoder output - # if hs_enc is not None: - # # prepare intermediate outputs - # interm_coord = ref_enc[-1] - # interm_class = self.transformer.enc_out_class_embed(hs_enc[-1], text_dict) - # out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} - # out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} - - return out - - @torch.jit.unused - def _set_aux_loss(self, outputs_class, outputs_coord): - # this is a workaround to make torchscript happy, as torchscript - # doesn't support dictionary with non-homogeneous values, such - # as a dict having both a Tensor and a list. - return [ - {"pred_logits": a, "pred_boxes": b} - for a, b in zip(outputs_class[:-1], outputs_coord[:-1]) - ] - - -@MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino") -def build_groundingdino(args): - - backbone = build_backbone(args) - transformer = build_transformer(args) - - dn_labelbook_size = args.dn_labelbook_size - dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share - sub_sentence_present = args.sub_sentence_present - - model = GroundingDINO( - backbone, - transformer, - num_queries=args.num_queries, - aux_loss=True, - iter_update=True, - query_dim=4, - num_feature_levels=args.num_feature_levels, - nheads=args.nheads, - dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, - two_stage_type=args.two_stage_type, - two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, - two_stage_class_embed_share=args.two_stage_class_embed_share, - num_patterns=args.num_patterns, - dn_number=0, - dn_box_noise_scale=args.dn_box_noise_scale, - dn_label_noise_ratio=args.dn_label_noise_ratio, - dn_labelbook_size=dn_labelbook_size, - text_encoder_type=args.text_encoder_type, - sub_sentence_present=sub_sentence_present, - max_text_len=args.max_text_len, - ) - - return model diff --git a/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/actions/mock.py b/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/actions/mock.py deleted file mode 100644 index a800690e8e04e79375ac51cbbfe02390cd4e9f11..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/actions/mock.py +++ /dev/null @@ -1,512 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/18 23:51 -@Author : alexanderwu -@File : mock.py -""" - -PRD_SAMPLE = """## Original Requirements -The original requirement is to create a game similar to the classic text-based adventure game, Zork. 
- -## Product Goals -```python -product_goals = [ - "Create an engaging text-based adventure game", - "Ensure the game is easy to navigate and user-friendly", - "Incorporate compelling storytelling and puzzles" -] -``` - -## User Stories -```python -user_stories = [ - "As a player, I want to be able to easily input commands so that I can interact with the game world", - "As a player, I want to explore various rooms and locations to uncover the game's story", - "As a player, I want to solve puzzles to progress in the game", - "As a player, I want to interact with various in-game objects to enhance my gameplay experience", - "As a player, I want a game that challenges my problem-solving skills and keeps me engaged" -] -``` - -## Competitive Analysis -```python -competitive_analysis = [ - "Zork: The original text-based adventure game with complex puzzles and engaging storytelling", - "The Hitchhiker's Guide to the Galaxy: A text-based game with a unique sense of humor and challenging gameplay", - "Colossal Cave Adventure: The first text adventure game which set the standard for the genre", - "Quest: A platform that lets users create their own text adventure games", - "ChatGPT: An AI that can generate text-based adventure games", - "The Forest of Doom: A text-based game with a fantasy setting and multiple endings", - "Wizards Choice: A text-based game with RPG elements and a focus on player choice" -] -``` - -## Competitive Quadrant Chart -```mermaid -quadrantChart - title Reach and engagement of text-based adventure games - x-axis Low Reach --> High Reach - y-axis Low Engagement --> High Engagement - quadrant-1 High potential games - quadrant-2 Popular but less engaging games - quadrant-3 Less popular and less engaging games - quadrant-4 Popular and engaging games - "Zork": [0.9, 0.8] - "Hitchhiker's Guide": [0.7, 0.7] - "Colossal Cave Adventure": [0.8, 0.6] - "Quest": [0.4, 0.5] - "ChatGPT": [0.3, 0.6] - "Forest of Doom": [0.5, 0.4] - "Wizards Choice": [0.6, 0.5] - "Our Target Product": [0.5, 0.6] -``` - -## Requirement Analysis -The goal is to create a text-based adventure game similar to Zork. The game should be engaging, user-friendly, and feature compelling storytelling and puzzles. It should allow players to explore various rooms and locations, interact with in-game objects, and solve puzzles to progress. The game should also challenge players' problem-solving skills and keep them engaged. - -## Requirement Pool -```python -requirement_pool = [ - ("Design an intuitive command input system for player interactions", "P0"), - ("Create a variety of rooms and locations for players to explore", "P0"), - ("Develop engaging puzzles that players need to solve to progress", "P0"), - ("Incorporate a compelling story that unfolds as players explore the game world", "P1"), - ("Ensure the game is user-friendly and easy to navigate", "P1") -] -``` - -## Anything UNCLEAR -The original requirement did not specify the platform for the game (web, mobile, desktop) or any specific features or themes for the game's story and puzzles. More information on these aspects could help in further refining the product requirements and design. -""" - -DESIGN_LLM_KB_SEARCH_SAMPLE = """## Implementation approach: - -The game will be developed as a console application in Python, which will allow it to be platform-independent. The game logic will be implemented using Object Oriented Programming principles. - -The game will consist of different "rooms" or "locations" that the player can navigate. 
Each room will have different objects and puzzles that the player can interact with. The player's progress in the game will be determined by their ability to solve these puzzles. - -Python's in-built data structures like lists and dictionaries will be used extensively to manage the game state, player inventory, room details, etc. - -For testing, we can use the PyTest framework. This is a mature full-featured Python testing tool that helps you write better programs. - -## Python package name: -```python -"adventure_game" -``` - -## File list: -```python -file_list = ["main.py", "room.py", "player.py", "game.py", "object.py", "puzzle.py", "test_game.py"] -``` - -## Data structures and interface definitions: -```mermaid -classDiagram - class Room{ - +__init__(self, description: str, objects: List[Object]) - +get_description(self) -> str - +get_objects(self) -> List[Object] - } - class Player{ - +__init__(self, current_room: Room, inventory: List[Object]) - +move(self, direction: str) -> None - +get_current_room(self) -> Room - +get_inventory(self) -> List[Object] - } - class Object{ - +__init__(self, name: str, description: str, is_usable: bool) - +get_name(self) -> str - +get_description(self) -> str - +is_usable(self) -> bool - } - class Puzzle{ - +__init__(self, question: str, answer: str, reward: Object) - +ask_question(self) -> str - +check_answer(self, player_answer: str) -> bool - +get_reward(self) -> Object - } - class Game{ - +__init__(self, player: Player) - +start(self) -> None - +end(self) -> None - } - Room "1" -- "*" Object - Player "1" -- "1" Room - Player "1" -- "*" Object - Puzzle "1" -- "1" Object - Game "1" -- "1" Player -``` - -## Program call flow: -```mermaid -sequenceDiagram - participant main as main.py - participant Game as Game - participant Player as Player - participant Room as Room - main->>Game: Game(player) - Game->>Player: Player(current_room, inventory) - Player->>Room: Room(description, objects) - Game->>Game: start() - Game->>Player: move(direction) - Player->>Room: get_description() - Game->>Player: get_inventory() - Game->>Game: end() -``` - -## Anything UNCLEAR: -The original requirements did not specify whether the game should have a save/load feature, multiplayer support, or any specific graphical user interface. More information on these aspects could help in further refining the product design and requirements. -""" - - -PROJECT_MANAGEMENT_SAMPLE = '''## Required Python third-party packages: Provided in requirements.txt format -```python -"pytest==6.2.5" -``` - -## Required Other language third-party packages: Provided in requirements.txt format -```python -``` - -## Full API spec: Use OpenAPI 3.0. Describe all APIs that may be used by both frontend and backend. -```python -""" -This project is a console-based application and doesn't require any API endpoints. All interactions will be done through the console interface. -""" -``` - -## Logic Analysis: Provided as a Python list[str, str]. the first is filename, the second is class/method/function should be implemented in this file. Analyze the dependencies between the files, which work should be done first -```python -[ - ("object.py", "Object"), - ("room.py", "Room"), - ("player.py", "Player"), - ("puzzle.py", "Puzzle"), - ("game.py", "Game"), - ("main.py", "main"), - ("test_game.py", "test_game") -] -``` - -## Task list: Provided as Python list[str]. 
Each str is a filename, the more at the beginning, the more it is a prerequisite dependency, should be done first -```python -[ - "object.py", - "room.py", - "player.py", - "puzzle.py", - "game.py", - "main.py", - "test_game.py" -] -``` - -## Shared Knowledge: Anything that should be public like utils' functions, config's variables details that should make clear first. -```python -""" -Shared knowledge for this project includes understanding the basic principles of Object Oriented Programming, Python's built-in data structures like lists and dictionaries, and the PyTest framework for testing. -""" -``` - -## Anything UNCLEAR: Provide as Plain text. Make clear here. For example, don't forget a main entry. don't forget to init 3rd party libs. -```python -""" -The original requirements did not specify whether the game should have a save/load feature, multiplayer support, or any specific graphical user interface. More information on these aspects could help in further refining the product design and requirements. -""" -``` -''' - - -WRITE_CODE_PROMPT_SAMPLE = """ -你是一个工程师。下面是背景信息与你的当前任务,请为任务撰写代码。 -撰写的代码应该符合PEP8,优雅,模块化,易于阅读与维护,代码本身应该有__main__入口来防止桩函数 - -## 用户编写程序所需的全部、详尽的文件路径列表(只需要相对路径,并不需要前缀,组织形式应该符合PEP规范) - -- `main.py`: 主程序文件 -- `search_engine.py`: 搜索引擎实现文件 -- `knowledge_base.py`: 知识库管理文件 -- `user_interface.py`: 用户界面文件 -- `data_import.py`: 数据导入功能文件 -- `data_export.py`: 数据导出功能文件 -- `utils.py`: 工具函数文件 - -## 数据结构 - -- `KnowledgeBase`: 知识库类,用于管理私有知识库的内容、分类、标签和关键词。 -- `SearchEngine`: 搜索引擎类,基于大语言模型,用于对用户输入的关键词或短语进行语义理解,并提供准确的搜索结果。 -- `SearchResult`: 搜索结果类,包含与用户搜索意图相关的知识库内容的相关信息。 -- `UserInterface`: 用户界面类,提供简洁、直观的用户界面,支持多种搜索方式和搜索结果的排序和过滤。 -- `DataImporter`: 数据导入类,支持多种数据格式的导入功能,用于将外部数据导入到知识库中。 -- `DataExporter`: 数据导出类,支持多种数据格式的导出功能,用于将知识库内容进行备份和分享。 - -## API接口 - -- `KnowledgeBase`类接口: - - `add_entry(entry: str, category: str, tags: List[str], keywords: List[str]) -> bool`: 添加知识库条目。 - - `delete_entry(entry_id: str) -> bool`: 删除知识库条目。 - - `update_entry(entry_id: str, entry: str, category: str, tags: List[str], keywords: List[str]) -> bool`: 更新知识库条目。 - - `search_entries(query: str) -> List[str]`: 根据查询词搜索知识库条目。 - -- `SearchEngine`类接口: - - `search(query: str) -> SearchResult`: 根据用户查询词进行搜索,返回与查询意图相关的搜索结果。 - -- `UserInterface`类接口: - - `display_search_results(results: List[SearchResult]) -> None`: 显示搜索结果。 - - `filter_results(results: List[SearchResult], filters: Dict[str, Any]) -> List[SearchResult]`: 根据过滤条件对搜索结果进行过滤。 - - `sort_results(results: List[SearchResult], key: str, reverse: bool = False) -> List[SearchResult]`: 根据指定的键对搜索结果进行排序。 - -- `DataImporter`类接口: - - `import_data(file_path: str) -> bool`: 导入外部数据到知识库。 - -- `DataExporter`类接口: - - `export_data(file_path: str) -> bool`: 导出知识库数据到外部文件。 - -## 调用流程(以dot语言描述) - -```dot -digraph call_flow { - rankdir=LR; - - subgraph cluster_user_program { - label="User Program"; - style=dotted; - - main_py -> search_engine_py; - main_py -> knowledge_base_py; - main_py -> user_interface_py; - main_py -> data_import_py; - main_py -> data_export_py; - - search_engine_py -> knowledge_base_py; - search_engine_py -> user_interface_py; - - user_interface_py -> knowledge_base_py; - user_interface_py -> search_engine_py; - - data_import_py -> knowledge_base_py; - data_import_py -> user_interface_py; - - data_export_py -> knowledge_base_py; - data_export_py -> user_interface_py; - } - - main_py [label="main.py"]; - search_engine_py [label="search_engine.py"]; - knowledge_base_py [label="knowledge_base.py"]; - user_interface_py [label="user_interface.py"]; - data_import_py 
[label="data_import.py"]; - data_export_py [label="data_export.py"]; -} -``` - -这是一个简化的调用流程图,展示了各个模块之间的调用关系。用户程序的`main.py`文件通过调用其他模块实现搜索引擎的功能。`search_engine.py`模块与`knowledge_base.py`和`user_interface.py`模块进行交互,实现搜索算法和搜索结果的展示。`data_import.py`和`data_export.py`模块与`knowledge_base.py`和`user_interface.py`模块进行交互,实现数据导入和导出的功能。用户界面模块`user_interface.py`与其他模块进行交互,提供简洁、直观的用户界面,并支持搜索方式、排序和过滤等操作。 - -## 当前任务 - -""" - -TASKS = [ - "添加数据API:接受用户输入的文档库,对文档库进行索引\n- 使用MeiliSearch连接并添加文档库", - "搜索API:接收用户输入的关键词,返回相关的搜索结果\n- 使用MeiliSearch连接并使用接口获得对应数据", - "多条件筛选API:接收用户选择的筛选条件,返回符合条件的搜索结果。\n- 使用MeiliSearch进行筛选并返回符合条件的搜索结果", - "智能推荐API:根据用户的搜索历史记录和搜索行为,推荐相关的搜索结果。" -] - -TASKS_2 = [ - "完成main.py的功能" -] - -SEARCH_CODE_SAMPLE = """ -import requests - - -class SearchAPI: - def __init__(self, elastic_search_url): - self.elastic_search_url = elastic_search_url - - def search(self, keyword): - # 构建搜索请求的参数 - params = { - 'q': keyword, - 'size': 10 # 返回结果数量 - } - - try: - # 发送搜索请求 - response = requests.get(self.elastic_search_url, params=params) - if response.status_code == 200: - # 解析搜索结果 - search_results = response.json() - formatted_results = self.format_results(search_results) - return formatted_results - else: - print('Error: Failed to retrieve search results.') - except requests.exceptions.RequestException as e: - print(f'Error: {e}') - - def format_results(self, search_results): - formatted_results = [] - hits = search_results.get('hits', {}).get('hits', []) - for hit in hits: - result = hit.get('_source', {}) - title = result.get('title', '') - summary = result.get('summary', '') - url = result.get('url', '') - formatted_results.append({ - 'title': title, - 'summary': summary, - 'url': url - }) - return formatted_results - - -if __name__ == '__main__': - # 使用示例 - elastic_search_url = 'http://localhost:9200/search' - search_api = SearchAPI(elastic_search_url) - keyword = input('Enter search keyword: ') - results = search_api.search(keyword) - if results: - for result in results: - print(result) - else: - print('No results found.') -""" - - -REFINED_CODE = ''' -import requests - - -class SearchAPI: - def __init__(self, elastic_search_url): - """ - 初始化SearchAPI对象。 - - Args: - elastic_search_url (str): ElasticSearch的URL。 - """ - self.elastic_search_url = elastic_search_url - - def search(self, keyword, size=10): - """ - 搜索关键词并返回相关的搜索结果。 - - Args: - keyword (str): 用户输入的搜索关键词。 - size (int): 返回结果数量,默认为10。 - - Returns: - list: 包含搜索结果的列表,每个结果是一个字典,包含标题、摘要和URL等信息。如果没有搜索结果,返回一个空列表。 - """ - # 构建搜索请求的参数 - params = { - 'q': keyword, - 'size': size - } - - try: - # 发送搜索请求 - response = requests.get(self.elastic_search_url, params=params) - response.raise_for_status() - # 解析搜索结果 - search_results = response.json() - formatted_results = self.format_results(search_results) - return formatted_results - except requests.exceptions.RequestException as e: - print(f'Error: {e}') - return None - - def format_results(self, search_results): - """ - 格式化搜索结果。 - - Args: - search_results (dict): ElasticSearch返回的搜索结果。 - - Returns: - list: 包含格式化搜索结果的列表,每个结果是一个字典,包含标题、摘要和URL等信息。如果搜索结果为空,返回None。 - """ - if not isinstance(search_results, dict): - return None - - formatted_results = [] - hits = search_results.get('hits', {}).get('hits', []) - for hit in hits: - result = hit.get('_source', {}) - title = result.get('title', '') - summary = result.get('summary', '') - url = result.get('url', '') - formatted_results.append({ - 'title': title, - 'summary': summary, - 'url': url - }) - return formatted_results if formatted_results else None - - -if __name__ 
== '__main__': - # 使用示例 - elastic_search_url = 'http://localhost:9200/search' - search_api = SearchAPI(elastic_search_url) - keyword = input('Enter search keyword: ') - results = search_api.search(keyword) - if results: - for result in results: - print(result) - else: - print('No results found.') -''' - -MEILI_CODE = '''import meilisearch -from typing import List - - -class DataSource: - def __init__(self, name: str, url: str): - self.name = name - self.url = url - - -class SearchEngine: - def __init__(self): - self.client = meilisearch.Client('http://localhost:7700') # MeiliSearch服务器的URL - - def add_documents(self, data_source: DataSource, documents: List[dict]): - index_name = f"{data_source.name}_index" - index = self.client.get_or_create_index(index_name) - index.add_documents(documents) - - -# 示例用法 -if __name__ == '__main__': - search_engine = SearchEngine() - - # 假设有一个名为"books"的数据源,包含要添加的文档库 - books_data_source = DataSource(name='books', url='https://example.com/books') - - # 假设有一个名为"documents"的文档库,包含要添加的文档 - documents = [ - {"id": 1, "title": "Book 1", "content": "This is the content of Book 1."}, - {"id": 2, "title": "Book 2", "content": "This is the content of Book 2."}, - # 其他文档... - ] - - # 添加文档库到搜索引擎 - search_engine.add_documents(books_data_source, documents) -''' - -MEILI_ERROR = '''/usr/local/bin/python3.9 /Users/alexanderwu/git/metagpt/examples/search/meilisearch_index.py -Traceback (most recent call last): - File "/Users/alexanderwu/git/metagpt/examples/search/meilisearch_index.py", line 44, in - search_engine.add_documents(books_data_source, documents) - File "/Users/alexanderwu/git/metagpt/examples/search/meilisearch_index.py", line 25, in add_documents - index = self.client.get_or_create_index(index_name) -AttributeError: 'Client' object has no attribute 'get_or_create_index' - -Process finished with exit code 1''' - -MEILI_CODE_REFINED = """ -""" diff --git a/spaces/williambr/CarePlanSOTAQnA/README.md b/spaces/williambr/CarePlanSOTAQnA/README.md deleted file mode 100644 index b2d0dd0e6b25b53ffcdb52924cd834b7586b52d4..0000000000000000000000000000000000000000 --- a/spaces/williambr/CarePlanSOTAQnA/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: CarePlanSOTAQnA -emoji: 👀 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.1.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xc9/VITS-Umamusume-voice-synthesizer/transforms.py b/spaces/xc9/VITS-Umamusume-voice-synthesizer/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/xc9/VITS-Umamusume-voice-synthesizer/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - 
unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - 
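-        # Inverse pass: locate each input's bin via the cumulative *heights* (outputs) rather than the
-        # cumulative widths, then solve the quadratic below for the root that recovers the forward input.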
bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/xdecoder/Demo/xdecoder/modules/__init__.py b/spaces/xdecoder/Demo/xdecoder/modules/__init__.py deleted file mode 100644 index 6bbbff85221d3e15d34b52f69706896896c47ef3..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Demo/xdecoder/modules/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .position_encoding import * -from .attention import * -from .postprocessing import * \ No newline at end of file diff --git a/spaces/xiaomifan/anime-remove-background/README.md b/spaces/xiaomifan/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/xiaomifan/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xl2533/MakeInstruction/self/generate.py b/spaces/xl2533/MakeInstruction/self/generate.py deleted file mode 100644 index 
f2f727d443e624042c6032ada18527e81e8e54ea..0000000000000000000000000000000000000000 --- a/spaces/xl2533/MakeInstruction/self/generate.py +++ /dev/null @@ -1,188 +0,0 @@ -# -*-coding:utf-8 -*- -import os -os.environ["TOKENIZERS_PARALLELISM"] = "false" -import re -import numpy as np -import os -import json -import random -from self.prompt import self_prompt, gen_few_shot_prompt -from rouge_score import rouge_scorer - -from langchain.prompts import PromptTemplate -from functools import partial -from langchain.chains.llm import LLMChain -from langchain.llms import OpenAI -from multiprocessing import Pool - - -def is_all_chinese(strs): - for _char in strs: - if not '\u4e00' <= _char <= '\u9fa5': - return False - return True - - -class ChineseTokenizer(): - def tokenize(self, text): - tokens = [i.strip() for i in text if i.strip()] - return tokens - - -class SELF(object): - n_instance = 3 # 指令不够样本来凑,每个指令加入多个instance - prefix = "{id}. 指令:" - blacklist = ['图片', '图像', '文件', '作图', '绘画', '视频', '音频', '音乐', '流程图'] - - def __init__(self, seed_file, openai_key, n_human, n_machine, n_instruct, prompt): - self.llm = OpenAI(openai_api_key=openai_key, temperature=1, - stop=[f'\n{n_instruct}', '{n_instruct}', '{n_instruct}.'], # 当已生成足够的指令则停止 - logit_bias={'50259': -100}, # 不生成最后的停止符# - max_tokens=-1 - ) # 默认davinci-003 - self.n_human, self.n_machine, self.n_instruct = n_human, n_machine, n_instruct - self.n_gen, self.n_keep = 0, 0 - self.human_instruction_data = [] - self.machine_instruction_data = [] - self.scorer = None # rougeL用于文本相似度计算 - self.all_instruction_tokens = [] # 全部指令,用于新指令消重 - self.all_instruction = [] # 全部指令,用于新指令消重 - self.sample_few_shot = None #每轮few-shot采样填充成prompt,for gradio - self.load_seed_task(seed_file) - self.init(prompt) - - def load_seed_task(self, seed_file): - instruction_data = [] - with open(seed_file, 'r', encoding='UTF8') as f: - for i in f.readlines(): - tmp = json.loads(i) - for j in range(min(len(tmp['instances']), SELF.n_instance)): - instruction_data.append({'instruction': tmp['instruction'], - 'input': tmp['instances'][j]['input'], - 'output': tmp['instances'][j]['output']}) - self.human_instruction_data = instruction_data - - def init(self, prompt): - if not prompt: - prompt = self_prompt - self.chain = LLMChain(llm=self.llm, prompt=PromptTemplate.from_template(prompt)) - self.scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=False, tokenizer=ChineseTokenizer()) - self.all_instruction = [i['instruction'] for i in self.human_instruction_data + self.machine_instruction_data] - self.all_instruction_tokens = [self.scorer._tokenizer.tokenize(i) for i in - self.all_instruction] - - @property - def first_id(self): - # 第一个机器生成的指令id,前面是few-shot样例 - return int(self.n_human + min(self.n_machine, len(self.machine_instruction_data)) + 1) - - def generate(self): - """ - 新指令生成 - 1. 采样few-shot[n_human + n_machine] - 2. 生成指令 - 3. 
解析模型结果得到新的指令样本 - """ - # sample - seed_sample = random.sample(self.human_instruction_data, self.n_human) - machine_sample = random.sample(self.machine_instruction_data, - min(self.n_machine, len(self.machine_instruction_data))) - # build few-shot - self.sample_few_shot = gen_few_shot_prompt(seed_sample + machine_sample) - # generate - result = self.chain({'few_shot': self.sample_few_shot + SELF.prefix.format(id=self.first_id), # 待生成指令id - 'n_instruct': self.n_instruct}) - return result - - def decode_response(self, response): - if response is None: - return [] - if '###' not in response['text']: - return [] - raw_instruct = SELF.prefix.format(id=self.first_id) + response['text'] - raw_instruct = raw_instruct.split('###') - instruction_data = [] - - for id, inst in enumerate(raw_instruct): - # 因为超长停止的最后一个指令往往被阶段,这里直接丢弃 - if id == len(raw_instruct) and response['finish_reason'] == 'length': - continue - - splitted_data = re.split(f"{id + self.first_id}\.\s+(指令|输入|输出):", inst) - if len(splitted_data) != 7: - continue # 生成部分缺失或格式错误 - else: - inst = splitted_data[2].strip() - input = splitted_data[4].strip() - input = "" if input.lower() == '<无输入>' else input - output = splitted_data[6].strip() - - print({'instruction': inst, 'input': input, 'output': output}) - # 过滤过长,过断的指令 - if len(inst) <= 3 or len(inst) >= 100: - continue - - # 过滤有疑似模型无法执行的指令 - if any((i in inst for i in SELF.blacklist)): - continue - - # 如果指令开头并非中文 - if not is_all_chinese(inst[:3]): - continue - - instruction_data.append({'instruction': inst, 'input': input, 'output': output}) - return instruction_data - - def sim_filter(self, instruction_data): - ## 过滤和已有指令池相似度过高的指令,保证多样性, 使用Rouge-L最长公共子串 - keep_instruction = [] - for inst in instruction_data: - inst_tokens = self.scorer._tokenizer.tokenize(inst['instruction']) - with Pool(os.cpu_count()) as p: - rouge_scores = p.map(partial(rouge_scorer._score_lcs, inst_tokens), self.all_instruction_tokens) - rouge_l = [score.fmeasure for score in rouge_scores] - print(rouge_scores) - print(rouge_l) - top10_sim_inst = { - self.all_instruction[i]: rouge_l[i] for i in np.argsort(rouge_l)[-10:][::-1] - } - print(top10_sim_inst) - if max(rouge_l) > 0.7: - continue - inst['most_similar_instructions'] = top10_sim_inst - inst['avg_similarity_score'] = float(np.mean(rouge_l)) - self.all_instruction.append(inst['instruction']) - self.all_instruction_tokens.append(inst_tokens) - keep_instruction.append(inst) - return keep_instruction - - def step(self): - response = self.generate() - new_instruct_data = self.decode_response(response) - keep_instruct_data = self.sim_filter(new_instruct_data) - self.n_gen += len(new_instruct_data) - self.n_keep += len(keep_instruct_data) - self.machine_instruction_data += keep_instruct_data - return keep_instruct_data # for gradio output only - - def dump_file(self, output_file): - with open(output_file, 'w', encoding='UTF8') as f: - for i in self.machine_instruction_data: - f.write(json.dumps(i, ensure_ascii=False) + '\n') - - -# Only Used for gradio display -def init_instance(seed_file, openai_key, n_human, n_machine, n_instruct, prompt): - # 允许用户输入prompt修改前缀指令命令 - if not prompt: - prompt = self_prompt - self_instance = SELF(seed_file.name, openai_key, n_human, n_machine, n_instruct, prompt) - return self_instance - - -def generate_instruction(self_instance): - keep_instruct_data = self_instance.step() - - return (self_instance.sample_few_shot, - json.dumps(keep_instruct_data, ensure_ascii=False), - f'已生成{self_instance.n_gen} 可用{self_instance.n_keep}') diff --git 
a/spaces/yeshpanovrustem/ner-kazakh/README.md b/spaces/yeshpanovrustem/ner-kazakh/README.md deleted file mode 100644 index 87deb81e3da6820667bf8c5e4b237a46be330807..0000000000000000000000000000000000000000 --- a/spaces/yeshpanovrustem/ner-kazakh/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Ner Kazakh -emoji: 🏢 -colorFrom: yellow -colorTo: pink -sdk: streamlit -sdk_version: 1.28.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/datasets/transforms.py b/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/datasets/transforms.py deleted file mode 100644 index 915125c9054daada5cb7eb27c04999022333339b..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/datasets/transforms.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Transforms and data augmentation for both image + bbox. -""" -import os -import random - -import PIL -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as F - -from GroundingDINO.groundingdino.util.box_ops import box_xyxy_to_cxcywh -from GroundingDINO.groundingdino.util.misc import interpolate - - -def crop(image, target, region): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? - target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area", "iscrowd", "positive_map"] - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? - target["masks"] = target["masks"][:, i : i + h, j : j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target["boxes"].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target["masks"].flatten(1).any(1) - - for field in fields: - if field in target: - target[field] = target[field][keep] - - if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO": - # for debug and visualization only. 
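-        # keep only the positive phrase strings whose corresponding boxes survived the crop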
- if "strings_positive" in target: - target["strings_positive"] = [ - _i for _i, _j in zip(target["strings_positive"], keep) if _j - ] - - return cropped_image, target - - -def hflip(image, target): - flipped_image = F.hflip(image) - - w, h = image.size - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor( - [w, 0, w, 0] - ) - target["boxes"] = boxes - - if "masks" in target: - target["masks"] = target["masks"].flip(-1) - - return flipped_image, target - - -def resize(image, target, size, max_size=None): - # size can be min_size (scalar) or (w, h) tuple - - def get_size_with_aspect_ratio(image_size, size, max_size=None): - w, h = image_size - if max_size is not None: - min_original_size = float(min((w, h))) - max_original_size = float(max((w, h))) - if max_original_size / min_original_size * size > max_size: - size = int(round(max_size * min_original_size / max_original_size)) - - if (w <= h and w == size) or (h <= w and h == size): - return (h, w) - - if w < h: - ow = size - oh = int(size * h / w) - else: - oh = size - ow = int(size * w / h) - - return (oh, ow) - - def get_size(image_size, size, max_size=None): - if isinstance(size, (list, tuple)): - return size[::-1] - else: - return get_size_with_aspect_ratio(image_size, size, max_size) - - size = get_size(image.size, size, max_size) - rescaled_image = F.resize(image, size) - - if target is None: - return rescaled_image, None - - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) - ratio_width, ratio_height = ratios - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor( - [ratio_width, ratio_height, ratio_width, ratio_height] - ) - target["boxes"] = scaled_boxes - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - h, w = size - target["size"] = torch.tensor([h, w]) - - if "masks" in target: - target["masks"] = ( - interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - ) - - return rescaled_image, target - - -def pad(image, target, padding): - # assumes that we only pad on the bottom right corners - padded_image = F.pad(image, (0, 0, padding[0], padding[1])) - if target is None: - return padded_image, None - target = target.copy() - # should we do something wrt the original size? 
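-    # record the padded image size as (h, w); PIL's .size is (w, h), hence the reversal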
- target["size"] = torch.tensor(padded_image.size[::-1]) - if "masks" in target: - target["masks"] = torch.nn.functional.pad(target["masks"], (0, padding[0], 0, padding[1])) - return padded_image, target - - -class ResizeDebug(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - return resize(img, target, self.size) - - -class RandomCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - region = T.RandomCrop.get_params(img, self.size) - return crop(img, target, region) - - -class RandomSizeCrop(object): - def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False): - # respect_boxes: True to keep all boxes - # False to tolerence box filter - self.min_size = min_size - self.max_size = max_size - self.respect_boxes = respect_boxes - - def __call__(self, img: PIL.Image.Image, target: dict): - init_boxes = len(target["boxes"]) - max_patience = 10 - for i in range(max_patience): - w = random.randint(self.min_size, min(img.width, self.max_size)) - h = random.randint(self.min_size, min(img.height, self.max_size)) - region = T.RandomCrop.get_params(img, [h, w]) - result_img, result_target = crop(img, target, region) - if ( - not self.respect_boxes - or len(result_target["boxes"]) == init_boxes - or i == max_patience - 1 - ): - return result_img, result_target - return result_img, result_target - - -class CenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.0)) - crop_left = int(round((image_width - crop_width) / 2.0)) - return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) - - -class RandomHorizontalFlip(object): - def __init__(self, p=0.5): - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return hflip(img, target) - return img, target - - -class RandomResize(object): - def __init__(self, sizes, max_size=None): - assert isinstance(sizes, (list, tuple)) - self.sizes = sizes - self.max_size = max_size - - def __call__(self, img, target=None): - size = random.choice(self.sizes) - return resize(img, target, size, self.max_size) - - -class RandomPad(object): - def __init__(self, max_pad): - self.max_pad = max_pad - - def __call__(self, img, target): - pad_x = random.randint(0, self.max_pad) - pad_y = random.randint(0, self.max_pad) - return pad(img, target, (pad_x, pad_y)) - - -class RandomSelect(object): - """ - Randomly selects between transforms1 and transforms2, - with probability p for transforms1 and (1 - p) for transforms2 - """ - - def __init__(self, transforms1, transforms2, p=0.5): - self.transforms1 = transforms1 - self.transforms2 = transforms2 - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return self.transforms1(img, target) - return self.transforms2(img, target) - - -class ToTensor(object): - def __call__(self, img, target): - return F.to_tensor(img), target - - -class RandomErasing(object): - def __init__(self, *args, **kwargs): - self.eraser = T.RandomErasing(*args, **kwargs) - - def __call__(self, img, target): - return self.eraser(img), target - - -class Normalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, image, target=None): - image = F.normalize(image, mean=self.mean, std=self.std) - if target is None: - return image, None - target = target.copy() - h, 
w = image.shape[-2:] - if "boxes" in target: - boxes = target["boxes"] - boxes = box_xyxy_to_cxcywh(boxes) - boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) - target["boxes"] = boxes - return image, target - - -class Compose(object): - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - def __repr__(self): - format_string = self.__class__.__name__ + "(" - for t in self.transforms: - format_string += "\n" - format_string += " {0}".format(t) - format_string += "\n)" - return format_string diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/fnet/configuration_fnet.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/fnet/configuration_fnet.py deleted file mode 100644 index 9efa06487756ddad5edda75a7dde98b12d729851..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/fnet/configuration_fnet.py +++ /dev/null @@ -1,121 +0,0 @@ -# coding=utf-8 -# Copyright 2021 Google AI and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" FNet model configuration""" - -from ...configuration_utils import PretrainedConfig -from ...utils import logging - - -logger = logging.get_logger(__name__) - -FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", - "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" - # See all FNet models at https://huggingface.co/models?filter=fnet -} - - -class FNetConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`FNetModel`]. It is used to instantiate an FNet - model according to the specified arguments, defining the model architecture. Instantiating a configuration with the - defaults will yield a similar configuration to that of the FNet - [google/fnet-base](https://huggingface.co/google/fnet-base) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - - Args: - vocab_size (`int`, *optional*, defaults to 32000): - Vocabulary size of the FNet model. Defines the number of different tokens that can be represented by the - `inputs_ids` passed when calling [`FNetModel`] or [`TFFNetModel`]. - hidden_size (`int`, *optional*, defaults to 768): - Dimension of the encoder layers and the pooler layer. - num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - intermediate_size (`int`, *optional*, defaults to 3072): - Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. 
- hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"selu"` and `"gelu_new"` are supported. - hidden_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. - max_position_embeddings (`int`, *optional*, defaults to 512): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - type_vocab_size (`int`, *optional*, defaults to 4): - The vocabulary size of the `token_type_ids` passed when calling [`FNetModel`] or [`TFFNetModel`]. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - layer_norm_eps (`float`, *optional*, defaults to 1e-12): - The epsilon used by the layer normalization layers. - use_tpu_fourier_optimizations (`bool`, *optional*, defaults to `False`): - Determines whether to use TPU optimized FFTs. If `True`, the model will favor axis-wise FFTs transforms. - Set to `False` for GPU/CPU hardware, in which case n-dimensional FFTs are used. - tpu_short_seq_length (`int`, *optional*, defaults to 512): - The sequence length that is expected by the model when using TPUs. This will be used to initialize the DFT - matrix only when *use_tpu_fourier_optimizations* is set to `True` and the input sequence is shorter than or - equal to 4096 tokens. - - Example: - - ```python - >>> from transformers import FNetConfig, FNetModel - - >>> # Initializing a FNet fnet-base style configuration - >>> configuration = FNetConfig() - - >>> # Initializing a model (with random weights) from the fnet-base style configuration - >>> model = FNetModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - model_type = "fnet" - - def __init__( - self, - vocab_size=32000, - hidden_size=768, - num_hidden_layers=12, - intermediate_size=3072, - hidden_act="gelu_new", - hidden_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=4, - initializer_range=0.02, - layer_norm_eps=1e-12, - use_tpu_fourier_optimizations=False, - tpu_short_seq_length=512, - pad_token_id=3, - bos_token_id=1, - eos_token_id=2, - **kwargs, - ): - super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) - - self.vocab_size = vocab_size - self.max_position_embeddings = max_position_embeddings - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.intermediate_size = intermediate_size - self.hidden_act = hidden_act - self.hidden_dropout_prob = hidden_dropout_prob - self.initializer_range = initializer_range - self.type_vocab_size = type_vocab_size - self.layer_norm_eps = layer_norm_eps - self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations - self.tpu_short_seq_length = tpu_short_seq_length diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/diffusion/diffusion_onnx.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/diffusion/diffusion_onnx.py deleted file mode 100644 index 1c1e80321de162b5233801efa3423739f7f92bdc..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/diffusion/diffusion_onnx.py +++ /dev/null @@ -1,612 +0,0 @@ -from collections import deque -from functools import partial -from inspect import 
isfunction -import torch.nn.functional as F -import librosa.sequence -import numpy as np -from torch.nn import Conv1d -from torch.nn import Mish -import torch -from torch import nn -from tqdm import tqdm -import math - - -def exists(x): - return x is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def extract(a, t): - return a[t].reshape((1, 1, 1, 1)) - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() - - -def linear_beta_schedule(timesteps, max_beta=0.02): - """ - linear schedule - """ - betas = np.linspace(1e-4, max_beta, timesteps) - return betas - - -def cosine_beta_schedule(timesteps, s=0.008): - """ - cosine schedule - as proposed in https://openreview.net/forum?id=-NEXDKk8gZ - """ - steps = timesteps + 1 - x = np.linspace(0, steps, steps) - alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2 - alphas_cumprod = alphas_cumprod / alphas_cumprod[0] - betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) - return np.clip(betas, a_min=0, a_max=0.999) - - -beta_schedule = { - "cosine": cosine_beta_schedule, - "linear": linear_beta_schedule, -} - - -def extract_1(a, t): - return a[t].reshape((1, 1, 1, 1)) - - -def predict_stage0(noise_pred, noise_pred_prev): - return (noise_pred + noise_pred_prev) / 2 - - -def predict_stage1(noise_pred, noise_list): - return (noise_pred * 3 - - noise_list[-1]) / 2 - - -def predict_stage2(noise_pred, noise_list): - return (noise_pred * 23 - - noise_list[-1] * 16 - + noise_list[-2] * 5) / 12 - - -def predict_stage3(noise_pred, noise_list): - return (noise_pred * 55 - - noise_list[-1] * 59 - + noise_list[-2] * 37 - - noise_list[-3] * 9) / 24 - - -class SinusoidalPosEmb(nn.Module): - def __init__(self, dim): - super().__init__() - self.dim = dim - self.half_dim = dim // 2 - self.emb = 9.21034037 / (self.half_dim - 1) - self.emb = torch.exp(torch.arange(self.half_dim) * torch.tensor(-self.emb)).unsqueeze(0) - self.emb = self.emb.cpu() - - def forward(self, x): - emb = self.emb * x - emb = torch.cat((emb.sin(), emb.cos()), dim=-1) - return emb - - -class ResidualBlock(nn.Module): - def __init__(self, encoder_hidden, residual_channels, dilation): - super().__init__() - self.residual_channels = residual_channels - self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation) - self.diffusion_projection = nn.Linear(residual_channels, residual_channels) - self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1) - self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1) - - def forward(self, x, conditioner, diffusion_step): - diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1) - conditioner = self.conditioner_projection(conditioner) - y = x + diffusion_step - y = self.dilated_conv(y) + conditioner - - gate, filter_1 = torch.split(y, [self.residual_channels, self.residual_channels], dim=1) - - y = torch.sigmoid(gate) * torch.tanh(filter_1) - y = self.output_projection(y) - - residual, skip = torch.split(y, [self.residual_channels, self.residual_channels], dim=1) - - return (x + residual) / 1.41421356, skip - - -class DiffNet(nn.Module): - def __init__(self, in_dims, n_layers, n_chans, n_hidden): - super().__init__() - self.encoder_hidden = n_hidden - self.residual_layers = 
n_layers - self.residual_channels = n_chans - self.input_projection = Conv1d(in_dims, self.residual_channels, 1) - self.diffusion_embedding = SinusoidalPosEmb(self.residual_channels) - dim = self.residual_channels - self.mlp = nn.Sequential( - nn.Linear(dim, dim * 4), - Mish(), - nn.Linear(dim * 4, dim) - ) - self.residual_layers = nn.ModuleList([ - ResidualBlock(self.encoder_hidden, self.residual_channels, 1) - for i in range(self.residual_layers) - ]) - self.skip_projection = Conv1d(self.residual_channels, self.residual_channels, 1) - self.output_projection = Conv1d(self.residual_channels, in_dims, 1) - nn.init.zeros_(self.output_projection.weight) - - def forward(self, spec, diffusion_step, cond): - x = spec.squeeze(0) - x = self.input_projection(x) # x [B, residual_channel, T] - x = F.relu(x) - # skip = torch.randn_like(x) - diffusion_step = diffusion_step.float() - diffusion_step = self.diffusion_embedding(diffusion_step) - diffusion_step = self.mlp(diffusion_step) - - x, skip = self.residual_layers[0](x, cond, diffusion_step) - # noinspection PyTypeChecker - for layer in self.residual_layers[1:]: - x, skip_connection = layer.forward(x, cond, diffusion_step) - skip = skip + skip_connection - x = skip / math.sqrt(len(self.residual_layers)) - x = self.skip_projection(x) - x = F.relu(x) - x = self.output_projection(x) # [B, 80, T] - return x.unsqueeze(1) - - -class AfterDiffusion(nn.Module): - def __init__(self, spec_max, spec_min, v_type='a'): - super().__init__() - self.spec_max = spec_max - self.spec_min = spec_min - self.type = v_type - - def forward(self, x): - x = x.squeeze(1).permute(0, 2, 1) - mel_out = (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min - if self.type == 'nsf-hifigan-log10': - mel_out = mel_out * 0.434294 - return mel_out.transpose(2, 1) - - -class Pred(nn.Module): - def __init__(self, alphas_cumprod): - super().__init__() - self.alphas_cumprod = alphas_cumprod - - def forward(self, x_1, noise_t, t_1, t_prev): - a_t = extract(self.alphas_cumprod, t_1).cpu() - a_prev = extract(self.alphas_cumprod, t_prev).cpu() - a_t_sq, a_prev_sq = a_t.sqrt().cpu(), a_prev.sqrt().cpu() - x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x_1 - 1 / ( - a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t) - x_pred = x_1 + x_delta.cpu() - - return x_pred - - -class GaussianDiffusion(nn.Module): - def __init__(self, - out_dims=128, - n_layers=20, - n_chans=384, - n_hidden=256, - timesteps=1000, - k_step=1000, - max_beta=0.02, - spec_min=-12, - spec_max=2): - super().__init__() - self.denoise_fn = DiffNet(out_dims, n_layers, n_chans, n_hidden) - self.out_dims = out_dims - self.mel_bins = out_dims - self.n_hidden = n_hidden - betas = beta_schedule['linear'](timesteps, max_beta=max_beta) - - alphas = 1. - betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.k_step = k_step - - self.noise_list = deque(maxlen=4) - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod) - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - self.register_buffer('spec_min', torch.FloatTensor([spec_min])[None, None, :out_dims]) - self.register_buffer('spec_max', torch.FloatTensor([spec_max])[None, None, :out_dims]) - self.ad = AfterDiffusion(self.spec_max, self.spec_min) - self.xp = Pred(self.alphas_cumprod) - - def q_mean_variance(self, x_start, t): - mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - variance = extract(1. - self.alphas_cumprod, t, x_start.shape) - log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, cond): - noise_pred = self.denoise_fn(x, t, cond=cond) - x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred) - - x_recon.clamp_(-1., 1.) - - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_plms(self, x, t, interval, cond, clip_denoised=True, repeat_noise=False): - """ - Use the PLMS method from - [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778). 
- """ - - def get_x_pred(x, noise_t, t): - a_t = extract(self.alphas_cumprod, t) - a_prev = extract(self.alphas_cumprod, torch.max(t - interval, torch.zeros_like(t))) - a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt() - - x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - 1 / ( - a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t) - x_pred = x + x_delta - - return x_pred - - noise_list = self.noise_list - noise_pred = self.denoise_fn(x, t, cond=cond) - - if len(noise_list) == 0: - x_pred = get_x_pred(x, noise_pred, t) - noise_pred_prev = self.denoise_fn(x_pred, max(t - interval, 0), cond=cond) - noise_pred_prime = (noise_pred + noise_pred_prev) / 2 - elif len(noise_list) == 1: - noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2 - elif len(noise_list) == 2: - noise_pred_prime = (23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]) / 12 - else: - noise_pred_prime = (55 * noise_pred - 59 * noise_list[-1] + 37 * noise_list[-2] - 9 * noise_list[-3]) / 24 - - x_prev = get_x_pred(x, noise_pred_prime, t) - noise_list.append(noise_pred) - - return x_prev - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return ( - extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise - ) - - def p_losses(self, x_start, t, cond, noise=None, loss_type='l2'): - noise = default(noise, lambda: torch.randn_like(x_start)) - - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - x_recon = self.denoise_fn(x_noisy, t, cond) - - if loss_type == 'l1': - loss = (noise - x_recon).abs().mean() - elif loss_type == 'l2': - loss = F.mse_loss(noise, x_recon) - else: - raise NotImplementedError() - - return loss - - def org_forward(self, - condition, - init_noise=None, - gt_spec=None, - infer=True, - infer_speedup=100, - method='pndm', - k_step=1000, - use_tqdm=True): - """ - conditioning diffusion, use fastspeech2 encoder output as the condition - """ - cond = condition - b, device = condition.shape[0], condition.device - if not infer: - spec = self.norm_spec(gt_spec) - t = torch.randint(0, self.k_step, (b,), device=device).long() - norm_spec = spec.transpose(1, 2)[:, None, :, :] # [B, 1, M, T] - return self.p_losses(norm_spec, t, cond=cond) - else: - shape = (cond.shape[0], 1, self.out_dims, cond.shape[2]) - - if gt_spec is None: - t = self.k_step - if init_noise is None: - x = torch.randn(shape, device=device) - else: - x = init_noise - else: - t = k_step - norm_spec = self.norm_spec(gt_spec) - norm_spec = norm_spec.transpose(1, 2)[:, None, :, :] - x = self.q_sample(x_start=norm_spec, t=torch.tensor([t - 1], device=device).long()) - - if method is not None and infer_speedup > 1: - if method == 'dpm-solver': - from .dpm_solver_pytorch import NoiseScheduleVP, model_wrapper, DPM_Solver - # 1. Define the noise schedule. - noise_schedule = NoiseScheduleVP(schedule='discrete', betas=self.betas[:t]) - - # 2. Convert your discrete-time `model` to the continuous-time - # noise prediction model. Here is an example for a diffusion model - # `model` with the noise prediction type ("noise") . - def my_wrapper(fn): - def wrapped(x, t, **kwargs): - ret = fn(x, t, **kwargs) - if use_tqdm: - self.bar.update(1) - return ret - - return wrapped - - model_fn = model_wrapper( - my_wrapper(self.denoise_fn), - noise_schedule, - model_type="noise", # or "x_start" or "v" or "score" - model_kwargs={"cond": cond} - ) - - # 3. 
Define dpm-solver and sample by singlestep DPM-Solver. - # (We recommend singlestep DPM-Solver for unconditional sampling) - # You can adjust the `steps` to balance the computation - # costs and the sample quality. - dpm_solver = DPM_Solver(model_fn, noise_schedule) - - steps = t // infer_speedup - if use_tqdm: - self.bar = tqdm(desc="sample time step", total=steps) - x = dpm_solver.sample( - x, - steps=steps, - order=3, - skip_type="time_uniform", - method="singlestep", - ) - if use_tqdm: - self.bar.close() - elif method == 'pndm': - self.noise_list = deque(maxlen=4) - if use_tqdm: - for i in tqdm( - reversed(range(0, t, infer_speedup)), desc='sample time step', - total=t // infer_speedup, - ): - x = self.p_sample_plms( - x, torch.full((b,), i, device=device, dtype=torch.long), - infer_speedup, cond=cond - ) - else: - for i in reversed(range(0, t, infer_speedup)): - x = self.p_sample_plms( - x, torch.full((b,), i, device=device, dtype=torch.long), - infer_speedup, cond=cond - ) - else: - raise NotImplementedError(method) - else: - if use_tqdm: - for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t): - x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond) - else: - for i in reversed(range(0, t)): - x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond) - x = x.squeeze(1).transpose(1, 2) # [B, T, M] - return self.denorm_spec(x).transpose(2, 1) - - def norm_spec(self, x): - return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1 - - def denorm_spec(self, x): - return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min - - def get_x_pred(self, x_1, noise_t, t_1, t_prev): - a_t = extract(self.alphas_cumprod, t_1) - a_prev = extract(self.alphas_cumprod, t_prev) - a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt() - x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x_1 - 1 / ( - a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t) - x_pred = x_1 + x_delta - return x_pred - - def OnnxExport(self, project_name=None, init_noise=None, hidden_channels=256, export_denoise=True, export_pred=True, export_after=True): - cond = torch.randn([1, self.n_hidden, 10]).cpu() - if init_noise is None: - x = torch.randn((1, 1, self.mel_bins, cond.shape[2]), dtype=torch.float32).cpu() - else: - x = init_noise - pndms = 100 - - org_y_x = self.org_forward(cond, init_noise=x) - - device = cond.device - n_frames = cond.shape[2] - step_range = torch.arange(0, self.k_step, pndms, dtype=torch.long, device=device).flip(0) - plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device) - noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device) - - ot = step_range[0] - ot_1 = torch.full((1,), ot, device=device, dtype=torch.long) - if export_denoise: - torch.onnx.export( - self.denoise_fn, - (x.cpu(), ot_1.cpu(), cond.cpu()), - f"{project_name}_denoise.onnx", - input_names=["noise", "time", "condition"], - output_names=["noise_pred"], - dynamic_axes={ - "noise": [3], - "condition": [2] - }, - opset_version=16 - ) - - for t in step_range: - t_1 = torch.full((1,), t, device=device, dtype=torch.long) - noise_pred = self.denoise_fn(x, t_1, cond) - t_prev = t_1 - pndms - t_prev = t_prev * (t_prev > 0) - if plms_noise_stage == 0: - if export_pred: - torch.onnx.export( - self.xp, - (x.cpu(), noise_pred.cpu(), t_1.cpu(), t_prev.cpu()), - f"{project_name}_pred.onnx", - input_names=["noise", "noise_pred", "time", "time_prev"], - output_names=["noise_pred_o"], - dynamic_axes={ - 
"noise": [3], - "noise_pred": [3] - }, - opset_version=16 - ) - - x_pred = self.get_x_pred(x, noise_pred, t_1, t_prev) - noise_pred_prev = self.denoise_fn(x_pred, t_prev, cond=cond) - noise_pred_prime = predict_stage0(noise_pred, noise_pred_prev) - - elif plms_noise_stage == 1: - noise_pred_prime = predict_stage1(noise_pred, noise_list) - - elif plms_noise_stage == 2: - noise_pred_prime = predict_stage2(noise_pred, noise_list) - - else: - noise_pred_prime = predict_stage3(noise_pred, noise_list) - - noise_pred = noise_pred.unsqueeze(0) - - if plms_noise_stage < 3: - noise_list = torch.cat((noise_list, noise_pred), dim=0) - plms_noise_stage = plms_noise_stage + 1 - - else: - noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0) - - x = self.get_x_pred(x, noise_pred_prime, t_1, t_prev) - if export_after: - torch.onnx.export( - self.ad, - x.cpu(), - f"{project_name}_after.onnx", - input_names=["x"], - output_names=["mel_out"], - dynamic_axes={ - "x": [3] - }, - opset_version=16 - ) - x = self.ad(x) - - print((x == org_y_x).all()) - return x - - def forward(self, condition=None, init_noise=None, pndms=None, k_step=None): - cond = condition - x = init_noise - - device = cond.device - n_frames = cond.shape[2] - step_range = torch.arange(0, k_step.item(), pndms.item(), dtype=torch.long, device=device).flip(0) - plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device) - noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device) - - ot = step_range[0] - ot_1 = torch.full((1,), ot, device=device, dtype=torch.long) - - for t in step_range: - t_1 = torch.full((1,), t, device=device, dtype=torch.long) - noise_pred = self.denoise_fn(x, t_1, cond) - t_prev = t_1 - pndms - t_prev = t_prev * (t_prev > 0) - if plms_noise_stage == 0: - x_pred = self.get_x_pred(x, noise_pred, t_1, t_prev) - noise_pred_prev = self.denoise_fn(x_pred, t_prev, cond=cond) - noise_pred_prime = predict_stage0(noise_pred, noise_pred_prev) - - elif plms_noise_stage == 1: - noise_pred_prime = predict_stage1(noise_pred, noise_list) - - elif plms_noise_stage == 2: - noise_pred_prime = predict_stage2(noise_pred, noise_list) - - else: - noise_pred_prime = predict_stage3(noise_pred, noise_list) - - noise_pred = noise_pred.unsqueeze(0) - - if plms_noise_stage < 3: - noise_list = torch.cat((noise_list, noise_pred), dim=0) - plms_noise_stage = plms_noise_stage + 1 - - else: - noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0) - - x = self.get_x_pred(x, noise_pred_prime, t_1, t_prev) - x = self.ad(x) - return x diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vdecoder/hifiganwithsnake/env.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vdecoder/hifiganwithsnake/env.py deleted file mode 100644 index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vdecoder/hifiganwithsnake/env.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/block-logical.js b/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/block-logical.js deleted file mode 100644 index 
d223f529dfdb0bed26473fbb73cc757218c59015..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/block-logical.js +++ /dev/null @@ -1,40 +0,0 @@ -let Declaration = require('../declaration') - -class BlockLogical extends Declaration { - /** - * Use old syntax for -moz- and -webkit- - */ - prefixed(prop, prefix) { - if (prop.includes('-start')) { - return prefix + prop.replace('-block-start', '-before') - } - return prefix + prop.replace('-block-end', '-after') - } - - /** - * Return property name by spec - */ - normalize(prop) { - if (prop.includes('-before')) { - return prop.replace('-before', '-block-start') - } - return prop.replace('-after', '-block-end') - } -} - -BlockLogical.names = [ - 'border-block-start', - 'border-block-end', - 'margin-block-start', - 'margin-block-end', - 'padding-block-start', - 'padding-block-end', - 'border-before', - 'border-after', - 'margin-before', - 'margin-after', - 'padding-before', - 'padding-after' -] - -module.exports = BlockLogical diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/text-decoration-skip-ink.js b/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/text-decoration-skip-ink.js deleted file mode 100644 index 25dc4dbe42d4c6e07a55a9c8e059ecf64911ac6b..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/text-decoration-skip-ink.js +++ /dev/null @@ -1,23 +0,0 @@ -let Declaration = require('../declaration') - -class TextDecorationSkipInk extends Declaration { - /** - * Change prefix for ink value - */ - set(decl, prefix) { - if (decl.prop === 'text-decoration-skip-ink' && decl.value === 'auto') { - decl.prop = prefix + 'text-decoration-skip' - decl.value = 'ink' - return decl - } else { - return super.set(decl, prefix) - } - } -} - -TextDecorationSkipInk.names = [ - 'text-decoration-skip-ink', - 'text-decoration-skip' -] - -module.exports = TextDecorationSkipInk diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/fraction.js/fraction.min.js b/spaces/younker/chatgpt-turbo/client/node_modules/fraction.js/fraction.min.js deleted file mode 100644 index f0cc9d5c04d6de4b48b55f32acc1f4d8ae77770c..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/fraction.js/fraction.min.js +++ /dev/null @@ -1,19 +0,0 @@ -/* -Fraction.js v4.2.0 05/03/2022 -https://www.xarg.org/2014/03/rational-numbers-in-javascript/ - -Copyright (c) 2021, Robert Eisele (robert@xarg.org) -Dual licensed under the MIT or GPL Version 2 licenses. 
-*/ -(function(z){function p(a,c){var b=0,d=1,f=1,l=0,k=0,t=0,x=1,u=1,g=0,h=1,v=1,q=1;if(void 0!==a&&null!==a)if(void 0!==c){if(b=a,d=c,f=b*d,0!==b%1||0!==d%1)throw m.NonIntegerParameter;}else switch(typeof a){case "object":if("d"in a&&"n"in a)b=a.n,d=a.d,"s"in a&&(b*=a.s);else if(0 in a)b=a[0],1 in a&&(d=a[1]);else throw m.InvalidParameter;f=b*d;break;case "number":0>a&&(f=a,a=-a);if(0===a%1)b=a;else if(0=h&&1E7>=q;)if(b=(g+ -v)/(h+q),a===b){1E7>=h+q?(b=g+v,d=h+q):q>h?(b=v,d=q):(b=g,d=h);break}else a>b?(g+=v,h+=q):(v+=g,q+=h),1E7f?-1:1;e.n=Math.abs(b);e.d=Math.abs(d)}function r(a,c){if(isNaN(a=parseInt(a,10)))throw m.InvalidParameter;return a*c}function n(a,c){if(0===c)throw m.DivisionByZero; -var b=Object.create(m.prototype);b.s=0>a?-1:1;a=0>a?-a:a;var d=w(a,c);b.n=a/d;b.d=c/d;return b}function y(a){for(var c={},b=a,d=2,f=4;f<=b;){for(;0===b%d;)b/=d,c[d]=(c[d]||0)+1;f+=1+2*d++}b!==a?1e.s?n(Math.pow(this.s*this.d,e.n),Math.pow(this.n,e.n)):n(Math.pow(this.s*this.n,e.n),Math.pow(this.d, -e.n));if(0>this.s)return null;var b=y(this.n),d=y(this.d),f=1,l=1,k;for(k in b)if("1"!==k){if("0"===k){f=0;break}b[k]*=e.n;if(0===b[k]%e.d)b[k]/=e.d;else return null;f*=Math.pow(k,b[k])}for(k in d)if("1"!==k){d[k]*=e.n;if(0===d[k]%e.d)d[k]/=e.d;else return null;l*=Math.pow(k,d[k])}return 0>e.s?n(l,f):n(f,l)},equals:function(a,c){p(a,c);return this.s*this.n*e.d===e.s*e.n*this.d},compare:function(a,c){p(a,c);var b=this.s*this.n*e.d-e.s*e.n*this.d;return(0b)},simplify:function(a){if(isNaN(this.n)|| -isNaN(this.d))return this;a=a||.001;for(var c=this.abs(),b=c.toContinued(),d=1;dthis.s&&(b+="-");1===f?b+=d:(a&&0<(c=Math.floor(d/f))&&(b=b+c+" ",d%=f),b=b+d+"/",b+=f);return b},toLatex:function(a){var c, -b="",d=this.n,f=this.d;0>this.s&&(b+="-");1===f?b+=d:(a&&0<(c=Math.floor(d/f))&&(b+=c,d%=f),b=b+"\\frac{"+d+"}{"+f,b+="}");return b},toContinued:function(){var a=this.n,c=this.d,b=[];if(isNaN(a)||isNaN(c))return b;do{b.push(Math.floor(a/c));var d=a%c;a=c;c=d}while(1!==a);return b},toString:function(a){var c=this.n,b=this.d;if(isNaN(c)||isNaN(b))return"NaN";var d;a:{for(d=b;0===d%2;d/=2);for(;0===d%5;d/=5);if(1===d)d=0;else{for(var f=10%d,l=1;1!==f;l++)if(f=10*f%d,2E3>=1)k&1&&(t=t*l%b);l=t;for(k=0;300>k;k++){if(f===l){l=k;break a}f=10*f%b;l=10*l%b}l=0}f=0>this.s?"-":"";f+=c/b|0;(c=c%b*10)&&(f+=".");if(d){for(a=l;a--;)f+=c/b|0,c%=b,c*=10;f+="(";for(a=d;a--;)f+=c/b|0,c%=b,c*=10;f+=")"}else for(a=a||15;c&&a--;)f+=c/b|0,c%=b,c*=10;return f}};"function"===typeof define&&define.amd?define([],function(){return m}):"object"===typeof exports?(Object.defineProperty(m,"__esModule",{value:!0}),m["default"]=m,m.Fraction=m,module.exports=m): -z.Fraction=m})(this); \ No newline at end of file diff --git "a/spaces/yunfei0710/gpt-academic/crazy_functions/\346\225\260\345\255\246\345\212\250\347\224\273\347\224\237\346\210\220manim.py" "b/spaces/yunfei0710/gpt-academic/crazy_functions/\346\225\260\345\255\246\345\212\250\347\224\273\347\224\237\346\210\220manim.py" deleted file mode 100644 index 26e61b1b3032c180b4cb59625eba00d5f7b7c441..0000000000000000000000000000000000000000 --- "a/spaces/yunfei0710/gpt-academic/crazy_functions/\346\225\260\345\255\246\345\212\250\347\224\273\347\224\237\346\210\220manim.py" +++ /dev/null @@ -1,187 +0,0 @@ -from toolbox import CatchException, update_ui, gen_time_str -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from .crazy_utils import input_clipping - -def inspect_dependency(chatbot, history): - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import manim - return True - except: - 
chatbot.append(["导入依赖失败", "使用该模块需要额外依赖,安装方法:```pip install manim manimgl```"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return False - -def eval_manim(code): - import subprocess, sys, os, shutil - - with open('gpt_log/MyAnimation.py', 'w', encoding='utf8') as f: - f.write(code) - - def get_class_name(class_string): - import re - # Use regex to extract the class name - class_name = re.search(r'class (\w+)\(', class_string).group(1) - return class_name - - class_name = get_class_name(code) - - try: - subprocess.check_output([sys.executable, '-c', f"from gpt_log.MyAnimation import {class_name}; {class_name}().render()"]) - shutil.move('media/videos/1080p60/{class_name}.mp4', f'gpt_log/{class_name}-{gen_time_str()}.mp4') - return f'gpt_log/{gen_time_str()}.mp4' - except subprocess.CalledProcessError as e: - output = e.output.decode() - print(f"Command returned non-zero exit status {e.returncode}: {output}.") - return f"Evaluating python script failed: {e.output}." - except: - print('generating mp4 failed') - return "Generating mp4 failed." - - -def get_code_block(reply): - import re - pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks - matches = re.findall(pattern, reply) # find all code blocks in text - if len(matches) != 1: - raise RuntimeError("GPT is not generating proper code.") - return matches[0].strip('python') # code block - -@CatchException -def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - # 清空历史,以免输入溢出 - history = [] - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "生成数学动画, 此插件处于开发阶段, 建议暂时不要使用, 作者: binary-husky, 插件初始化中 ..." - ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖, 如果缺少依赖, 则给出安装建议 - dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history) # 刷新界面 - if not dep_ok: return - - # 输入 - i_say = f'Generate a animation to show: ' + txt - demo = ["Here is some examples of manim", examples_of_manim()] - _, demo = input_clipping(inputs="", history=demo, max_token_limit=2560) - # 开始 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo, - sys_prompt= - r"Write a animation script with 3blue1brown's manim. "+ - r"Please begin with `from manim import *`. " + - r"Answer me with a code block wrapped by ```." 
-    )
-    chatbot.append(["开始生成动画", "..."])
-    history.extend([i_say, gpt_say])
-    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-    # turn the generated code into an animation
-    code = get_code_block(gpt_say)
-    res = eval_manim(code)
-
-    chatbot.append(("生成的视频文件路径", res))
-    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-# a few demos collected from the web, used as few-shot examples to help GPT generate code
-def examples_of_manim():
-    return r"""
-
-
-```
-
-class MovingGroupToDestination(Scene):
-    def construct(self):
-        group = VGroup(Dot(LEFT), Dot(ORIGIN), Dot(RIGHT, color=RED), Dot(2 * RIGHT)).scale(1.4)
-        dest = Dot([4, 3, 0], color=YELLOW)
-        self.add(group, dest)
-        self.play(group.animate.shift(dest.get_center() - group[2].get_center()))
-        self.wait(0.5)
-
-```
-
-
-```
-
-class LatexWithMovingFramebox(Scene):
-    def construct(self):
-        text=MathTex(
-            "\\frac{d}{dx}f(x)g(x)=","f(x)\\frac{d}{dx}g(x)","+",
-            "g(x)\\frac{d}{dx}f(x)"
-        )
-        self.play(Write(text))
-        framebox1 = SurroundingRectangle(text[1], buff = .1)
-        framebox2 = SurroundingRectangle(text[3], buff = .1)
-        self.play(
-            Create(framebox1),
-        )
-        self.wait()
-        self.play(
-            ReplacementTransform(framebox1,framebox2),
-        )
-        self.wait()
-
-```
-
-
-
-```
-
-class PointWithTrace(Scene):
-    def construct(self):
-        path = VMobject()
-        dot = Dot()
-        path.set_points_as_corners([dot.get_center(), dot.get_center()])
-        def update_path(path):
-            previous_path = path.copy()
-            previous_path.add_points_as_corners([dot.get_center()])
-            path.become(previous_path)
-        path.add_updater(update_path)
-        self.add(path, dot)
-        self.play(Rotating(dot, radians=PI, about_point=RIGHT, run_time=2))
-        self.wait()
-        self.play(dot.animate.shift(UP))
-        self.play(dot.animate.shift(LEFT))
-        self.wait()
-
-```
-
-```
-
-# do not use get_graph, this function is deprecated
-
-class ExampleFunctionGraph(Scene):
-    def construct(self):
-        cos_func = FunctionGraph(
-            lambda t: np.cos(t) + 0.5 * np.cos(7 * t) + (1 / 7) * np.cos(14 * t),
-            color=RED,
-        )
-
-        sin_func_1 = FunctionGraph(
-            lambda t: np.sin(t) + 0.5 * np.sin(7 * t) + (1 / 7) * np.sin(14 * t),
-            color=BLUE,
-        )
-
-        sin_func_2 = FunctionGraph(
-            lambda t: np.sin(t) + 0.5 * np.sin(7 * t) + (1 / 7) * np.sin(14 * t),
-            x_range=[-4, 4],
-            color=GREEN,
-        ).move_to([0, 1, 0])
-
-        self.add(cos_func, sin_func_1, sin_func_2)
-
-```
-"""
\ No newline at end of file
diff --git a/spaces/zeno-ml/openai-evals/README.md b/spaces/zeno-ml/openai-evals/README.md
deleted file mode 100644
index 09bc8c7538d4bb029f01ce83453ff01c7c169be4..0000000000000000000000000000000000000000
--- a/spaces/zeno-ml/openai-evals/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: Zeno Evals Hub
-emoji: 🏃
-colorFrom: pink
-colorTo: indigo
-sdk: docker
-pinned: false
-license: mit
-fullWidth: true
----
-
-# Zeno + OpenAI Evals
-
-![Github Actions CI tests](https://github.com/zeno-ml/zeno-openai-evals/actions/workflows/test.yml/badge.svg)
-[![MIT license](https://img.shields.io/badge/License-MIT-blue.svg)](https://lbesson.mit-license.org/)
-[![Discord](https://img.shields.io/discord/1086004954872950834)](https://discord.gg/km62pDKAkE)
-
-OpenAI's [Evals library](https://github.com/openai/evals) is a great resource providing evaluation sets for LLMs.
-
-This repo provides a hub for exploring these results using the [Zeno](https://zenoml.com) evaluation tool.
-
-## Add New Evals
-
-To add new evals, add a new entry to `evals/evals.yaml` with the following fields (an illustrative entry is sketched just after this list):
-
-- `results-file`: The first `.jsonl` result from `oaievals`
-- `link`: A link to the evals commit for this evaluation
-- `description`: A succinct description of what the evaluation is testing
-- `second-results-file`: An optional second `.jsonl` result from `oaievals`. Must be the same dataset as the first one.
-- `functions-file`: An optional Python file with [Zeno functions](https://zenoml.com/docs/api) for the evaluations.
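For illustration, an entry combining these fields might look like the sketch below. The README does not show the surrounding top-level layout of `evals.yaml`, so the entry name, file paths, and commit URL here are placeholders rather than values taken from the repository:

```yaml
# Hypothetical entry: only the field names come from the list above.
example-eval:
  results-file: results/example-eval-gpt-3.5-turbo.jsonl
  link: https://github.com/openai/evals/commit/<commit-sha>
  description: Checks whether the model answers short factual questions about example topics
  second-results-file: results/example-eval-gpt-4.jsonl   # optional
  functions-file: evals/example_functions.py              # optional
```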
-
-Make sure you test your evals locally before submitting a PR!
-
-### Running
-
-`poetry install`
-
-`python -m zeno-evals-hub evals/evals.yaml`
diff --git a/spaces/zhangyd/bingo/src/lib/hooks/use-enter-submit.tsx b/spaces/zhangyd/bingo/src/lib/hooks/use-enter-submit.tsx
deleted file mode 100644
index d66b2d3253baff164235d4ca791aae6d84721835..0000000000000000000000000000000000000000
--- a/spaces/zhangyd/bingo/src/lib/hooks/use-enter-submit.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import { useRef, type RefObject } from 'react'
-
-export function useEnterSubmit(): {
-  formRef: RefObject<HTMLFormElement>
-  onKeyDown: (event: React.KeyboardEvent<HTMLTextAreaElement>) => void
-} {
-  const formRef = useRef<HTMLFormElement>(null)
-
-  const handleKeyDown = (
-    event: React.KeyboardEvent<HTMLTextAreaElement>
-  ): void => {
-    if (
-      event.key === 'Enter' &&
-      !event.shiftKey &&
-      !event.nativeEvent.isComposing
-    ) {
-      formRef.current?.requestSubmit()
-      event.preventDefault()
-    }
-  }
-
-  return { formRef, onKeyDown: handleKeyDown }
-}