-# I use a set of ATNConfig objects not simple states. An ATNConfig
-# is both a state (ala normal conversion) and a RuleContext describing
-# the chain of rules (if any) followed to arrive at that state.
-#
-# A DFA state may have multiple references to a particular state,
-# but with different ATN contexts (with same or different alts)
-# meaning that state was reached via a different set of rule invocations.
-#/
-class DFAState(object):
-
- def __init__(self, stateNumber:int=-1, configs:ATNConfigSet=ATNConfigSet()):
- self.stateNumber = stateNumber
- self.configs = configs
- # {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1)
- # {@link Token#EOF} maps to {@code edges[0]}.
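- # For example, the edge taken on Token.EOF (-1) is stored at edges[0], and the edge for token type t at edges[t+1].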
- self.edges = None
- self.isAcceptState = False
- # if accept state, what ttype do we match or alt do we predict?
- # This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or
- # {@link #requiresFullContext}.
- self.prediction = 0
- self.lexerActionExecutor = None
- # Indicates that this state was created during SLL prediction that
- # discovered a conflict between the configurations in the state. Future
- # {@link ParserATNSimulator#execATN} invocations immediately jump to
- # full context prediction if this field is true.
- self.requiresFullContext = False
- # During SLL parsing, this is a list of predicates associated with the
- # ATN configurations of the DFA state. When we have predicates,
- # {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates
- # on-the-fly. If this is not null, then {@link #prediction} is
- # {@link ATN#INVALID_ALT_NUMBER}.
- #
- # We only use these for non-{@link #requiresFullContext} but conflicting states. That
- # means we know from the context (it's $ or we don't dip into outer
- # context) that it's an ambiguity not a conflict.
- #
- # This list is computed by {@link ParserATNSimulator#predicateDFAState}.
- self.predicates = None
-
-
-
- # Get the set of all alts mentioned by all ATN configurations in this
- # DFA state.
- def getAltSet(self):
- if self.configs is not None:
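- # an empty config set is falsy, so this returns None rather than an empty set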
- return set(cfg.alt for cfg in self.configs) or None
- return None
-
- def __hash__(self):
- return hash(self.configs)
-
- # Two {@link DFAState} instances are equal if their ATN configuration sets
- # are the same. This method is used to see if a state already exists.
- #
- # Because the number of alternatives and number of ATN configurations are
- # finite, there is a finite number of DFA states that can be processed.
- # This is necessary to show that the algorithm terminates.
- #
- # Cannot test the DFA state numbers here because in
- # {@link ParserATNSimulator#addDFAState} we need to know if any other state
- # exists that has this exact set of ATN configurations. The
- # {@link #stateNumber} is irrelevant.
- def __eq__(self, other):
- # compare set of ATN configurations in this set with other
- if self is other:
- return True
- elif not isinstance(other, DFAState):
- return False
- else:
- return self.configs==other.configs
-
- def __str__(self):
- with StringIO() as buf:
- buf.write(str(self.stateNumber))
- buf.write(":")
- buf.write(str(self.configs))
- if self.isAcceptState:
- buf.write("=>")
- if self.predicates is not None:
- buf.write(str(self.predicates))
- else:
- buf.write(str(self.prediction))
- return buf.getvalue()
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/criterions/fairseq_criterion.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/criterions/fairseq_criterion.py
deleted file mode 100644
index ff4beb02503ea48a6c09596630aad4c710be94b6..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/criterions/fairseq_criterion.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import inspect
-from typing import Any, Dict, List
-
-from fairseq import metrics, utils
-from fairseq.dataclass import FairseqDataclass
-from fairseq.dataclass.utils import gen_parser_from_dataclass
-from torch.nn.modules.loss import _Loss
-
-
-class FairseqCriterion(_Loss):
- def __init__(self, task):
- super().__init__()
- self.task = task
- if hasattr(task, "target_dictionary"):
- tgt_dict = task.target_dictionary
- self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100
-
- @classmethod
- def add_args(cls, parser):
- """Add criterion-specific arguments to the parser."""
- dc = getattr(cls, "__dataclass", None)
- if dc is not None:
- gen_parser_from_dataclass(parser, dc())
-
- @classmethod
- def build_criterion(cls, cfg: FairseqDataclass, task):
- """Construct a criterion from command-line args."""
- # Inspect the criterion's __init__ signature and infer each argument from the task, the cfg, or its default.
- init_args = {}
- for p in inspect.signature(cls).parameters.values():
- if (
- p.kind == p.POSITIONAL_ONLY
- or p.kind == p.VAR_POSITIONAL
- or p.kind == p.VAR_KEYWORD
- ):
- # we haven't implemented inference for these argument types,
- # but PRs welcome :)
- raise NotImplementedError("{} not supported".format(p.kind))
-
- assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY}
-
- if p.name == "task":
- init_args["task"] = task
- elif p.name == "cfg":
- init_args["cfg"] = cfg
- elif hasattr(cfg, p.name):
- init_args[p.name] = getattr(cfg, p.name)
- elif p.default != p.empty:
- pass # we'll use the default value
- else:
- raise NotImplementedError(
- "Unable to infer Criterion arguments, please implement "
- "{}.build_criterion".format(cls.__name__)
- )
- return cls(**init_args)
-
- def forward(self, model, sample, reduce=True):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
- raise NotImplementedError
-
- @staticmethod
- def aggregate_logging_outputs(
- logging_outputs: List[Dict[str, Any]]
- ) -> Dict[str, Any]:
- """Aggregate logging outputs from data parallel training."""
- utils.deprecation_warning(
- "The aggregate_logging_outputs API is deprecated. "
- "Please use the reduce_metrics API instead."
- )
- raise NotImplementedError
-
- @classmethod
- def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
- """Aggregate logging outputs from data parallel training."""
- utils.deprecation_warning(
- "Criterions should implement the reduce_metrics API. "
- "Falling back to deprecated aggregate_logging_outputs API."
- )
- agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
- for k, v in agg_logging_outputs.items():
- if k in {"nsentences", "ntokens", "sample_size"}:
- continue
- metrics.log_scalar(k, v)
-
- @staticmethod
- def logging_outputs_can_be_summed() -> bool:
- """
- Whether the logging outputs returned by `forward` can be summed
- across workers prior to calling `reduce_metrics`. Setting this
- to True will improve distributed training speed.
- """
- return False
-
-
-class LegacyFairseqCriterion(FairseqCriterion):
- def __init__(self, args, task):
- super().__init__(task=task)
- self.args = args
-
- utils.deprecation_warning(
- "Criterions should take explicit arguments instead of an "
- "argparse.Namespace object, please update your criterion by "
- "extending FairseqCriterion instead of LegacyFairseqCriterion."
- )
-
- @classmethod
- def build_criterion(cls, args, task):
- """Construct a criterion from command-line args."""
- return cls(args, task)
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/fairseq_dataset.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/fairseq_dataset.py
deleted file mode 100644
index 2bde7fc57b99df2e14e2186a5f9cd98982870ddd..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/fairseq_dataset.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import numpy as np
-import torch.utils.data
-from fairseq.data import data_utils
-
-logger = logging.getLogger(__name__)
-
-
-class EpochListening:
- """Mixin for receiving updates whenever the epoch increments."""
-
- @property
- def can_reuse_epoch_itr_across_epochs(self):
- """
- Whether we can reuse the :class:`fairseq.data.EpochBatchIterator` for
- this dataset across epochs.
-
- This needs to return ``False`` if the sample sizes can change across
- epochs, in which case we may need to regenerate batches at each epoch.
- If your dataset relies on ``set_epoch`` then you should consider setting
- this to ``False``.
- """
- return True
-
- def set_epoch(self, epoch):
- """Will receive the updated epoch number at the beginning of the epoch."""
- pass
-
-
-class FairseqDataset(torch.utils.data.Dataset, EpochListening):
- """A dataset that provides helpers for batching."""
-
- def __getitem__(self, index):
- raise NotImplementedError
-
- def __len__(self):
- raise NotImplementedError
-
- def collater(self, samples):
- """Merge a list of samples to form a mini-batch.
-
- Args:
- samples (List[dict]): samples to collate
-
- Returns:
- dict: a mini-batch suitable for forwarding with a Model
- """
- raise NotImplementedError
-
- def num_tokens(self, index):
- """Return the number of tokens in a sample. This value is used to
- enforce ``--max-tokens`` during batching."""
- raise NotImplementedError
-
- def num_tokens_vec(self, indices):
- """Return the number of tokens for a set of positions defined by indices.
- This value is used to enforce ``--max-tokens`` during batching."""
- raise NotImplementedError
-
- def size(self, index):
- """Return an example's size as a float or tuple. This value is used when
- filtering a dataset with ``--max-positions``."""
- raise NotImplementedError
-
- def ordered_indices(self):
- """Return an ordered list of indices. Batches will be constructed based
- on this order."""
- return np.arange(len(self), dtype=np.int64)
-
- @property
- def supports_prefetch(self):
- """Whether this dataset supports prefetching."""
- return False
-
- def attr(self, attr: str, index: int):
- return getattr(self, attr, None)
-
- def prefetch(self, indices):
- """Prefetch the data required for this epoch."""
- raise NotImplementedError
-
- def get_batch_shapes(self):
- """
- Return a list of valid batch shapes, for example::
-
- [(8, 512), (16, 256), (32, 128)]
-
- The first dimension of each tuple is the batch size and can be ``None``
- to automatically infer the max batch size based on ``--max-tokens``.
- The second dimension of each tuple is the max supported length as given
- by :func:`fairseq.data.FairseqDataset.num_tokens`.
-
- This will be used by :func:`fairseq.data.FairseqDataset.batch_by_size`
- to restrict batch shapes. This is useful on TPUs to avoid too many
- dynamic shapes (and recompilations).
- """
- return None
-
- def batch_by_size(
- self,
- indices,
- max_tokens=None,
- max_sentences=None,
- required_batch_size_multiple=1,
- ):
- """
- Given an ordered set of indices, return batches according to
- *max_tokens*, *max_sentences* and *required_batch_size_multiple*.
- """
- from fairseq.data import data_utils
-
- fixed_shapes = self.get_batch_shapes()
- if fixed_shapes is not None:
-
- def adjust_bsz(bsz, num_tokens):
- if bsz is None:
- assert max_tokens is not None, "Must specify --max-tokens"
- bsz = max_tokens // num_tokens
- if max_sentences is not None:
- bsz = min(bsz, max_sentences)
- elif (
- bsz >= required_batch_size_multiple
- and bsz % required_batch_size_multiple != 0
- ):
- bsz -= bsz % required_batch_size_multiple
- return bsz
-
- fixed_shapes = np.array(
- [
- [adjust_bsz(bsz, num_tokens), num_tokens]
- for (bsz, num_tokens) in fixed_shapes
- ]
- )
-
- try:
- num_tokens_vec = self.num_tokens_vec(indices).astype("int64")
- except NotImplementedError:
- num_tokens_vec = None
-
- return data_utils.batch_by_size(
- indices,
- num_tokens_fn=self.num_tokens,
- num_tokens_vec=num_tokens_vec,
- max_tokens=max_tokens,
- max_sentences=max_sentences,
- required_batch_size_multiple=required_batch_size_multiple,
- fixed_shapes=fixed_shapes,
- )
-
- def filter_indices_by_size(self, indices, max_sizes):
- """
- Filter a list of sample indices. Remove those that are longer than
- specified in *max_sizes*.
-
- WARNING: don't update, override method in child classes
-
- Args:
- indices (np.array): original array of sample indices
- max_sizes (int or list[int] or tuple[int]): max sample size,
- can be defined separately for src and tgt (then list or tuple)
-
- Returns:
- np.array: filtered sample array
- list: list of removed indices
- """
- if isinstance(max_sizes, float) or isinstance(max_sizes, int):
- if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray):
- ignored = indices[self.sizes[indices] > max_sizes].tolist()
- indices = indices[self.sizes[indices] <= max_sizes]
- elif (
- hasattr(self, "sizes")
- and isinstance(self.sizes, list)
- and len(self.sizes) == 1
- ):
- ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
- indices = indices[self.sizes[0][indices] <= max_sizes]
- else:
- indices, ignored = data_utils._filter_by_size_dynamic(
- indices, self.size, max_sizes
- )
- else:
- indices, ignored = data_utils._filter_by_size_dynamic(
- indices, self.size, max_sizes
- )
- return indices, ignored
-
- @property
- def supports_fetch_outside_dataloader(self):
- """Whether this dataset supports fetching outside the workers of the dataloader."""
- return True
-
-
-class FairseqIterableDataset(torch.utils.data.IterableDataset, EpochListening):
- """
- For datasets that need to be read sequentially, usually because the data is
- being streamed or otherwise can't be manipulated on a single machine.
- """
-
- def __iter__(self):
- raise NotImplementedError
diff --git a/spaces/arxnov/anotest/ONNXVITS_to_onnx.py b/spaces/arxnov/anotest/ONNXVITS_to_onnx.py
deleted file mode 100644
index 846e39849535ed08accb10d7001f2431a851d372..0000000000000000000000000000000000000000
--- a/spaces/arxnov/anotest/ONNXVITS_to_onnx.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import ONNXVITS_models
-import utils
-from text import text_to_sequence
-import torch
-import commons
-
-def get_text(text, hps):
- text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
- if hps.data.add_blank:
- text_norm = commons.intersperse(text_norm, 0)
- text_norm = torch.LongTensor(text_norm)
- return text_norm
-
-hps = utils.get_hparams_from_file("../vits/pretrained_models/uma87.json")
-symbols = hps.symbols
-net_g = ONNXVITS_models.SynthesizerTrn(
- len(symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- n_speakers=hps.data.n_speakers,
- **hps.model)
-_ = net_g.eval()
-_ = utils.load_checkpoint("../vits/pretrained_models/uma_1153000.pth", net_g)
-
-text1 = get_text("ありがとうございます。", hps)
-stn_tst = text1
-with torch.no_grad():
- x_tst = stn_tst.unsqueeze(0)
- x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
- sid = torch.tensor([0])
- o = net_g(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)
\ No newline at end of file
diff --git a/spaces/aryadytm/remove-photo-background/app.py b/spaces/aryadytm/remove-photo-background/app.py
deleted file mode 100644
index c7240ff5fe26fad0f5dda8e99fd8de5da63311e6..0000000000000000000000000000000000000000
--- a/spaces/aryadytm/remove-photo-background/app.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import streamlit as st
-import os
-from datetime import datetime
-from PIL import Image
-from io import BytesIO
-
-from src.utils import change_background, matte
-from src.st_style import apply_prod_style
-
-# apply_prod_style(st) # NOTE: Uncomment this for production!
-
-
-def image_download_button(pil_image, filename: str, fmt: str, label="Download"):
- if fmt not in ["jpg", "png"]:
- raise Exception(f"Unknown image format: {fmt} (available: jpg, png - case sensitive)")
-
- pil_format = "JPEG" if fmt == "jpg" else "PNG"
- file_format = "jpg" if fmt == "jpg" else "png"
- mime = "image/jpeg" if fmt == "jpg" else "image/png"
-
- buf = BytesIO()
- pil_image.save(buf, format=pil_format)
-
- return st.download_button(
- label=label,
- data=buf.getvalue(),
- file_name=f'{filename}.{file_format}',
- mime=mime,
- )
-
-
-st.title("AI Photo Background Removal")
-st.image(Image.open("assets/demo.jpg"))
-st.write(
- """
- You want to remove your photo background, but don't have the time and effort to learn photo editing skills?
- **This app will change or remove your photo background, in seconds.**
- """
-)
-
-uploaded_file = st.file_uploader(
- label="Upload your photo here",
- accept_multiple_files=False, type=["png", "jpg", "jpeg"],
-)
-
-if uploaded_file is not None:
-
- with st.expander("Original photo", expanded=True):
- if uploaded_file is not None:
- st.image(uploaded_file)
- else:
- st.warning("You haven't uploaded any photo yet")
-
- in_mode = st.selectbox("Choose background color", ["Transparent (PNG)", "White", "Black", "Green", "Red", "Blue"])
- in_submit = st.button("Submit")
-
- if uploaded_file is not None and in_submit:
- img_input = Image.open(uploaded_file)
-
- with st.spinner("AI is doing magic to your photo. Please wait..."):
- hexmap = {
- "Transparent (PNG)": "#000000",
- "Black": "#000000",
- "White": "#FFFFFF",
- "Green": "#22EE22",
- "Red": "#EE2222",
- "Blue": "#2222EE",
- }
- alpha = 0.0 if in_mode == "Transparent (PNG)" else 1.0
- img_matte = matte(img_input)
- img_output = change_background(img_input, img_matte, background_alpha=alpha, background_hex=hexmap[in_mode])
-
- with st.expander("Success!", expanded=True):
- st.image(img_output)
- uploaded_name = os.path.splitext(uploaded_file.name)[0]
- image_download_button(
- pil_image=img_output,
- filename=uploaded_name,
- fmt="png"
- )
\ No newline at end of file
diff --git a/spaces/attention-refocusing/Attention-refocusing/gligen/create_meta.py b/spaces/attention-refocusing/Attention-refocusing/gligen/create_meta.py
deleted file mode 100644
index 7512c6d377df98db7e17515a7143b7a4ef7d5f32..0000000000000000000000000000000000000000
--- a/spaces/attention-refocusing/Attention-refocusing/gligen/create_meta.py
+++ /dev/null
@@ -1,170 +0,0 @@
-CKPTS = [
-
- dict(
- path="/home/chunyl/azure_mount/yuhengdb/fine_tune_ldm/version5_branch6_output/GoldG+SBU+CC3M+CC12M+O365/second_stage_drop_both/tag01/checkpoint_00450001.pth",
- feature_type=['before','after_reproject'],
- save_folder_name="v5b6_drop_both",
- ),
-
-
- # dict(
- # path="/home/v-yuhengli/blobfuse/output/fine_tune_ldm/version5_branch6_output/GoldG+SBU+CC3M+CC12M+O365/second_stage_drop_none/tag00/checkpoint_00165001.pth",
- # feature_type=['before','after_reproject'],
- # save_folder_name="v5b6_drop_none",
- # ),
-
-
-
-
-
-]
-
-
-
-# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
-
-
-
-
-
-
-
-
- # if meta["has_image_mask"] == 0:
- # image_embeddings = text_embeddings
- # if meta["has_text_mask"] == 0:
- # text_embeddings = image_embeddings
-
- # out = {
- # "boxes" : boxes.unsqueeze(0).repeat(batch,1,1),
- # "masks" : masks.unsqueeze(0).repeat(batch,1),
- # "text_masks" : masks.unsqueeze(0).repeat(batch,1),
- # "image_masks" : masks.unsqueeze(0).repeat(batch,1),
- # "text_embeddings" : text_embeddings.unsqueeze(0).repeat(batch,1,1),
- # "image_embeddings" : image_embeddings.unsqueeze(0).repeat(batch,1,1)
- # }
-
-
-
-
-
-
-
-META = [
-
-
- dict(
- prompt = "a teddy bear sitting next to a red bird",
- phrases = ['a teddy bear', 'a red bird'],
- images = ['images/teddy.jpg', 'images/red_bird.jpg'],
- locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
- alpha_type = [1.0, 0, 0.0],
- has_text_mask = 1,
- has_image_mask = 0,
- save_folder_name="teddy_bird_1_1"
- ),
-
-
- # dict(
- # prompt = "a teddy bear sitting next to a bird",
- # phrases = ['a teddy bear', 'a bird'],
- # images = ['images/teddy.jpg', 'images/red_bird.jpg'],
- # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
- # alpha_type = [1.0, 0, 0.0],
- # has_text_mask = 1,
- # has_image_mask = 1,
- # save_folder_name="teddy_bird_1_1"
- # ),
-
-
- # dict(
- # prompt = "a teddy bear sitting next to a bird",
- # phrases = ['a teddy bear', 'a bird'],
- # images = ['images/teddy.jpg', 'images/red_bird.jpg'],
- # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
- # alpha_type = [0.5, 0, 0.5],
- # has_text_mask = 1,
- # has_image_mask = 0,
- # save_folder_name="teddy_bird_1_0"
- # ),
-
- # dict(
- # prompt = "",
- # phrases = ['a teddy bear', 'an umbrella'],
- # images = ['images/teddy.jpg', 'images/umbrella.png'],
- # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
- # alpha_type = [1.0, 0, 0.0],
- # has_text_mask = 1,
- # has_image_mask = 1,
- # save_folder_name="empty_teddy_umbrella_1_1"
- # ),
-
- # dict(
- # prompt = "hello kitty and bird hybrid",
- # phrases = ['a hello kitty', 'a hello kitty'],
- # images = ['images/red_bird.jpg', 'images/red_bird.jpg'],
- # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
- # has_text_mask = 1,
- # has_image_mask = 1,
- # save_folder_name="hello+bird_1_1"
- # ),
-
- # dict(
- # prompt = "hello kitty and teddy bear hybrid",
- # phrases = ['a hello kitty', 'a hello kitty'],
- # images = ['images/teddy.jpg', 'images/teddy.jpg'],
- # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
- # has_text_mask = 1,
- # has_image_mask = 1,
- # save_folder_name="hello+teddy_1_1"
- # ),
-
- # dict(
- # prompt = "bird and hello kitty hybrid",
- # phrases = ['a bird', 'a bird'],
- # images = ['images/hello.jpg', 'images/hello.jpg'],
- # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
- # alpha_type = [1.0, 0, 0.0],
- # has_text_mask = 1,
- # has_image_mask = 0.5,
- # save_folder_name="bird+hello_1_1"
- # ),
-
-
-
- # dict(
- # prompt = "a deer standing in front of a brick house in the woods, anime, oil painting, high resolution, cottagecore, ghibli inspired, 4k",
- # phrases = ['a deer'],
- # images = ['images/sky.jpg'],
- # locations = [ [0.0,0.5,0.5,0.9] ],
- # alpha_type = [1, 0, 0],
- # has_text_mask = 1,
- # has_image_mask = 1,
- # save_folder_name="deer_sky"
- # ),
-
-
- # dict(
- # prompt = "A woman sitting in a restaurant with a slice of pizza in front of her",
- # phrases = ['dining table', 'pizza', 'person', 'wall', 'car', 'paper', 'chair', 'window', 'bottle', 'cup'],
- # images = ['images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg'],
- # locations = [ [0.0030, 0.3589, 1.0000, 1.0000],
- # [0.0779, 0.6744, 0.9768, 1.0000],
- # [0.2236, 0.0000, 0.7809, 0.4352],
- # [0.0000, 0.0000, 0.4313, 0.4505],
- # [0.6275, 0.1050, 0.9444, 0.2497],
- # [0.0000, 0.3859, 0.1250, 0.6922],
- # [0.7137, 0.2389, 0.8540, 0.4549],
- # [0.0000, 0.0000, 0.4667, 0.0630],
- # [0.3822, 0.4235, 0.4932, 0.6575],
- # [0.6616, 0.3617, 0.7880, 0.5165] ],
- # alpha_type = [0.0, 0, 1.0],
- # has_text_mask = 1,
- # has_image_mask = 0,
- # save_folder_name="pizza_1_0"
- # ),
-
-
-
-
-]
\ No newline at end of file
diff --git a/spaces/aupfe08/stt_or_tts/README.md b/spaces/aupfe08/stt_or_tts/README.md
deleted file mode 100644
index ec290abad5aedf84c35ccec62e306405530f0407..0000000000000000000000000000000000000000
--- a/spaces/aupfe08/stt_or_tts/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stt Or Tts
-emoji: 🐨
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/avivdm1/AutoGPT/tests/milvus_memory_test.py b/spaces/avivdm1/AutoGPT/tests/milvus_memory_test.py
deleted file mode 100644
index 84fd6e6d5006e781fa5e1065f949b2160537d913..0000000000000000000000000000000000000000
--- a/spaces/avivdm1/AutoGPT/tests/milvus_memory_test.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# sourcery skip: snake-case-functions
-"""Tests for the MilvusMemory class."""
-import os
-import sys
-import unittest
-
-try:
- from autogpt.memory.milvus import MilvusMemory
-
- def mock_config() -> dict:
- """Mock the Config class"""
- return type(
- "MockConfig",
- (object,),
- {
- "debug_mode": False,
- "continuous_mode": False,
- "speak_mode": False,
- "milvus_collection": "autogpt",
- "milvus_addr": "localhost:19530",
- },
- )
-
- class TestMilvusMemory(unittest.TestCase):
- """Tests for the MilvusMemory class."""
-
- def setUp(self) -> None:
- """Set up the test environment"""
- self.cfg = mock_config()
- self.memory = MilvusMemory(self.cfg)
-
- def test_add(self) -> None:
- """Test adding a text to the cache"""
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- result = self.memory.get(text)
- self.assertEqual([text], result)
-
- def test_clear(self) -> None:
- """Test clearing the cache"""
- self.memory.clear()
- self.assertEqual(self.memory.collection.num_entities, 0)
-
- def test_get(self) -> None:
- """Test getting a text from the cache"""
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- result = self.memory.get(text)
- self.assertEqual(result, [text])
-
- def test_get_relevant(self) -> None:
- """Test getting relevant texts from the cache"""
- text1 = "Sample text 1"
- text2 = "Sample text 2"
- self.memory.clear()
- self.memory.add(text1)
- self.memory.add(text2)
- result = self.memory.get_relevant(text1, 1)
- self.assertEqual(result, [text1])
-
- def test_get_stats(self) -> None:
- """Test getting the cache stats"""
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- stats = self.memory.get_stats()
- self.assertEqual(15, len(stats))
-
-except:
- print("Milvus not installed, skipping tests")
diff --git a/spaces/awacke1/Docker.VSCode.Integration.HF/DockerfileDescription.md b/spaces/awacke1/Docker.VSCode.Integration.HF/DockerfileDescription.md
deleted file mode 100644
index 7d4ef966dd8b05a4649391e70936518eacb59ffc..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Docker.VSCode.Integration.HF/DockerfileDescription.md
+++ /dev/null
@@ -1,200 +0,0 @@
-# 5 Terraform commands you should know:
-
-1. terraform init: https://developer.hashicorp.com/terraform/cli/commands/init
-2. terraform plan: https://developer.hashicorp.com/terraform/cli/commands/plan
-3. terraform apply: https://developer.hashicorp.com/terraform/cli/commands/apply
-4. terraform import: https://developer.hashicorp.com/terraform/cli/import
-5. terraform destroy: https://developer.hashicorp.com/terraform/cli/commands/destroy
-
-# Azure Resources TF Management:
-
-| Step | Title | Tasks |
-|------|--------------------|-------------------------------------------------------------------------------------------------------------|
-| 1 | PreReqs | Terraform installed, Azure cloud account, VSCode extensions, Docker, ACR, Deploy and Test |
-| 2 | Demo | Create resources in Azure cloud |
-| 3 | | Create terraform files for those resources |
-| 4 | | Run terraform apply |
-| 5 | | Run terraform import for each resource |
-| 6 | | Verify terraform state |
-| 7 | | Perform terraform destroy to clean up resources |
-
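-As a rough illustration of the workflow in the table above, here is a minimal sketch that drives the same Terraform commands from Python via `subprocess`; the `azurerm_resource_group.rg` address and the subscription/resource-group IDs are hypothetical placeholders rather than values from this guide:
-
-```python
-import subprocess
-
-def tf(*args: str) -> None:
-    """Run a terraform CLI command in the current working directory."""
-    subprocess.run(["terraform", *args], check=True)
-
-tf("init")                      # download providers, configure the backend
-tf("plan", "-out=tfplan")       # preview the changes
-tf("apply", "tfplan")           # create/update the resources
-# adopt a resource that already exists in Azure (hypothetical IDs)
-tf("import", "azurerm_resource_group.rg",
-   "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/demo-rg")
-tf("state", "list")             # verify what Terraform now tracks
-tf("destroy", "-auto-approve")  # clean up the demo resources
-```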
-
-# Docker and Linux SOAR Setup:
-
-| Step | Title | Tasks |
-|------|--------------------|-------------------------------------------------------------------------------------------------------------|
-| 1 | PreReqs | Docker, ACR, Deploy and Test |
-| 2 | Demo | Modify Dockerfile to start and configure resources |
-| 3    |                    | Modify Docker base image to remove old apt packages with security issues and cert issues                     |
-| 4    |                    | Modify Docker base image to configure SOAR and Python components, including packages and requirements        |
-| 5 | | Run Docker push, deploy to ACR and test |
-
-
-
-
-
-Give me an analysis of what this Dockerfile does: FROM nvidia/cuda:11.3.1-base-ubuntu20.04
-
-ENV DEBIAN_FRONTEND=noninteractive \
- TZ=Europe/Paris
-
-# Remove any third-party apt sources to avoid issues with expiring keys.
-# Install some basic utilities
-RUN rm -f /etc/apt/sources.list.d/*.list && \
- apt-get update && apt-get install -y \
- curl \
- ca-certificates \
- sudo \
- git \
- git-lfs \
- zip \
- unzip \
- htop \
- bzip2 \
- libx11-6 \
- build-essential \
- libsndfile-dev \
- software-properties-common \
- && rm -rf /var/lib/apt/lists/*
-
-ARG BUILD_DATE
-ARG VERSION
-ARG CODE_RELEASE
-RUN \
- echo "**** install openvscode-server runtime dependencies ****" && \
- apt-get update && \
- apt-get install -y \
- jq \
- libatomic1 \
- nano \
- net-tools \
- netcat && \
- echo "**** install openvscode-server ****" && \
- if [ -z ${CODE_RELEASE+x} ]; then \
- CODE_RELEASE=$(curl -sX GET "https://api.github.com/repos/gitpod-io/openvscode-server/releases/latest" \
- | awk '/tag_name/{print $4;exit}' FS='[""]' \
- | sed 's|^openvscode-server-v||'); \
- fi && \
- mkdir -p /app/openvscode-server && \
- curl -o \
- /tmp/openvscode-server.tar.gz -L \
- "https://github.com/gitpod-io/openvscode-server/releases/download/openvscode-server-v${CODE_RELEASE}/openvscode-server-v${CODE_RELEASE}-linux-x64.tar.gz" && \
- tar xf \
- /tmp/openvscode-server.tar.gz -C \
- /app/openvscode-server/ --strip-components=1 && \
- echo "**** clean up ****" && \
- apt-get clean && \
- rm -rf \
- /tmp/* \
- /var/lib/apt/lists/* \
- /var/tmp/*
-COPY root/ /
-
-RUN add-apt-repository ppa:flexiondotorg/nvtop && \
- apt-get upgrade -y && \
- apt-get install -y --no-install-recommends nvtop
-
-RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - && \
- apt-get install -y nodejs && \
- npm install -g configurable-http-proxy
-# Create a working directory
-WORKDIR /app
-# Create a non-root user and switch to it
-RUN adduser --disabled-password --gecos '' --shell /bin/bash user \
- && chown -R user:user /app
-RUN echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-user
-USER user
-# All users can use /home/user as their home directory
-ENV HOME=/home/user
-RUN mkdir $HOME/.cache $HOME/.config \
- && chmod -R 777 $HOME
-# Set up the Conda environment
-ENV CONDA_AUTO_UPDATE_CONDA=false \
- PATH=$HOME/miniconda/bin:$PATH
-RUN curl -sLo ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-py39_4.10.3-Linux-x86_64.sh \
- && chmod +x ~/miniconda.sh \
- && ~/miniconda.sh -b -p ~/miniconda \
- && rm ~/miniconda.sh \
- && conda clean -ya
-
-WORKDIR $HOME/app
-
-#######################################
-# Start root user section
-#######################################
-
-USER root
-
-# User Debian packages
-## Security warning : Potential user code executed as root (build time)
-RUN --mount=target=/root/packages.txt,source=packages.txt \
- apt-get update && \
- xargs -r -a /root/packages.txt apt-get install -y --no-install-recommends \
- && rm -rf /var/lib/apt/lists/*
-
-RUN --mount=target=/root/on_startup.sh,source=on_startup.sh,readwrite \
- bash /root/on_startup.sh
-
-#######################################
-# End root user section
-#######################################
-
-USER user
-
-# Python packages
-RUN --mount=target=requirements.txt,source=requirements.txt \
- pip install --no-cache-dir --upgrade -r requirements.txt
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app
-
-RUN chmod +x start_server.sh
-
-ENV PYTHONUNBUFFERED=1 \
- GRADIO_ALLOW_FLAGGING=never \
- GRADIO_NUM_PORTS=1 \
- GRADIO_SERVER_NAME=0.0.0.0 \
- GRADIO_THEME=huggingface \
- SYSTEM=spaces \
- SHELL=/bin/bash
-
-EXPOSE 7860 3000
-
-CMD ["./start_server.sh"]
-This Dockerfile describes the steps to create a Docker image with an environment that includes various tools, libraries, and applications. The image is based on the nvidia/cuda:11.3.1-base-ubuntu20.04 image, which is an Ubuntu 20.04 base image with NVIDIA CUDA 11.3.1 libraries pre-installed. The steps in this Dockerfile can be broken down as follows:
-
-Set environment variables:
-
-Set the timezone to Europe/Paris
-Set the Debian frontend to noninteractive mode for silent installations
-Remove third-party apt sources to avoid issues with expiring keys.
-
-Install basic utilities and libraries using apt-get.
-
-Install the OpenVSCode-Server, which is an open-source version of VSCode Server, allowing you to run a remote VSCode environment.
-
-Add the flexiondotorg/nvtop repository and install the NVTop GPU monitoring tool.
-
-Install Node.js 14.x and the configurable-http-proxy package.
-
-Create a non-root user named user and set appropriate permissions.
-
-Set up the Conda environment with Miniconda3 and Python 3.9.
-
-Install user-specified Debian packages by reading from a packages.txt file.
-
-Execute a script named on_startup.sh as the root user.
-
-Switch back to the non-root user user.
-
-Install Python packages specified in a requirements.txt file.
-
-Copy the current directory contents into the container at $HOME/app.
-
-Set environment variables for Gradio and other configurations.
-
-Expose ports 7860 and 3000.
-
-Set the CMD to execute the start_server.sh script when the container starts.
-
-This Dockerfile sets up a development environment with NVIDIA CUDA support, a Conda environment, OpenVSCode-Server, NVTop, and various other utilities. It also installs user-specified Debian and Python packages, and runs a user-specified script at startup. The image is prepared to run a server using Gradio, a library for creating web-based UIs for ML models, and exposes two ports for external communication.
\ No newline at end of file
diff --git a/spaces/awacke1/VideoSwap/app.py b/spaces/awacke1/VideoSwap/app.py
deleted file mode 100644
index 5ae20aa5a05e97daac8913ff6594a729368cce54..0000000000000000000000000000000000000000
--- a/spaces/awacke1/VideoSwap/app.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import gradio as gr
-
-
-def video_flip(video):
- return video
-
-
-demo = gr.Interface(video_flip, gr.Video(), "playable_video", value="zC3EhH5ssposNojKpYfp--85T53.mp4", interactive=True)
-
-#import gradio as gr
-
-
-def sentence_builder(quantity, animal, place, activity_list, morning):
- return f"""The {quantity} {animal}s went to the {place} where they {" and ".join(activity_list)} until the {"morning" if morning else "night"}"""
-
-
-demo2 = gr.Interface(
- sentence_builder,
- [
- gr.Slider(2, 20),
- gr.Dropdown(["cat", "dog", "bird"]),
- gr.Radio(["park", "zoo", "road"]),
- gr.CheckboxGroup(["ran", "swam", "ate", "slept"]),
- gr.Checkbox(label="Is it the morning?"),
- ],
- "text",
- examples=[
- [2, "cat", "park", ["ran", "swam"], True],
- [4, "dog", "zoo", ["ate", "swam"], False],
- [10, "bird", "road", ["ran"], False],
- [8, "cat", "zoo", ["ate"], True],
- ],
-)
-
-demo.launch()
-#demo2.launch()
\ No newline at end of file
diff --git a/spaces/awesomepotato2016/recommender/app.py b/spaces/awesomepotato2016/recommender/app.py
deleted file mode 100644
index 0919ec01bd2ffb85c5af3efc860c2f33310c3a76..0000000000000000000000000000000000000000
--- a/spaces/awesomepotato2016/recommender/app.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import gradio as gr
-import fastai
-from fastai.collab import *
-from fastai.tabular.all import *
-import pandas as pd
-# get rid of this if huggingface is linux
-# import pathlib
-# temp = pathlib.PosixPath
-# pathlib.PosixPath = pathlib.WindowsPath
-
-def initialize_params(size):
- return nn.Parameter(torch.randn(size)*0.1) # parameter class adds it to Module parameters, automatically turns on requires grad
-class CollabFilter(nn.Module):
- def __init__(self,num_users,num_items,num_latent,y_range):
- super().__init__()
- self.user_factors = initialize_params((num_users,num_latent))
- self.item_factors = initialize_params((num_items,num_latent))
- self.y_range = y_range
- # self.user_bias = initialize_params((num_users))
- # self.item_bias = initialize_params((num_items))
- def forward(self,X):
- users = self.user_factors[X[:,0]]
- items = self.item_factors[X[:,1]]
- # ubias = self.user_bias[X]
- # itbias = self.item_bias[X]
- z = (users*items).sum(dim=1,keepdim=True).sigmoid() # take product, take sigmoid to scale values
- low, high = self.y_range
- z = z*(high-low)+low# get values between 0 and 10 (scale of ratings)
- return z
-mylearner = load_learner('export.pkl')
-
-dls = torch.load('anime_titles.pkl')
-descriptions = pd.read_pickle('descriptions.pkl')
-print(descriptions.columns)
-temp = descriptions[['title','synopsis']].to_dict(orient='list')
-titletodescription = dict(zip(temp['title'],temp['synopsis']))
-def predict(name):
- name = name.strip()
- idx = dls.classes['title'].o2i[name]
- model = mylearner.model
- with torch.no_grad():
- m1 = model.item_factors[idx].unsqueeze(dim=0)
- sim = nn.CosineSimilarity(dim=1)
- res = sim(m1,model.item_factors)
- # most smilar
- cosVals,mostSim = torch.topk(res,6)
- # print(f"Most similar anime to {name}")
- # mostalike = f"Most similar anime to {name}:\n"
- mostalike = dict()
- for k in range(1,len(mostSim)):
- mostalike[dls.classes['title'][mostSim[k]]] = float(cosVals[k].item())
- # print(mostalike)
- return mostalike
-def get_description(title):
- title = title.strip()
- if title not in titletodescription:
- return "Description not found."
- else:
- return titletodescription[title]
-# predict("Kuroshitsuji")
-title = "Anime recommender"
-description = """The ur-example for collaborative filtering is the MovieLens Database,
-but let's be honest: movie suggestions are nice, but I don't like watching similar movies.
-For me, media needs to be acclaimed but not too pop, and have a certain degree of cult following.
-Thus, I'm building a collaborative filtering recommender for anime using data from the website MyAnimeList.
-I found a [dataset](https://www.kaggle.com/datasets/marlesson/myanimelist-dataset-animes-profiles-reviews) on Kaggle with reviews
-and used it to construct this recommender. Make sure your title is typed exactly and is found on [this list](https://huggingface.co/spaces/awesomepotato2016/recommender/raw/main/titles.txt)"""
-inp = gr.Textbox(max_lines=1,placeholder="Put the title of an anime!")
-out = gr.Label()
-iface = gr.Interface(title=title,description=description,fn=predict, inputs=inp, outputs=out)
-synopsisgetter = gr.Interface(description="Get the synopsis of a anime",fn=get_description,inputs='text',outputs='text')
-gr.TabbedInterface([iface,synopsisgetter],["Recommender","Read Synopsis"]).launch()
\ No newline at end of file
diff --git a/spaces/ayaanzaveri/mnist/app.py b/spaces/ayaanzaveri/mnist/app.py
deleted file mode 100644
index fe6bfb6fd92ffdfce8043838437f5ed6b0f436e5..0000000000000000000000000000000000000000
--- a/spaces/ayaanzaveri/mnist/app.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import tensorflow as tf
-import numpy as np
-import cv2 as cv
-from urllib.request import urlretrieve
-import gradio as gr
-
-urlretrieve("https://github.com/AyaanZaveri/mnist/raw/main/mnist-model.h5", "mnist-model.h5")
-
-model = tf.keras.models.load_model("mnist-model.h5")
-
-def recognize_digit(image):
- image = cv.resize(image, (28, 28))
- image = image / 255
- image = image.reshape((1, 28, 28))
- prediction = model.predict(image).tolist()[0]
- return {str(i): prediction[i] for i in range(10)}
-
-gr.Interface(fn=recognize_digit,
- inputs="sketchpad",
- outputs=gr.outputs.Label(num_top_classes=3),
- live=True,
- css=".footer {display:none !important}",
- # title="MNIST Sketchpad",
- description="Draw a number 0 through 9.",
- thumbnail="https://raw.githubusercontent.com/gradio-app/real-time-mnist/master/thumbnail2.png").launch();
\ No newline at end of file
diff --git a/spaces/badayvedat/LLaVA/docs/LLaVA_Bench.md b/spaces/badayvedat/LLaVA/docs/LLaVA_Bench.md
deleted file mode 100644
index 5921964c43f599b2d820de5092a1c3b4c39de60f..0000000000000000000000000000000000000000
--- a/spaces/badayvedat/LLaVA/docs/LLaVA_Bench.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# LLaVA-Bench [[Download](https://huggingface.co/datasets/liuhaotian/llava-bench-in-the-wild)]
-
-**-Introduction-** Large commercial multimodal chatbots have been released this week, including
-- [Multimodal Bing-Chat by Microsoft](https://blogs.bing.com/search/july-2023/Bing-Chat-Enterprise-announced,-multimodal-Visual-Search-rolling-out-to-Bing-Chat) (July 18, 2023)
-- [Multimodal Bard by Google](https://bard.google.com/).
-
-These chatbots are presumably supported by proprietary large multimodal models (LMMs). Compared with open-source LMMs such as LLaVA, proprietary LMMs represent the upper bound of scaling success for current SoTA techniques. They share the goal of developing multimodal chatbots that follow human intent to complete various daily-life visual tasks in the wild. While how to evaluate multimodal chat ability remains largely unexplored, studying open-source LMMs against the commercial multimodal chatbots provides useful feedback. In addition to the *LLaVA-Bench (COCO)* dataset we used to develop the early versions of LLaVA, we are releasing [*LLaVA-Bench (In-the-Wild)*](https://huggingface.co/datasets/liuhaotian/llava-bench-in-the-wild) to the community for public use.
-
-## LLaVA-Bench (In-the-Wild *[Ongoing work]*)
-
-To evaluate the model's capability in more challenging tasks and generalizability to novel domains, we collect a diverse set of 24 images with 60 questions in total, including indoor and outdoor scenes, memes, paintings, sketches, etc, and associate each image with a highly-detailed and manually-curated description and a proper selection of questions. Such design also assesses the model's robustness to different prompts. In this release, we also categorize questions into three categories: conversation (simple QA), detailed description, and complex reasoning. We continue to expand and improve the diversity of the LLaVA-Bench (In-the-Wild). We manually query Bing-Chat and Bard to get the responses.
-
-### Results
-
-The score is measured by comparing against a reference answer generated by text-only GPT-4. It is generated by feeding the question, along with the ground truth image annotations as the context. A text-only GPT-4 evaluator rates both answers. We query GPT-4 by putting the reference answer first, and then the answer generated by the candidate model. We upload images at their original resolution to Bard and Bing-Chat to obtain the results.
-
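-A rough sketch of the pairwise judging step described above, assuming an OpenAI-style chat message format; the prompt wording, the 1-10 scale, and the function name are illustrative assumptions, not the exact prompt used to produce the tables below:
-
-```python
-def build_judge_messages(question: str, context: str, reference: str, candidate: str) -> list:
-    """Build a chat prompt asking a text-only judge to rate two answers.
-
-    The reference (GPT-4) answer is listed first and the candidate model's
-    answer second, matching the ordering described above.
-    """
-    system = ("You are a helpful and precise assistant for checking the "
-              "quality of two answers to a question about an image.")
-    user = (f"[Context]\n{context}\n\n[Question]\n{question}\n\n"
-            f"[Assistant 1]\n{reference}\n\n[Assistant 2]\n{candidate}\n\n"
-            "Rate each assistant on a scale of 1 to 10 and explain your reasoning.")
-    return [{"role": "system", "content": system},
-            {"role": "user", "content": user}]
-```
-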
-| Approach | Conversation | Detail | Reasoning | Overall |
-|----------------|--------------|--------|-----------|---------|
-| Bard-0718 | 83.7 | 69.7 | 78.7 | 77.8 |
-| Bing-Chat-0629 | 59.6 | 52.2 | 90.1 | 71.5 |
-| LLaVA-13B-v1-336px-0719 (beam=1) | 64.3 | 55.9 | 81.7 | 70.1 |
-| LLaVA-13B-v1-336px-0719 (beam=5) | 68.4 | 59.9 | 84.3 | 73.5 |
-
-Note that Bard sometimes refuses to answer questions about images containing humans, and Bing-Chat blurs the human faces in the images. We also provide the benchmark score for the subset without humans.
-
-| Approach | Conversation | Detail | Reasoning | Overall |
-|----------------|--------------|--------|-----------|---------|
-| Bard-0718 | 94.9 | 74.3 | 84.3 | 84.6 |
-| Bing-Chat-0629 | 55.8 | 53.6 | 93.5 | 72.6 |
-| LLaVA-13B-v1-336px-0719 (beam=1) | 62.2 | 56.4 | 82.2 | 70.0 |
-| LLaVA-13B-v1-336px-0719 (beam=5) | 65.6 | 61.7 | 85.0 | 73.6 |
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/cameras/CubeCamera.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/cameras/CubeCamera.d.ts
deleted file mode 100644
index f2dea8b7b743e900a7d33eb700330af89770289b..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/cameras/CubeCamera.d.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-import { WebGLRenderTargetCube } from './../renderers/WebGLRenderTargetCube';
-import { Scene } from './../scenes/Scene';
-import { WebGLRenderer } from './../renderers/WebGLRenderer';
-import { Object3D } from './../core/Object3D';
-
-export class CubeCamera extends Object3D {
- constructor(near?: number, far?: number, cubeResolution?: number);
-
- type: 'CubeCamera';
-
- renderTarget: WebGLRenderTargetCube;
-
- /**
- * @deprecated Use {@link CubeCamera#update .update()} instead
- */
- //updateCubeMap(renderer: Renderer, scene: Scene): void;
-
- update(renderer: WebGLRenderer, scene: Scene): void;
-}
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/math/Box3.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/math/Box3.d.ts
deleted file mode 100644
index 55e4a187f72b19d01969183669757a5094d6be79..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/math/Box3.d.ts
+++ /dev/null
@@ -1,54 +0,0 @@
-import { Vector3 } from './Vector3';
-import { Object3D } from './../core/Object3D';
-import { Sphere } from './Sphere';
-import { Plane } from './Plane';
-import { Matrix4 } from './Matrix4';
-
-export class Box3 {
- constructor(min?: Vector3, max?: Vector3);
-
- max: Vector3;
- min: Vector3;
-
- set(min: Vector3, max: Vector3): this;
- setFromArray(array: ArrayLike): this;
- setFromPoints(points: Vector3[]): this;
- setFromCenterAndSize(center: Vector3, size: Vector3): this;
- setFromObject(object: Object3D): this;
- clone(): this;
- copy(box: Box3): this;
- makeEmpty(): this;
- isEmpty(): boolean;
- getCenter(target: Vector3): Vector3;
- getSize(target: Vector3): Vector3;
- expandByPoint(point: Vector3): this;
- expandByVector(vector: Vector3): this;
- expandByScalar(scalar: number): this;
- expandByObject(object: Object3D): this;
- containsPoint(point: Vector3): boolean;
- containsBox(box: Box3): boolean;
- getParameter(point: Vector3): Vector3;
- intersectsBox(box: Box3): boolean;
- intersectsSphere(sphere: Sphere): boolean;
- intersectsPlane(plane: Plane): boolean;
- clampPoint(point: Vector3, target: Vector3): Vector3;
- distanceToPoint(point: Vector3): number;
- getBoundingSphere(target: Sphere): Sphere;
- intersect(box: Box3): this;
- union(box: Box3): this;
- applyMatrix4(matrix: Matrix4): this;
- translate(offset: Vector3): this;
- equals(box: Box3): boolean;
- /**
- * @deprecated Use {@link Box3#isEmpty .isEmpty()} instead.
- */
- empty(): any;
- /**
- * @deprecated Use {@link Box3#intersectsBox .intersectsBox()} instead.
- */
- isIntersectionBox(b: any): any;
- /**
- * @deprecated Use {@link Box3#intersectsSphere .intersectsSphere()} instead.
- */
- isIntersectionSphere(s: any): any;
-}
diff --git a/spaces/bigPear/digitalWDF/src/web_demo.py b/spaces/bigPear/digitalWDF/src/web_demo.py
deleted file mode 100644
index 0bfccf9f2bbd84ddcb127581fbcbc792c5dfd91e..0000000000000000000000000000000000000000
--- a/spaces/bigPear/digitalWDF/src/web_demo.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# coding=utf-8
-# Implement user interface in browser for ChatGLM fine-tuned with PEFT.
-# This code is largely borrowed from https://github.com/THUDM/ChatGLM-6B/blob/main/web_demo.py
-
-
-import gradio as gr
-import mdtex2html
-
-from utils import ModelArguments, load_pretrained
-from transformers import HfArgumentParser
-
-
-parser = HfArgumentParser(ModelArguments)
-model_args, = parser.parse_args_into_dataclasses()
-model, tokenizer = load_pretrained(model_args)
-model = model.cuda()
-model.eval()
-
-
-"""Override Chatbot.postprocess"""
-
-def postprocess(self, y):
- if y is None:
- return []
- for i, (message, response) in enumerate(y):
- y[i] = (
- None if message is None else mdtex2html.convert((message)),
- None if response is None else mdtex2html.convert(response),
- )
- return y
-
-
-gr.Chatbot.postprocess = postprocess
-
-
-def parse_text(text): # copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT
- lines = text.split("\n")
- lines = [line for line in lines if line != ""]
- count = 0
- for i, line in enumerate(lines):
- if "```" in line:
- count += 1
- items = line.split('`')
- if count % 2 == 1:
- lines[i] = f'<pre><code class="language-{items[-1]}">'
- else:
- lines[i] = f'<br></code></pre>'
- else:
- if i > 0:
- if count % 2 == 1:
- line = line.replace("`", "\`")
- line = line.replace("<", "&lt;")
- line = line.replace(">", "&gt;")
- line = line.replace(" ", "&nbsp;")
- line = line.replace("*", "&ast;")
- line = line.replace("_", "&lowbar;")
- line = line.replace("-", "&#45;")
- line = line.replace(".", "&#46;")
- line = line.replace("!", "&#33;")
- line = line.replace("(", "&#40;")
- line = line.replace(")", "&#41;")
- line = line.replace("$", "&#36;")
- lines[i] = "<br>"+line
- text = "".join(lines)
- return text
-
-
-def predict(input, chatbot, max_length, top_p, temperature, history):
- chatbot.append((parse_text(input), ""))
- for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
- temperature=temperature):
- chatbot[-1] = (parse_text(input), parse_text(response))
-
- yield chatbot, history
-
-
-def reset_user_input():
- return gr.update(value='')
-
-
-def reset_state():
- return [], []
-
-
-with gr.Blocks() as demo:
- gr.HTML("""<h1 align="center">ChatGLM-Efficient-Tuning</h1>""")
-
- chatbot = gr.Chatbot()
- with gr.Row():
- with gr.Column(scale=4):
- with gr.Column(scale=12):
- user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
- container=False)
- with gr.Column(min_width=32, scale=1):
- submitBtn = gr.Button("Submit", variant="primary")
- with gr.Column(scale=1):
- emptyBtn = gr.Button("Clear History")
- max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
- top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
- temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
-
- history = gr.State([])
-
- submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history],
- show_progress=True)
- submitBtn.click(reset_user_input, [], [user_input])
-
- emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)
-
-demo.queue().launch(server_name="0.0.0.0", share=False, inbrowser=True)
diff --git a/spaces/billsar1912/YOLOv5x6-marine-vessels-detection/apps/main_model.py b/spaces/billsar1912/YOLOv5x6-marine-vessels-detection/apps/main_model.py
deleted file mode 100644
index 1dceae237cab02ff9ccf7d58b9886f0217f1008c..0000000000000000000000000000000000000000
--- a/spaces/billsar1912/YOLOv5x6-marine-vessels-detection/apps/main_model.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import time
-from turtle import width
-import torch
-import numpy as np
-import streamlit as st
-
-def app():
- st.write("## Ship Imagery Prediction")
- st.write("### Model evaluation:")
- eval_col1, eval_col2, eval_col3, eval_col4, eval_col5 = st.columns(spec=5)
- eval_col1.metric("Precision", "89.52%")
- eval_col2.metric("Recall", "83.54%")
- eval_col3.metric("F1-Score", "86.43%")
- eval_col4.metric("mAP 0.5", "85.39%")
- eval_col5.metric("mAP 0.5:0.95", "62.63%")
-
- uploaded_file = st.file_uploader("Choose a ship imagery")
- if uploaded_file is not None:
- st.image(uploaded_file, caption='Image to predict')
- folder_path = st.text_input("Image path",
- help="This field the image path field that the model will predict the object inside the image that we have uploaded",
- placeholder="Copy the path of image to this field")
-
- prediction = st.button("Predict")
- if prediction:
- ship_model = torch.hub.load('ultralytics/yolov5', 'custom', path="apps/model/main_model.pt", force_reload=True)
- ship_model.conf = 0.6
- ship_model.iou = 0.55
- results = ship_model(f"{folder_path}")
- with st.spinner("Loading..."):
- time.sleep(3.5)
- st.success("Done!")
- st.image(np.squeeze(results.render()))
- results.print()
diff --git a/spaces/bioriAsaeru/text-to-voice/Bdo Patch Download Slow DNS ve TCPIP Ayarlarn Sfrlayarak Sorunu Giderin.md b/spaces/bioriAsaeru/text-to-voice/Bdo Patch Download Slow DNS ve TCPIP Ayarlarn Sfrlayarak Sorunu Giderin.md
deleted file mode 100644
index 026454d8e9f44292183fa79612445f78b7fcc358..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Bdo Patch Download Slow DNS ve TCPIP Ayarlarn Sfrlayarak Sorunu Giderin.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-I have yet to play the game since the transfer.
-I have 1 Gbps internet and my speed test is normal, however my game WILL not patch. It either gives me some error and tells me I need a stable connection (I'm using Ethernet), or when it does manage to work, after a minute it goes down to like .56 KB/s.
-I've reset my PC, completely uninstalled the game, reset my internet, and disconnected every other device from my network; I have no idea what to do.
-Unfortunately, I'm in the same situation. The Launcher says Patching but is slower than molasses in January. Not as fast as you, but I have a fast connection (500MB). I submitted a ticket yesterday with screenshots and dxdiag info. The highest I've gotten was 21% and that was over 24 hours.
-Any fix for this? Wanted to get back into this game, transferred my account, and I'm running into the same issue. I'm stuck at 1% trying to patch what I can only assume is the launcher. Got fiber internet (500 DL/UL), cleared DNS, reset the network, uninstalled, updated the computer, turned off AV, the works. Still nothing. Left it running for over 48 hours and nothing changes. Sent a ticket, but I fear a month-long wait for a response.
-Keep doing this until you get past the initial 1%. Sometimes I had to delete the same file repeatedly and restart the launcher. Eventually the download will continue past the trouble files and your patching will continue smoothly.
-Patches used to take minutes. Now they take hours. I have a 155 Mbps download speed. The patch is downloading at 5 KBps. This has been happening for the last few patches. I have googled the issue and tried all of the solutions provided (turn off IPv6, disable multi-threaded rendering and Launcher GPU acceleration in the launcher) and still 5 KBps. What else can I do to fix this?
-
-The last few patches and updates have really been slow for me. I also have 100 mb/s (on speed test), but on downloads the highest it went was 3 mb/s, and 90% of the time it's mostly in the hundreds of kilobytes, and on this patch only 20-30?
-Downloads are slow because the servers are slow. My down speed is 200x faster than the 500 KB/s I'm getting on this update right now. No money spent on hosting servers (peer hosted) and seemingly very little money spent on update distribution servers, at least on xbox side.
-UPDATE: 200-500 KB/s. The ~250 I'm getting now is an average including all of the faster download periods. I've been downloading the same 10 MB for the last 10 minutes. I estimate that I'm actually getting less than 100 KB/s down. Now at ISDN speeds, rapidly approaching dialup speeds. Atrocious.
-You guys know that sections of the internet between you and what you're connected to affect the speed of things too right? Unless you're plugged into the same hub in the same room as the thing you're connected to, there are other computers and transmission lines passing the information along and any problems on that path can slow things down.
-Turning off the firewalls and proxy sped it up some. Still have days where it takes almost 2 hours. Hobie-Wan, it downloaded for 3 months at between 30-40 minutes every day and then turned into a toaster. Warframe support sent me the firewalls and proxy idea.
-I have a 300 mb/s fibre optic connection. I can download a 25 GB game off of steam in less than 10 minutes flat. Yet when I do ANY patch from the Warframe launcher (note: Warframe is installed directly from their website, not through Steam), I am often downloading at a measely 2 mb/s at most. It can take the same amount of time as a 25 GB game, sometimes longer.
-Here I am, steadily downloading between 50-150kb/s when updates come through. Perhaps you should educate yourself on the correct terminology to use for downstream vs link speeds and the difference between a megabit and a megabyte....as well as how networking functions as a whole.
-Necroposting, but I think I found the solution for people that only use Windows anti-virus: uncheck the Bulk download and Aggressive download options in the launcher. If the error presents again, try checking those options again. At least that fixes the error for me every time it presents; just alternate between checking and unchecking those options until it's back to your regular download speed.
-Select Limit how much bandwidth is used for downloading updates in the background. Then use the slider to set the percentage of available bandwidth used for background downloads. Lower values use less bandwidth but cause updates to be delivered more slowly.
-We're constantly improving Firefox. The latest version is faster than ever before and contains fixes for many problems that could slow down your Firefox. See Update Firefox to the latest release to learn how to update.
-Content you don't need such as ads or tracking scripts can significantly slow down page loading. Firefox's built-in content-blocking feature can make the pages load faster by preventing third-party trackers from loading. See Enhanced Tracking Protection in Firefox for desktop for details.
-So I am really frustrated that NCSoft is doing NOTHING about their launcher issues. I emailed them a few weeks ago to report that there is an issue with downloading the game client (there were topics about the same issue even during the F2P launch). The download speed the launcher shows does not match the download speed shown by Resource Monitor or any other tool.
-For example: today I reinstalled Windows, and I had a copy of the BnS client downloaded about a month ago. All the updates released during that period add up to only about 1 GB, but the launcher says it needs to download 4 GB of files... anyway...
-The fun begins here. The displayed download speed does not match at all the actual download speed shown in Resource Monitor, NetLimiter, or any other third-party software that lets you see your internet connection speeds.
-The NCSoft launcher's download speed is very inconsistent and reads very low (it jumps between 1 MB/s and 20 KB/s all the time), while Windows Resource Monitor and NetLimiter show that the launcher is actually using the FULL DOWNLOAD SPEED.
-I think NCSoft uses non-persistent HTTP connections, so after each file finishes downloading the speed drops back down. Because of this you always see the download speed jumping from maximum to zero, and it repeats for every file you download. It stretches out the download time and is super annoying... AND NCSOFT DOES NOTHING ABOUT IT!
-When a developer releases a new version of a game, work is done on the backend of GeForce NOW to download that patch, replicate each bit to all of our storage systems, test for the proper security features, and finally copy it back onto all of our data centers worldwide, becoming available for gamers.
-Steam is the main gaming client of nearly every PC gamer, yet it still encounters occasional problems despite the frequent updates. One frustrating issue happens when you're trying to update a game: You have adequate disk space, but Steam cancels the download and returns the NOT ENOUGH FREE DISK SPACE error.
-This error can result from various hazards on Steam's path to update the game, from a broken download to insufficient writing access. Read on for several solutions you can try in order to resolve this error.
-When Steam is downloading files, they're temporarily stored in a folder called downloading. It's possible that, because of various reasons, the downloaded files are corrupt and Steam can't decide where to resume the download. Deleting the downloading folder will make Steam start the download over and do it properly.
-It is also a possibility your Steam client's download cache has gotten tangled and is causing problems with the download. The download cache keeps track of all the downloads until they're installed, and if there are corrupt files in the cache, it can keep the downloads from starting. You can fix this by clearing the download cache.
-Your Steam library is where the platform stores your games and apps. The files regarding the library and its structure can get corrupted and the corruption can cause Steam to get confused when downloading files. Thankfully, Steam has implemented a feature that lets you repair your Steam libraries.
-Steam allocates the entire game's size on the disk for even the smallest updates because it creates a temporary copy of the original game files in the downloading folder, applies the updates, and then deletes the temporary files. This method has received many complaints from users and gamers, but to this day, there is no remedy for it.
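A rough sketch of the disk-space implication described above (illustrative only; the function and numbers are made up and are not Steam's actual accounting):

def required_free_space_gb(game_size_gb, patch_size_gb):
    # Under the scheme described above, a temporary copy of the full game plus the
    # downloaded patch data must fit on disk while the update is applied.
    return game_size_gb + patch_size_gb

print(required_free_space_gb(60.0, 0.5))  # a 0.5 GB patch to a 60 GB game still wants ~60.5 GB free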
-Download errors can be a real bummer, especially when you're all excited to play the game as soon as possible. With these solutions, you're likely to fix the not enough free disk space error in Steam. Now that you know how to start your downloads, downloading them faster is another thing you should learn.
-If your launcher closes after you click Install, check Task Manager; if you see it there and it has network activity, that means it is downloading the game in the background. When it finishes, it will pop back into view.
-@MitchShallGame @HogwartsLegacy Sucks to see all these issues with Steam. I play on Xbox, downloaded it the day before, and hopped on an hour after it was released (an hour after because I was asleep, not because of issues).
-@ak1r4_n @BynineB Unfortunately, I downloaded the demo from Steam but it didn't work for me. The game opens but the character doesn't move. I tried using a keyboard and a gamepad (Xbox) but nothing worked... I wonder what that could be...
- aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Connectify Hotspot 2016 Crack Onhax Windows Troubleshooting and FAQ.md b/spaces/bioriAsaeru/text-to-voice/Connectify Hotspot 2016 Crack Onhax Windows Troubleshooting and FAQ.md
deleted file mode 100644
index 642b2c5cc0b366f127b8c41acafa9bb6db80c121..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Connectify Hotspot 2016 Crack Onhax Windows Troubleshooting and FAQ.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Connectify Hotspot 2016 Crack Onhax Windows
-Download Zip ✺✺✺ https://urloso.com/2uyQgA
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/bioriAsaeru/text-to-voice/El Chingonario Pdf Descargar Gratis El verbo chingar en todas sus formas y variantes.md b/spaces/bioriAsaeru/text-to-voice/El Chingonario Pdf Descargar Gratis El verbo chingar en todas sus formas y variantes.md
deleted file mode 100644
index e4ef60674298dd6151ef0fe00d1f374e4b0d1358..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/El Chingonario Pdf Descargar Gratis El verbo chingar en todas sus formas y variantes.md
+++ /dev/null
@@ -1,6 +0,0 @@
-El Chingonario Pdf Descargar Gratis
-DOWNLOAD --->>> https://urloso.com/2uyQ36
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/bioriAsaeru/text-to-voice/La prisonniere malika oufkir pdf free A testimony of human rights violations and resistance in North Africa.md b/spaces/bioriAsaeru/text-to-voice/La prisonniere malika oufkir pdf free A testimony of human rights violations and resistance in North Africa.md
deleted file mode 100644
index 1da66db6c844e0e1886ed322583fd40c2bda15de..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/La prisonniere malika oufkir pdf free A testimony of human rights violations and resistance in North Africa.md
+++ /dev/null
@@ -1,6 +0,0 @@
-la prisonniere malika oufkir pdf free
-Download Zip » https://urloso.com/2uyRdH
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/metrics/precision_recall.py b/spaces/birsardar/stable-diffusion-mat-outpainting-primer/metrics/precision_recall.py
deleted file mode 100644
index 8200b7ef51963ae218e3b871de270a826bf10459..0000000000000000000000000000000000000000
--- a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/metrics/precision_recall.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Precision/Recall (PR) from the paper "Improved Precision and Recall
-Metric for Assessing Generative Models". Matches the original implementation
-by Kynkaanniemi et al. at
-https://github.com/kynkaat/improved-precision-and-recall-metric/blob/master/precision_recall.py"""
-
-import torch
-from . import metric_utils
-
-#----------------------------------------------------------------------------
-
-def compute_distances(row_features, col_features, num_gpus, rank, col_batch_size):
- assert 0 <= rank < num_gpus
- num_cols = col_features.shape[0]
- num_batches = ((num_cols - 1) // col_batch_size // num_gpus + 1) * num_gpus
- col_batches = torch.nn.functional.pad(col_features, [0, 0, 0, -num_cols % num_batches]).chunk(num_batches)
- dist_batches = []
- for col_batch in col_batches[rank :: num_gpus]:
- dist_batch = torch.cdist(row_features.unsqueeze(0), col_batch.unsqueeze(0))[0]
- for src in range(num_gpus):
- dist_broadcast = dist_batch.clone()
- if num_gpus > 1:
- torch.distributed.broadcast(dist_broadcast, src=src)
- dist_batches.append(dist_broadcast.cpu() if rank == 0 else None)
- return torch.cat(dist_batches, dim=1)[:, :num_cols] if rank == 0 else None
-
-#----------------------------------------------------------------------------
-
-def compute_pr(opts, max_real, num_gen, nhood_size, row_batch_size, col_batch_size):
- detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
- detector_kwargs = dict(return_features=True)
-
- real_features = metric_utils.compute_feature_stats_for_dataset(
- opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
- rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all_torch().to(torch.float16).to(opts.device)
-
- gen_features = metric_utils.compute_feature_stats_for_generator(
- opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
- rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all_torch().to(torch.float16).to(opts.device)
-
- results = dict()
- for name, manifold, probes in [('precision', real_features, gen_features), ('recall', gen_features, real_features)]:
- kth = []
- for manifold_batch in manifold.split(row_batch_size):
- dist = compute_distances(row_features=manifold_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
- kth.append(dist.to(torch.float32).kthvalue(nhood_size + 1).values.to(torch.float16) if opts.rank == 0 else None)
- kth = torch.cat(kth) if opts.rank == 0 else None
- pred = []
- for probes_batch in probes.split(row_batch_size):
- dist = compute_distances(row_features=probes_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
- pred.append((dist <= kth).any(dim=1) if opts.rank == 0 else None)
- results[name] = float(torch.cat(pred).to(torch.float32).mean() if opts.rank == 0 else 'nan')
- return results['precision'], results['recall']
-
-#----------------------------------------------------------------------------
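For reference, a minimal self-contained sketch of the k-NN precision test implemented by the deleted module above, assuming plain feature vectors in place of the VGG16 embeddings and multi-GPU batching it normally uses:

import torch

def knn_precision(real_feats, gen_feats, nhood_size=3):
    # Radius of each real point's neighbourhood = distance to its nhood_size-th nearest
    # real neighbour (+1 because the nearest point to any sample is itself).
    real_dists = torch.cdist(real_feats, real_feats)
    radii = real_dists.kthvalue(nhood_size + 1, dim=1).values
    # A generated sample counts as "precise" if it lies inside at least one such ball.
    gen_to_real = torch.cdist(gen_feats, real_feats)
    covered = (gen_to_real <= radii.unsqueeze(0)).any(dim=1)
    return covered.float().mean().item()

real = torch.randn(1024, 64)
fake = torch.randn(512, 64)
print("precision:", knn_precision(real, fake))
print("recall:   ", knn_precision(fake, real))  # recall swaps the roles of the two sets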
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/benchmark.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/benchmark.py
deleted file mode 100644
index ac2f372a4b111ad40b8e720adea208608271bab6..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/benchmark.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-import numpy as np
-from itertools import count
-from typing import List, Tuple
-import torch
-import tqdm
-from fvcore.common.timer import Timer
-
-from detectron2.utils import comm
-
-from .build import build_batch_data_loader
-from .common import DatasetFromList, MapDataset
-from .samplers import TrainingSampler
-
-logger = logging.getLogger(__name__)
-
-
-class _EmptyMapDataset(torch.utils.data.Dataset):
- """
- Map anything to emptiness.
- """
-
- def __init__(self, dataset):
- self.ds = dataset
-
- def __len__(self):
- return len(self.ds)
-
- def __getitem__(self, idx):
- _ = self.ds[idx]
- return [0]
-
-
-def iter_benchmark(
- iterator, num_iter: int, warmup: int = 5, max_time_seconds: float = 60
-) -> Tuple[float, List[float]]:
- """
- Benchmark an iterator/iterable for `num_iter` iterations with an extra
- `warmup` iterations of warmup.
- End early if `max_time_seconds` time is spent on iterations.
-
- Returns:
- float: average time (seconds) per iteration
- list[float]: time spent on each iteration. Sometimes useful for further analysis.
- """
- num_iter, warmup = int(num_iter), int(warmup)
-
- iterator = iter(iterator)
- for _ in range(warmup):
- next(iterator)
- timer = Timer()
- all_times = []
- for curr_iter in tqdm.trange(num_iter):
- start = timer.seconds()
- if start > max_time_seconds:
- num_iter = curr_iter
- break
- next(iterator)
- all_times.append(timer.seconds() - start)
- avg = timer.seconds() / num_iter
- return avg, all_times
-
-
-class DataLoaderBenchmark:
- """
- Some common benchmarks that help understand perf bottleneck of a standard dataloader
- made of dataset, mapper and sampler.
- """
-
- def __init__(
- self,
- dataset,
- *,
- mapper,
- sampler=None,
- total_batch_size,
- num_workers=0,
- max_time_seconds: int = 90,
- ):
- """
- Args:
- max_time_seconds (int): maximum time to spent for each benchmark
- other args: same as in `build.py:build_detection_train_loader`
- """
- if isinstance(dataset, list):
- dataset = DatasetFromList(dataset, copy=False, serialize=True)
- if sampler is None:
- sampler = TrainingSampler(len(dataset))
-
- self.dataset = dataset
- self.mapper = mapper
- self.sampler = sampler
- self.total_batch_size = total_batch_size
- self.num_workers = num_workers
- self.per_gpu_batch_size = self.total_batch_size // comm.get_world_size()
-
- self.max_time_seconds = max_time_seconds
-
- def _benchmark(self, iterator, num_iter, warmup, msg=None):
- avg, all_times = iter_benchmark(iterator, num_iter, warmup, self.max_time_seconds)
- if msg is not None:
- self._log_time(msg, avg, all_times)
- return avg, all_times
-
- def _log_time(self, msg, avg, all_times, distributed=False):
- percentiles = [np.percentile(all_times, k, interpolation="nearest") for k in [1, 5, 95, 99]]
- if not distributed:
- logger.info(
- f"{msg}: avg={1.0/avg:.1f} it/s, "
- f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
- f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
- )
- return
- avg_per_gpu = comm.all_gather(avg)
- percentiles_per_gpu = comm.all_gather(percentiles)
- if comm.get_rank() > 0:
- return
- for idx, avg, percentiles in zip(count(), avg_per_gpu, percentiles_per_gpu):
- logger.info(
- f"GPU{idx} {msg}: avg={1.0/avg:.1f} it/s, "
- f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
- f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
- )
-
- def benchmark_dataset(self, num_iter, warmup=5):
- """
- Benchmark the speed of taking raw samples from the dataset.
- """
-
- def loader():
- while True:
- for k in self.sampler:
- yield self.dataset[k]
-
- self._benchmark(loader(), num_iter, warmup, "Dataset Alone")
-
- def benchmark_mapper(self, num_iter, warmup=5):
- """
- Benchmark the speed of taking raw samples from the dataset and map
- them in a single process.
- """
-
- def loader():
- while True:
- for k in self.sampler:
- yield self.mapper(self.dataset[k])
-
- self._benchmark(loader(), num_iter, warmup, "Single Process Mapper (sec/sample)")
-
- def benchmark_workers(self, num_iter, warmup=10):
- """
- Benchmark the dataloader by tuning num_workers to [0, 1, self.num_workers].
- """
- candidates = [0, 1]
- if self.num_workers not in candidates:
- candidates.append(self.num_workers)
-
- dataset = MapDataset(self.dataset, self.mapper)
- for n in candidates:
- loader = build_batch_data_loader(
- dataset,
- self.sampler,
- self.total_batch_size,
- num_workers=n,
- )
- self._benchmark(
- iter(loader),
- num_iter * max(n, 1),
- warmup * max(n, 1),
- f"DataLoader ({n} workers, bs={self.per_gpu_batch_size})",
- )
- del loader
-
- def benchmark_IPC(self, num_iter, warmup=10):
- """
- Benchmark the dataloader where each worker outputs nothing. This
- eliminates the IPC overhead compared to the regular dataloader.
-
- PyTorch multiprocessing's IPC only optimizes for torch tensors.
- Large numpy arrays or other data structure may incur large IPC overhead.
- """
- n = self.num_workers
- dataset = _EmptyMapDataset(MapDataset(self.dataset, self.mapper))
- loader = build_batch_data_loader(
- dataset, self.sampler, self.total_batch_size, num_workers=n
- )
- self._benchmark(
- iter(loader),
- num_iter * max(n, 1),
- warmup * max(n, 1),
- f"DataLoader ({n} workers, bs={self.per_gpu_batch_size}) w/o comm",
- )
-
- def benchmark_distributed(self, num_iter, warmup=10):
- """
- Benchmark the dataloader in each distributed worker, and log results of
- all workers. This helps understand the final performance as well as
- the variances among workers.
-
- It also prints startup time (first iter) of the dataloader.
- """
- gpu = comm.get_world_size()
- dataset = MapDataset(self.dataset, self.mapper)
- n = self.num_workers
- loader = build_batch_data_loader(
- dataset, self.sampler, self.total_batch_size, num_workers=n
- )
-
- timer = Timer()
- loader = iter(loader)
- next(loader)
- startup_time = timer.seconds()
- logger.info("Dataloader startup time: {:.2f} seconds".format(startup_time))
-
- comm.synchronize()
-
- avg, all_times = self._benchmark(loader, num_iter * max(n, 1), warmup * max(n, 1))
- del loader
- self._log_time(
- f"DataLoader ({gpu} GPUs x {n} workers, total bs={self.total_batch_size})",
- avg,
- all_times,
- True,
- )
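A small usage sketch for the iter_benchmark helper above, assuming it is importable from this module; the sleeping generator just stands in for a real data loader:

import time

def fake_loader():
    while True:
        time.sleep(0.01)   # pretend each sample takes ~10 ms to produce
        yield {"image": None}

avg, all_times = iter_benchmark(fake_loader(), num_iter=100, warmup=5)
print(f"{1.0 / avg:.1f} it/s, slowest iteration {max(all_times) * 1000:.1f} ms")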
diff --git "a/spaces/bugbounted/Whisper-Auto-Subtitled-Video-Generator/pages/04_\360\237\224\212_Upload_Audio_File.py" "b/spaces/bugbounted/Whisper-Auto-Subtitled-Video-Generator/pages/04_\360\237\224\212_Upload_Audio_File.py"
deleted file mode 100644
index 165811a931340714baed021471efe7063007c13e..0000000000000000000000000000000000000000
--- "a/spaces/bugbounted/Whisper-Auto-Subtitled-Video-Generator/pages/04_\360\237\224\212_Upload_Audio_File.py"
+++ /dev/null
@@ -1,205 +0,0 @@
-import whisper
-import streamlit as st
-from streamlit_lottie import st_lottie
-from utils import write_vtt, write_srt
-import ffmpeg
-import requests
-from typing import Iterator
-from io import StringIO
-import numpy as np
-import pathlib
-import os
-
-st.set_page_config(page_title="رونویس خودکار", page_icon="🔊", layout="wide")
-
-# Define a function that we can use to load lottie files from a link.
-@st.cache(allow_output_mutation=True)
-def load_lottieurl(url: str):
- r = requests.get(url)
- if r.status_code != 200:
- return None
- return r.json()
-
-
-APP_DIR = pathlib.Path(__file__).parent.absolute()
-
-LOCAL_DIR = APP_DIR / "local_audio"
-LOCAL_DIR.mkdir(exist_ok=True)
-save_dir = LOCAL_DIR / "output"
-save_dir.mkdir(exist_ok=True)
-
-
-col1, col2 = st.columns([1, 3])
-with col1:
- lottie = load_lottieurl("https://assets1.lottiefiles.com/packages/lf20_1xbk4d2v.json")
- st_lottie(lottie)
-
-with col2:
- st.write("""
- ## رونویس خودکار
- ##### یک فایل صوتی وارد کنید و رونوشت بگیرید.
- ###### ➠ اگر می خواهید صدا را به زبان اصلی آن رونویسی کنید، کار را به عنوان "Transcribe" انتخاب کنید.
- ###### ➠ اگر می خواهید رونویسی را به انگلیسی ترجمه کنید، کار را به عنوان "Translate" انتخاب کنید.
- ###### توصیه می کنم از مدل پایه شروع کنید و سپس با مدل های بزرگتر آزمایش کنید، مدل های کوچک و متوسط اغلب به خوبی کار می کنند. """)
-
-loaded_model = whisper.load_model("base")
-current_size = "None"
-
-
-@st.cache(allow_output_mutation=True)
-def change_model(current_size, size):
- if current_size != size:
- loaded_model = whisper.load_model(size)
- return loaded_model
- else:
- raise Exception("Model size is the same as the current size.")
-
-@st.cache(allow_output_mutation=True)
-def inference(loaded_model, uploaded_file, task):
- with open(f"{save_dir}/input.mp3", "wb") as f:
- f.write(uploaded_file.read())
- audio = ffmpeg.input(f"{save_dir}/input.mp3")
- audio = ffmpeg.output(audio, f"{save_dir}/output.wav", acodec="pcm_s16le", ac=1, ar="16k")
- ffmpeg.run(audio, overwrite_output=True)
- if task == "Transcribe":
- options = dict(task="transcribe", best_of=5)
- results = loaded_model.transcribe(f"{save_dir}/output.wav", **options)
- vtt = getSubs(results["segments"], "vtt", 80)
- srt = getSubs(results["segments"], "srt", 80)
- lang = results["language"]
- return results["text"], vtt, srt, lang
- elif task == "Translate":
- options = dict(task="translate", best_of=5)
- results = loaded_model.transcribe(f"{save_dir}/output.wav", **options)
- vtt = getSubs(results["segments"], "vtt", 80)
- srt = getSubs(results["segments"], "srt", 80)
- lang = results["language"]
- return results["text"], vtt, srt, lang
- else:
- raise ValueError("Task not supported")
-
-
-def getSubs(segments: Iterator[dict], format: str, maxLineWidth: int) -> str:
- segmentStream = StringIO()
-
- if format == 'vtt':
- write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
- elif format == 'srt':
- write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
- else:
- raise Exception("Unknown format " + format)
-
- segmentStream.seek(0)
- return segmentStream.read()
-
-
-def main():
- size = st.selectbox("اندازه مدل را انتخاب کنید (هرچه مدل بزرگتر باشد، رونویسی دقیق تر خواهد بود، اما زمان بیشتری طول می کشد)", ["tiny", "base", "small", "medium", "large"], index=1)
- loaded_model = change_model(current_size, size)
- st.write(f"Model is {'multilingual' if loaded_model.is_multilingual else 'English-only'} "
- f"and has {sum(np.prod(p.shape) for p in loaded_model.parameters()):,} parameters.")
- input_file = st.file_uploader("Upload an audio file", type=["mp3", "wav", "m4a"])
- if input_file is not None:
- filename = input_file.name[:-4]
- else:
- filename = None
- task = st.selectbox("Select Task", ["Transcribe", "Translate"], index=0)
- if task == "Transcribe":
- if st.button("Transcribe"):
-            results = inference(loaded_model, input_file, task)
- col3, col4 = st.columns(2)
- col5, col6, col7 = st.columns(3)
- col9, col10 = st.columns(2)
-
- with col3:
- st.audio(input_file)
-
- with open("transcript.txt", "w+", encoding='utf8') as f:
- f.writelines(results[0])
- f.close()
- with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
- datatxt = f.read()
-
-
- with open("transcript.vtt", "w+",encoding='utf8') as f:
- f.writelines(results[1])
- f.close()
- with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
- datavtt = f.read()
-
- with open("transcript.srt", "w+",encoding='utf8') as f:
- f.writelines(results[2])
- f.close()
- with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
- datasrt = f.read()
-
- with col5:
- st.download_button(label="Download Transcript (.txt)",
- data=datatxt,
- file_name="transcript.txt")
- with col6:
- st.download_button(label="Download Transcript (.vtt)",
- data=datavtt,
- file_name="transcript.vtt")
- with col7:
- st.download_button(label="Download Transcript (.srt)",
- data=datasrt,
- file_name="transcript.srt")
- with col9:
- st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
- with col10:
- st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the video again.")
-
- elif task == "Translate":
- if st.button("Translate to English"):
-            results = inference(loaded_model, input_file, task)
- col3, col4 = st.columns(2)
- col5, col6, col7 = st.columns(3)
- col9, col10 = st.columns(2)
-
- with col3:
- st.audio(input_file)
-
- with open("transcript.txt", "w+", encoding='utf8') as f:
- f.writelines(results[0])
- f.close()
- with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
- datatxt = f.read()
-
-
- with open("transcript.vtt", "w+",encoding='utf8') as f:
- f.writelines(results[1])
- f.close()
- with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
- datavtt = f.read()
-
- with open("transcript.srt", "w+",encoding='utf8') as f:
- f.writelines(results[2])
- f.close()
- with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
- datasrt = f.read()
-
- with col5:
- st.download_button(label="Download Transcript (.txt)",
- data=datatxt,
- file_name="transcript.txt")
- with col6:
- st.download_button(label="Download Transcript (.vtt)",
- data=datavtt,
- file_name="transcript.vtt")
- with col7:
- st.download_button(label="Download Transcript (.srt)",
- data=datasrt,
- file_name="transcript.srt")
- with col9:
- st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
- with col10:
- st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the video again.")
-
- else:
- st.error("Please select a task.")
-
-
-if __name__ == "__main__":
- main()
- st.markdown("###### Made with :heart: by [@bugbounted](https://github.com/bugbounted)")
\ No newline at end of file
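The core Whisper calls that the Streamlit page above wraps, stripped of the UI; a minimal sketch assuming the openai-whisper package and a local audio file (placeholder path):

import whisper

model = whisper.load_model("base")                        # tiny/base/small/medium/large
result = model.transcribe("audio.mp3", task="transcribe", best_of=5)

print("detected language:", result["language"])
print(result["text"])
for seg in result["segments"]:                            # segment timestamps feed the SRT/VTT writers
    print(f'{seg["start"]:7.2f} --> {seg["end"]:7.2f}  {seg["text"].strip()}')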
diff --git a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/clap/open_clip/transform.py b/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/clap/open_clip/transform.py
deleted file mode 100644
index 77aaa722c4a5544ac50de6df35d3e922f63b111d..0000000000000000000000000000000000000000
--- a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/clap/open_clip/transform.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from torchvision.transforms import (
- Normalize,
- Compose,
- RandomResizedCrop,
- InterpolationMode,
- ToTensor,
- Resize,
- CenterCrop,
-)
-
-
-def _convert_to_rgb(image):
- return image.convert("RGB")
-
-
-def image_transform(
- image_size: int,
- is_train: bool,
- mean=(0.48145466, 0.4578275, 0.40821073),
- std=(0.26862954, 0.26130258, 0.27577711),
-):
- normalize = Normalize(mean=mean, std=std)
- if is_train:
- return Compose(
- [
- RandomResizedCrop(
- image_size,
- scale=(0.9, 1.0),
- interpolation=InterpolationMode.BICUBIC,
- ),
- _convert_to_rgb,
- ToTensor(),
- normalize,
- ]
- )
- else:
- return Compose(
- [
- Resize(image_size, interpolation=InterpolationMode.BICUBIC),
- CenterCrop(image_size),
- _convert_to_rgb,
- ToTensor(),
- normalize,
- ]
- )
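A minimal usage sketch for the image_transform factory above, assuming a local image file (placeholder path):

from PIL import Image

preprocess = image_transform(image_size=224, is_train=False)
img = Image.open("example.jpg")
tensor = preprocess(img)          # 3 x 224 x 224 float tensor, normalised with the defaults above
batch = tensor.unsqueeze(0)       # add a batch dimension before feeding a model
print(batch.shape)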
diff --git a/spaces/candlend/vits-hoshimi/sovits/train.py b/spaces/candlend/vits-hoshimi/sovits/train.py
deleted file mode 100644
index 97557410edb18717b0330c602fbaa9984f647b13..0000000000000000000000000000000000000000
--- a/spaces/candlend/vits-hoshimi/sovits/train.py
+++ /dev/null
@@ -1,281 +0,0 @@
-import logging
-logging.getLogger('matplotlib').setLevel(logging.WARNING)
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-
-import commons
-import utils
-from data_utils import TextAudioSpeakerLoader, EvalDataLoader
-from models import (
- SynthesizerTrn,
- MultiPeriodDiscriminator,
-)
-from losses import (
- kl_loss,
- generator_loss, discriminator_loss, feature_loss
-)
-
-from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-
-torch.backends.cudnn.benchmark = True
-global_step = 0
-
-
-# os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO'
-
-
-def main():
- """Assume Single Node Multi GPUs Training Only"""
- assert torch.cuda.is_available(), "CPU training is not allowed."
- hps = utils.get_hparams()
-
- n_gpus = torch.cuda.device_count()
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = hps.train.port
-
- mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
-def run(rank, n_gpus, hps):
- global global_step
- if rank == 0:
- logger = utils.get_logger(hps.model_dir)
- logger.info(hps)
- utils.check_git_hash(hps.model_dir)
- writer = SummaryWriter(log_dir=hps.model_dir)
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
- dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
- torch.manual_seed(hps.train.seed)
- torch.cuda.set_device(rank)
-
- train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps)
- train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
- batch_size=hps.train.batch_size)
- if rank == 0:
- eval_dataset = EvalDataLoader(hps.data.validation_files, hps)
- eval_loader = DataLoader(eval_dataset, num_workers=1, shuffle=False,
- batch_size=1, pin_memory=False,
- drop_last=False)
-
- net_g = SynthesizerTrn(
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- **hps.model).cuda(rank)
- net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
- optim_g = torch.optim.AdamW(
- net_g.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- optim_d = torch.optim.AdamW(
- net_d.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- net_g = DDP(net_g, device_ids=[rank]) # , find_unused_parameters=True)
- net_d = DDP(net_d, device_ids=[rank])
-
- try:
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
- optim_g)
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
- optim_d)
- global_step = (epoch_str - 1) * len(train_loader)
- except:
- epoch_str = 1
- global_step = 0
-
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
-
- scaler = GradScaler(enabled=hps.train.fp16_run)
-
- for epoch in range(epoch_str, hps.train.epochs + 1):
- if rank == 0:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
- [train_loader, eval_loader], logger, [writer, writer_eval])
- else:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
- [train_loader, None], None, None)
- scheduler_g.step()
- scheduler_d.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
- net_g, net_d = nets
- optim_g, optim_d = optims
- scheduler_g, scheduler_d = schedulers
- train_loader, eval_loader = loaders
- if writers is not None:
- writer, writer_eval = writers
-
- # train_loader.batch_sampler.set_epoch(epoch)
- global global_step
-
- net_g.train()
- net_d.train()
- for batch_idx, items in enumerate(train_loader):
- c, f0, spec, y, spk = items
- g = spk.cuda(rank, non_blocking=True)
- spec, y = spec.cuda(rank, non_blocking=True), y.cuda(rank, non_blocking=True)
- c = c.cuda(rank, non_blocking=True)
- f0 = f0.cuda(rank, non_blocking=True)
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
-
- with autocast(enabled=hps.train.fp16_run):
- y_hat, ids_slice, z_mask, \
- (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(c, f0, spec, g=g, mel=mel)
-
- y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
- y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
- # Discriminator
- y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
-
- with autocast(enabled=False):
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
- loss_disc_all = loss_disc
-
- optim_d.zero_grad()
- scaler.scale(loss_disc_all).backward()
- scaler.unscale_(optim_d)
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
- scaler.step(optim_d)
-
- with autocast(enabled=hps.train.fp16_run):
- # Generator
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
- with autocast(enabled=False):
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
- loss_fm = feature_loss(fmap_r, fmap_g)
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl
- optim_g.zero_grad()
- scaler.scale(loss_gen_all).backward()
- scaler.unscale_(optim_g)
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
- scaler.step(optim_g)
- scaler.update()
-
- if rank == 0:
- if global_step % hps.train.log_interval == 0:
- lr = optim_g.param_groups[0]['lr']
- losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl]
- logger.info('Train Epoch: {} [{:.0f}%]'.format(
- epoch,
- 100. * batch_idx / len(train_loader)))
- logger.info([x.item() for x in losses] + [global_step, lr])
-
- scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr,
- "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
- scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl})
-
- scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
- scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
- scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
- image_dict = {
- "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
- "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
- "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
- }
-
- utils.summarize(
- writer=writer,
- global_step=global_step,
- images=image_dict,
- scalars=scalar_dict
- )
-
- if global_step % hps.train.eval_interval == 0:
- evaluate(hps, net_g, eval_loader, writer_eval)
- utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
- utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
- global_step += 1
-
- if rank == 0:
- logger.info('====> Epoch: {}'.format(epoch))
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
- generator.eval()
- image_dict = {}
- audio_dict = {}
- with torch.no_grad():
- for batch_idx, items in enumerate(eval_loader):
- c, f0, spec, y, spk = items
- g = spk[:1].cuda(0)
- spec, y = spec[:1].cuda(0), y[:1].cuda(0)
- c = c[:1].cuda(0)
- f0 = f0[:1].cuda(0)
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_hat = generator.module.infer(c, f0, g=g, mel=mel)
-
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1).float(),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
-
- audio_dict.update({
- f"gen/audio_{batch_idx}": y_hat[0],
- f"gt/audio_{batch_idx}": y[0]
- })
- image_dict.update({
- f"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()),
- "gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())
- })
- utils.summarize(
- writer=writer_eval,
- global_step=global_step,
- images=image_dict,
- audios=audio_dict,
- audio_sampling_rate=hps.data.sampling_rate
- )
- generator.train()
-
-
-if __name__ == "__main__":
- main()
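For reference, the mixed-precision update pattern used in the trainer above, reduced to a self-contained sketch with a toy model and loss standing in for the generator (requires a CUDA device, as the trainer itself does):

import torch
from torch.cuda.amp import autocast, GradScaler

model = torch.nn.Linear(10, 1).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
scaler = GradScaler(enabled=True)

x = torch.randn(8, 10, device="cuda")
target = torch.randn(8, 1, device="cuda")

with autocast(enabled=True):                 # forward pass and loss computed in fp16 where safe
    loss = torch.nn.functional.l1_loss(model(x), target)

optimizer.zero_grad()
scaler.scale(loss).backward()                # scaled backward to avoid fp16 gradient underflow
scaler.unscale_(optimizer)                   # unscale so gradients can be clipped in fp32
torch.nn.utils.clip_grad_value_(model.parameters(), 1.0)
scaler.step(optimizer)                       # skips the step if inf/nan gradients were found
scaler.update()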
diff --git a/spaces/ccolas/TastyPiano/src/music/representation_learning/sentence_transfo/sentence_transformers/util.py b/spaces/ccolas/TastyPiano/src/music/representation_learning/sentence_transfo/sentence_transformers/util.py
deleted file mode 100644
index 392e64fc86adbd6cd6e1d53af8ac6681d699e1f8..0000000000000000000000000000000000000000
--- a/spaces/ccolas/TastyPiano/src/music/representation_learning/sentence_transfo/sentence_transformers/util.py
+++ /dev/null
@@ -1,525 +0,0 @@
-import requests
-import torch
-from torch import Tensor, device
-from typing import List, Callable
-from tqdm.autonotebook import tqdm
-import sys
-import importlib
-import os
-import torch
-import numpy as np
-import queue
-import logging
-
-
-logger = logging.getLogger(__name__)
-
-def pytorch_cos_sim(a: Tensor, b: Tensor):
- """
- Computes the cosine similarity cos_sim(a[i], b[j]) for all i and j.
- :return: Matrix with res[i][j] = cos_sim(a[i], b[j])
- """
- return cos_sim(a, b)
-
-def cos_sim(a: Tensor, b: Tensor):
- """
- Computes the cosine similarity cos_sim(a[i], b[j]) for all i and j.
- :return: Matrix with res[i][j] = cos_sim(a[i], b[j])
- """
- if not isinstance(a, torch.Tensor):
- a = torch.tensor(a)
-
- if not isinstance(b, torch.Tensor):
- b = torch.tensor(b)
-
- if len(a.shape) == 1:
- a = a.unsqueeze(0)
-
- if len(b.shape) == 1:
- b = b.unsqueeze(0)
-
- a_norm = torch.nn.functional.normalize(a, p=2, dim=1)
- b_norm = torch.nn.functional.normalize(b, p=2, dim=1)
- return torch.mm(a_norm, b_norm.transpose(0, 1))
-
-
-def dot_score(a: Tensor, b: Tensor):
- """
- Computes the dot-product dot_prod(a[i], b[j]) for all i and j.
- :return: Matrix with res[i][j] = dot_prod(a[i], b[j])
- """
- if not isinstance(a, torch.Tensor):
- a = torch.tensor(a)
-
- if not isinstance(b, torch.Tensor):
- b = torch.tensor(b)
-
- if len(a.shape) == 1:
- a = a.unsqueeze(0)
-
- if len(b.shape) == 1:
- b = b.unsqueeze(0)
-
- return torch.mm(a, b.transpose(0, 1))
-
-
-def pairwise_dot_score(a: Tensor, b: Tensor):
- """
- Computes the pairwise dot-product dot_prod(a[i], b[i])
- :return: Vector with res[i] = dot_prod(a[i], b[i])
- """
- if not isinstance(a, torch.Tensor):
- a = torch.tensor(a)
-
- if not isinstance(b, torch.Tensor):
- b = torch.tensor(b)
-
- return (a * b).sum(dim=-1)
-
-
-def pairwise_cos_sim(a: Tensor, b: Tensor):
- """
- Computes the pairwise cossim cos_sim(a[i], b[i])
- :return: Vector with res[i] = cos_sim(a[i], b[i])
- """
- if not isinstance(a, torch.Tensor):
- a = torch.tensor(a)
-
- if not isinstance(b, torch.Tensor):
- b = torch.tensor(b)
-
- return pairwise_dot_score(normalize_embeddings(a), normalize_embeddings(b))
-
-
-def normalize_embeddings(embeddings: Tensor):
- """
- Normalizes the embeddings matrix, so that each sentence embedding has unit length
- """
- return torch.nn.functional.normalize(embeddings, p=2, dim=1)
-
-
-def paraphrase_mining(model,
- sentences: List[str],
- show_progress_bar: bool = False,
- batch_size:int = 32,
- *args,
- **kwargs):
- """
- Given a list of sentences / texts, this function performs paraphrase mining. It compares all sentences against all
- other sentences and returns a list with the pairs that have the highest cosine similarity score.
-
- :param model: SentenceTransformer model for embedding computation
- :param sentences: A list of strings (texts or sentences)
- :param show_progress_bar: Plotting of a progress bar
- :param batch_size: Number of texts that are encoded simultaneously by the model
- :param query_chunk_size: Search for most similar pairs for #query_chunk_size at the same time. Decrease, to lower memory footprint (increases run-time).
- :param corpus_chunk_size: Compare a sentence simultaneously against #corpus_chunk_size other sentences. Decrease, to lower memory footprint (increases run-time).
- :param max_pairs: Maximal number of text pairs returned.
- :param top_k: For each sentence, we retrieve up to top_k other sentences
- :param score_function: Function for computing scores. By default, cosine similarity.
- :return: Returns a list of triplets with the format [score, id1, id2]
- """
-
- # Compute embedding for the sentences
- embeddings = model.encode(sentences, show_progress_bar=show_progress_bar, batch_size=batch_size, convert_to_tensor=True)
-
- return paraphrase_mining_embeddings(embeddings, *args, **kwargs)
-
-
-def paraphrase_mining_embeddings(embeddings: Tensor,
- query_chunk_size: int = 5000,
- corpus_chunk_size: int = 100000,
- max_pairs: int = 500000,
- top_k: int = 100,
- score_function: Callable[[Tensor, Tensor], Tensor] = cos_sim):
- """
- Given a list of sentences / texts, this function performs paraphrase mining. It compares all sentences against all
- other sentences and returns a list with the pairs that have the highest cosine similarity score.
-
- :param embeddings: A tensor with the embeddings
- :param query_chunk_size: Search for most similar pairs for #query_chunk_size at the same time. Decrease, to lower memory footprint (increases run-time).
- :param corpus_chunk_size: Compare a sentence simultaneously against #corpus_chunk_size other sentences. Decrease, to lower memory footprint (increases run-time).
- :param max_pairs: Maximal number of text pairs returned.
- :param top_k: For each sentence, we retrieve up to top_k other sentences
- :param score_function: Function for computing scores. By default, cosine similarity.
- :return: Returns a list of triplets with the format [score, id1, id2]
- """
-
-    top_k += 1  # A sentence has the highest similarity to itself. Increase by 1 as we are interested in distinct pairs
-
- # Mine for duplicates
- pairs = queue.PriorityQueue()
- min_score = -1
- num_added = 0
-
- for corpus_start_idx in range(0, len(embeddings), corpus_chunk_size):
- for query_start_idx in range(0, len(embeddings), query_chunk_size):
- scores = score_function(embeddings[query_start_idx:query_start_idx+query_chunk_size], embeddings[corpus_start_idx:corpus_start_idx+corpus_chunk_size])
-
- scores_top_k_values, scores_top_k_idx = torch.topk(scores, min(top_k, len(scores[0])), dim=1, largest=True, sorted=False)
- scores_top_k_values = scores_top_k_values.cpu().tolist()
- scores_top_k_idx = scores_top_k_idx.cpu().tolist()
-
- for query_itr in range(len(scores)):
- for top_k_idx, corpus_itr in enumerate(scores_top_k_idx[query_itr]):
- i = query_start_idx + query_itr
- j = corpus_start_idx + corpus_itr
-
- if i != j and scores_top_k_values[query_itr][top_k_idx] > min_score:
- pairs.put((scores_top_k_values[query_itr][top_k_idx], i, j))
- num_added += 1
-
- if num_added >= max_pairs:
- entry = pairs.get()
- min_score = entry[0]
-
- # Get the pairs
- added_pairs = set() # Used for duplicate detection
- pairs_list = []
- while not pairs.empty():
- score, i, j = pairs.get()
- sorted_i, sorted_j = sorted([i, j])
-
- if sorted_i != sorted_j and (sorted_i, sorted_j) not in added_pairs:
- added_pairs.add((sorted_i, sorted_j))
- pairs_list.append([score, i, j])
-
- # Highest scores first
- pairs_list = sorted(pairs_list, key=lambda x: x[0], reverse=True)
- return pairs_list
-
-
-def information_retrieval(*args, **kwargs):
- """This function is deprecated. Use semantic_search instead"""
- return semantic_search(*args, **kwargs)
-
-
-def semantic_search(query_embeddings: Tensor,
- corpus_embeddings: Tensor,
- query_chunk_size: int = 100,
- corpus_chunk_size: int = 500000,
- top_k: int = 10,
- score_function: Callable[[Tensor, Tensor], Tensor] = cos_sim):
- """
- This function performs a cosine similarity search between a list of query embeddings and a list of corpus embeddings.
- It can be used for Information Retrieval / Semantic Search for corpora up to about 1 Million entries.
-
- :param query_embeddings: A 2 dimensional tensor with the query embeddings.
- :param corpus_embeddings: A 2 dimensional tensor with the corpus embeddings.
- :param query_chunk_size: Process 100 queries simultaneously. Increasing that value increases the speed, but requires more memory.
- :param corpus_chunk_size: Scans the corpus 100k entries at a time. Increasing that value increases the speed, but requires more memory.
- :param top_k: Retrieve top k matching entries.
- :param score_function: Function for computing scores. By default, cosine similarity.
- :return: Returns a list with one entry for each query. Each entry is a list of dictionaries with the keys 'corpus_id' and 'score', sorted by decreasing cosine similarity scores.
- """
-
- if isinstance(query_embeddings, (np.ndarray, np.generic)):
- query_embeddings = torch.from_numpy(query_embeddings)
- elif isinstance(query_embeddings, list):
- query_embeddings = torch.stack(query_embeddings)
-
- if len(query_embeddings.shape) == 1:
- query_embeddings = query_embeddings.unsqueeze(0)
-
- if isinstance(corpus_embeddings, (np.ndarray, np.generic)):
- corpus_embeddings = torch.from_numpy(corpus_embeddings)
- elif isinstance(corpus_embeddings, list):
- corpus_embeddings = torch.stack(corpus_embeddings)
-
-
- #Check that corpus and queries are on the same device
- if corpus_embeddings.device != query_embeddings.device:
- query_embeddings = query_embeddings.to(corpus_embeddings.device)
-
- queries_result_list = [[] for _ in range(len(query_embeddings))]
-
- for query_start_idx in range(0, len(query_embeddings), query_chunk_size):
- # Iterate over chunks of the corpus
- for corpus_start_idx in range(0, len(corpus_embeddings), corpus_chunk_size):
- # Compute cosine similarities
- cos_scores = score_function(query_embeddings[query_start_idx:query_start_idx+query_chunk_size], corpus_embeddings[corpus_start_idx:corpus_start_idx+corpus_chunk_size])
-
- # Get top-k scores
- cos_scores_top_k_values, cos_scores_top_k_idx = torch.topk(cos_scores, min(top_k, len(cos_scores[0])), dim=1, largest=True, sorted=False)
- cos_scores_top_k_values = cos_scores_top_k_values.cpu().tolist()
- cos_scores_top_k_idx = cos_scores_top_k_idx.cpu().tolist()
-
- for query_itr in range(len(cos_scores)):
- for sub_corpus_id, score in zip(cos_scores_top_k_idx[query_itr], cos_scores_top_k_values[query_itr]):
- corpus_id = corpus_start_idx + sub_corpus_id
- query_id = query_start_idx + query_itr
- queries_result_list[query_id].append({'corpus_id': corpus_id, 'score': score})
-
- #Sort and strip to top_k results
- for idx in range(len(queries_result_list)):
- queries_result_list[idx] = sorted(queries_result_list[idx], key=lambda x: x['score'], reverse=True)
- queries_result_list[idx] = queries_result_list[idx][0:top_k]
-
- return queries_result_list
-
-
-def http_get(url, path):
- """
- Downloads a URL to a given path on disc
- """
- if os.path.dirname(path) != '':
- os.makedirs(os.path.dirname(path), exist_ok=True)
-
- req = requests.get(url, stream=True)
- if req.status_code != 200:
- print("Exception when trying to download {}. Response {}".format(url, req.status_code), file=sys.stderr)
- req.raise_for_status()
- return
-
- download_filepath = path+"_part"
- with open(download_filepath, "wb") as file_binary:
- content_length = req.headers.get('Content-Length')
- total = int(content_length) if content_length is not None else None
- progress = tqdm(unit="B", total=total, unit_scale=True)
- for chunk in req.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
- progress.update(len(chunk))
- file_binary.write(chunk)
-
- os.rename(download_filepath, path)
- progress.close()
-
-
-def batch_to_device(batch, target_device: device):
- """
- send a pytorch batch to a device (CPU/GPU)
- """
- for key in batch:
- if isinstance(batch[key], Tensor):
- batch[key] = batch[key].to(target_device)
- return batch
-
-
-# from https://github.com/vlkit/vlkit/blob/master/vlkit/ops/distributed.py
-class AllGather(torch.autograd.Function):
- """
- all_gather with gradient back-propagation
- """
- @staticmethod
- def forward(ctx, tensor_list, tensor, group, async_op):
- torch.distributed.all_gather(tensor_list, tensor, group=group, async_op=async_op)
- return tuple(tensor_list)
-
- @staticmethod
- def backward(ctx, *grad_list):
- grad_list = list(grad_list)
- rank = torch.distributed.get_rank()
-
- dist_ops = [
- torch.distributed.reduce(grad_list[i], i, async_op=True) for i in range(torch.distributed.get_world_size())
- ]
-
- for op in dist_ops:
- op.wait()
-
- return None, grad_list[rank], None, None
-
-
-all_gather_with_grad = AllGather.apply
-
-
-def mismatched_sizes_all_gather(tensor: Tensor, group=None, async_op=False, mismatched_axis=0):
- # all_gather doesn't support tensor lists where the first dimension is mismatched. This does.
- assert torch.distributed.is_initialized(), "torch.distributed not initialized"
- world_size = torch.distributed.get_world_size()
- # let's get the sizes for everyone
- mismatched_sizes = torch.tensor([tensor.shape[mismatched_axis]], dtype=torch.int64, device="cuda")
- sizes = [torch.zeros_like(mismatched_sizes) for _ in range(world_size)]
- torch.distributed.all_gather(sizes, mismatched_sizes, group=group, async_op=async_op)
- sizes = torch.cat(sizes).cpu().tolist()
- # now pad to the max dim-0 size
- max_size = max(sizes)
- padded = torch.zeros((*tensor.shape[:mismatched_axis], max_size, *tensor.shape[mismatched_axis+1:]),
- device=tensor.device, dtype=tensor.dtype)
- # selects the place where we're adding information
- padded_to_fill = padded.narrow(mismatched_axis, 0, tensor.shape[mismatched_axis])
- padded_to_fill[...] = tensor
- # gather the padded tensors
- tensor_list = [torch.zeros(padded.shape, device=padded.device, dtype=padded.dtype) for _ in range(world_size)]
- all_gather_with_grad(tensor_list, padded, group, async_op)
- # trim off the padding
- for rank in range(world_size):
- # checks that the rest is 0
- assert not tensor_list[rank].narrow(mismatched_axis, sizes[rank], padded.shape[mismatched_axis]-sizes[rank]).count_nonzero().is_nonzero(), \
- "This would remove non-padding information"
- tensor_list[rank] = tensor_list[rank].narrow(mismatched_axis, 0, sizes[rank])
- return tensor_list
-
-
-def fullname(o):
- """
- Gives a full name (package_name.class_name) for a class / object in Python. Will
- be used to load the correct classes from JSON files
- """
-
- module = o.__class__.__module__
- if module is None or module == str.__class__.__module__:
- return o.__class__.__name__ # Avoid reporting __builtin__
- else:
- return module + '.' + o.__class__.__name__
-
-def import_from_string(dotted_path):
- """
- Import a dotted module path and return the attribute/class designated by the
- last name in the path. Raise ImportError if the import failed.
- """
- try:
- module_path, class_name = dotted_path.rsplit('.', 1)
- except ValueError:
- msg = "%s doesn't look like a module path" % dotted_path
- raise ImportError(msg)
-
- try:
- module = importlib.import_module(dotted_path)
- except:
- module = importlib.import_module(module_path)
-
- try:
- return getattr(module, class_name)
- except AttributeError:
- msg = 'Module "%s" does not define a "%s" attribute/class' % (module_path, class_name)
- raise ImportError(msg)
-
-
-def community_detection(embeddings, threshold=0.75, min_community_size=10, init_max_size=1000):
- """
- Function for Fast Community Detection
-
- Finds in the embeddings all communities, i.e. embeddings that are close (closer than threshold).
-
- Returns only communities that are larger than min_community_size. The communities are returned
- in decreasing order. The first element in each list is the central point in the community.
- """
-
- # Maximum size for community
- init_max_size = min(init_max_size, len(embeddings))
-
- # Compute cosine similarity scores
- cos_scores = cos_sim(embeddings, embeddings)
-
- # Minimum size for a community
- top_k_values, _ = cos_scores.topk(k=min_community_size, largest=True)
-
- # Filter for rows >= min_threshold
- extracted_communities = []
- for i in range(len(top_k_values)):
- if top_k_values[i][-1] >= threshold:
- new_cluster = []
-
- # Only check top k most similar entries
- top_val_large, top_idx_large = cos_scores[i].topk(k=init_max_size, largest=True)
- top_idx_large = top_idx_large.tolist()
- top_val_large = top_val_large.tolist()
-
- if top_val_large[-1] < threshold:
- for idx, val in zip(top_idx_large, top_val_large):
- if val < threshold:
- break
-
- new_cluster.append(idx)
- else:
- # Iterate over all entries (slow)
- for idx, val in enumerate(cos_scores[i].tolist()):
- if val >= threshold:
- new_cluster.append(idx)
-
- extracted_communities.append(new_cluster)
-
- # Largest cluster first
- extracted_communities = sorted(extracted_communities, key=lambda x: len(x), reverse=True)
-
- # Step 2) Remove overlapping communities
- unique_communities = []
- extracted_ids = set()
-
- for community in extracted_communities:
- add_cluster = True
- for idx in community:
- if idx in extracted_ids:
- add_cluster = False
- break
-
- if add_cluster:
- unique_communities.append(community)
- for idx in community:
- extracted_ids.add(idx)
-
- return unique_communities
-
-
-##################
-#
-######################
-
-from typing import Dict, Optional, Union
-from pathlib import Path
-from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
-from huggingface_hub import HfApi, hf_hub_url, cached_download
-# from huggingface_hub.snapshot_download import REPO_ID_SEPARATOR
-import fnmatch
-
-def snapshot_download(
- repo_id: str,
- revision: Optional[str] = None,
- cache_dir: Union[str, Path, None] = None,
- library_name: Optional[str] = None,
- library_version: Optional[str] = None,
- user_agent: Union[Dict, str, None] = None,
- ignore_files: Optional[List[str]] = None
-) -> str:
- """
- Method derived from huggingface_hub.
-    Adds a new parameter 'ignore_files', which allows ignoring certain files / file-patterns
- """
- if cache_dir is None:
- cache_dir = HUGGINGFACE_HUB_CACHE
- if isinstance(cache_dir, Path):
- cache_dir = str(cache_dir)
-
- _api = HfApi()
- model_info = _api.model_info(repo_id=repo_id, revision=revision)
-
- storage_folder = os.path.join(
- cache_dir, repo_id.replace("/", "_")
- )
-
- for model_file in model_info.siblings:
- if ignore_files is not None:
- skip_download = False
- for pattern in ignore_files:
- if fnmatch.fnmatch(model_file.rfilename, pattern):
- skip_download = True
- break
-
- if skip_download:
- continue
-
- url = hf_hub_url(
- repo_id, filename=model_file.rfilename, revision=model_info.sha
- )
- relative_filepath = os.path.join(*model_file.rfilename.split("/"))
-
- # Create potential nested dir
- nested_dirname = os.path.dirname(
- os.path.join(storage_folder, relative_filepath)
- )
- os.makedirs(nested_dirname, exist_ok=True)
-
- path = cached_download(
- url,
- cache_dir=storage_folder,
- force_filename=relative_filepath,
- library_name=library_name,
- library_version=library_version,
- user_agent=user_agent,
- )
-
- if os.path.exists(path + ".lock"):
- os.remove(path + ".lock")
-
- return storage_folder
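The core of the cosine-similarity search implemented above, as a self-contained sketch with random embeddings standing in for sentence-transformer output:

import torch
import torch.nn.functional as F

corpus = F.normalize(torch.randn(1000, 384), p=2, dim=1)    # unit-length corpus embeddings
queries = F.normalize(torch.randn(3, 384), p=2, dim=1)      # unit-length query embeddings

scores = queries @ corpus.T                                  # cosine similarity for unit-normed rows
top_vals, top_idx = scores.topk(k=5, dim=1)                  # 5 best corpus entries per query
for q in range(queries.shape[0]):
    hits = [{"corpus_id": i, "score": round(s, 3)}
            for i, s in zip(top_idx[q].tolist(), top_vals[q].tolist())]
    print(q, hits)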
diff --git a/spaces/chaowei100/ChatGPT_Taiyi-Stable-Diffusion/funtional_picture.py b/spaces/chaowei100/ChatGPT_Taiyi-Stable-Diffusion/funtional_picture.py
deleted file mode 100644
index 074b28407e6a704798f6132a8d53254937108b96..0000000000000000000000000000000000000000
--- a/spaces/chaowei100/ChatGPT_Taiyi-Stable-Diffusion/funtional_picture.py
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# Image generation model (Taiyi Stable Diffusion pipelines)
-from PIL import Image
-import torch
-from diffusers import (
- StableDiffusionPipeline,
- StableDiffusionImg2ImgPipeline,
- StableDiffusionInpaintPipeline,
-)
-device="cuda"
-model_id = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1"
-
-pipe_text2img = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
-pipe_img2img = StableDiffusionImg2ImgPipeline(**pipe_text2img.components)
-def infer_text2img(prompt, guide, steps, width, height, image_in, strength):
- if image_in is not None:
- init_image = image_in.convert("RGB").resize((width, height))
- output = pipe_img2img(prompt, image=init_image, strength=strength, guidance_scale=guide, num_inference_steps=steps)
- else:
- output = pipe_text2img(prompt, width=width, height=height, guidance_scale=guide, num_inference_steps=steps,)
- image = output.images[0]
- return image
\ No newline at end of file
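A minimal text-to-image sketch using the same diffusers pipeline as the deleted helper above; the prompt and output path are placeholders, and fp16 on a CUDA device is assumed to keep memory reasonable:

import torch
from diffusers import StableDiffusionPipeline

model_id = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "一只戴帽子的猫, 油画"   # placeholder Chinese prompt ("a cat wearing a hat, oil painting")
image = pipe(prompt, width=512, height=512, guidance_scale=7.5, num_inference_steps=50).images[0]
image.save("out.png")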
diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/run_camembert.py b/spaces/chendl/compositional_test/transformers/examples/legacy/run_camembert.py
deleted file mode 100644
index 9651570b39e1e8c02373cd444578179eb635593b..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/legacy/run_camembert.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-import torch
-
-from transformers import CamembertForMaskedLM, CamembertTokenizer
-
-
-def fill_mask(masked_input, model, tokenizer, topk=5):
- # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
-    assert masked_input.count("<mask>") == 1
- input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0) # Batch size 1
- logits = model(input_ids)[0] # The last hidden-state is the first element of the output tuple
- masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
- logits = logits[0, masked_index, :]
- prob = logits.softmax(dim=0)
- values, indices = prob.topk(k=topk, dim=0)
- topk_predicted_token_bpe = " ".join(
- [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
- )
- masked_token = tokenizer.mask_token
- topk_filled_outputs = []
- for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
- predicted_token = predicted_token_bpe.replace("\u2581", " ")
- if " {0}".format(masked_token) in masked_input:
- topk_filled_outputs.append(
- (
- masked_input.replace(" {0}".format(masked_token), predicted_token),
- values[index].item(),
- predicted_token,
- )
- )
- else:
- topk_filled_outputs.append(
- (
- masked_input.replace(masked_token, predicted_token),
- values[index].item(),
- predicted_token,
- )
- )
- return topk_filled_outputs
-
-
-tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
-model = CamembertForMaskedLM.from_pretrained("camembert-base")
-model.eval()
-
-masked_input = "Le camembert est <mask> :)"
-print(fill_mask(masked_input, model, tokenizer, topk=3))
diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/save_len_file.py b/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/save_len_file.py
deleted file mode 100644
index 9e73b59e7e5a2b0a480779db987464f8b8320cee..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/save_len_file.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2020 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import fire
-from torch.utils.data import DataLoader
-from tqdm import tqdm
-
-from transformers import AutoTokenizer
-from utils import Seq2SeqDataset, pickle_save
-
-
-def save_len_file(
- tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
-):
- """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
- tok = AutoTokenizer.from_pretrained(tokenizer_name)
- train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
- pad = tok.pad_token_id
-
- def get_lens(ds):
- dl = tqdm(
- DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
- desc=str(ds.len_file),
- )
- max_lens = []
- for batch in dl:
- src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
- tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
- if consider_target:
- for src, tgt in zip(src_lens, tgt_lens):
- max_lens.append(max(src, tgt))
- else:
- max_lens.extend(src_lens)
- return max_lens
-
- train_lens = get_lens(train_ds)
- val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
- val_lens = get_lens(val_ds)
- pickle_save(train_lens, train_ds.len_file)
- pickle_save(val_lens, val_ds.len_file)
-
-
-if __name__ == "__main__":
- fire.Fire(save_len_file)
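Because the deleted script hands save_len_file to fire.Fire, it is normally driven from the command line (e.g. python save_len_file.py t5-small path/to/data). The same call can be made directly from Python; the tokenizer name and data directory below are illustrative, and the module is assumed to be importable from the legacy seq2seq examples folder:

from save_len_file import save_len_file  # assumes the deleted script is on the path

# Writes the pickled length files (train_ds.len_file / val_ds.len_file) next to the data
# so dynamic batching can sort examples by length, exactly as the fire CLI would.
save_len_file("t5-small", "path/to/data", max_source_length=512, max_target_length=128)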
diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/language-modeling/run_clm.py b/spaces/chendl/compositional_test/transformers/examples/pytorch/language-modeling/run_clm.py
deleted file mode 100644
index 9941e2f21be2b7d31d69eca0ef17067253c59392..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/pytorch/language-modeling/run_clm.py
+++ /dev/null
@@ -1,635 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
-
-Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
-https://huggingface.co/models?filter=text-generation
-"""
-# You can also adapt this script to your own causal language modeling task. Pointers for this are left as comments.
-
-import logging
-import math
-import os
-import sys
-from dataclasses import dataclass, field
-from itertools import chain
-from typing import Optional
-
-import datasets
-import evaluate
-import torch
-from datasets import load_dataset
-
-import transformers
-from transformers import (
- CONFIG_MAPPING,
- MODEL_FOR_CAUSAL_LM_MAPPING,
- AutoConfig,
- AutoModelForCausalLM,
- AutoTokenizer,
- HfArgumentParser,
- Trainer,
- TrainingArguments,
- default_data_collator,
- is_torch_tpu_available,
- set_seed,
-)
-from transformers.testing_utils import CaptureLogger
-from transformers.trainer_utils import get_last_checkpoint
-from transformers.utils import check_min_version, send_example_telemetry
-from transformers.utils.versions import require_version
-
-
-# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
-check_min_version("4.28.0")
-
-require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
-
-logger = logging.getLogger(__name__)
-
-
-MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
-MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
-
-
-@dataclass
-class ModelArguments:
- """
- Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
- """
-
- model_name_or_path: Optional[str] = field(
- default=None,
- metadata={
- "help": (
-                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
- )
- },
- )
- model_type: Optional[str] = field(
- default=None,
- metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
- )
- config_overrides: Optional[str] = field(
- default=None,
- metadata={
- "help": (
- "Override some existing default config settings when a model is trained from scratch. Example: "
- "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
- )
- },
- )
- config_name: Optional[str] = field(
- default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
- )
- tokenizer_name: Optional[str] = field(
- default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
- )
- cache_dir: Optional[str] = field(
- default=None,
- metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
- )
- use_fast_tokenizer: bool = field(
- default=True,
- metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
- )
- model_revision: str = field(
- default="main",
- metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
- )
- use_auth_token: bool = field(
- default=False,
- metadata={
- "help": (
- "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
- "with private models)."
- )
- },
- )
- torch_dtype: Optional[str] = field(
- default=None,
- metadata={
- "help": (
- "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
- "dtype will be automatically derived from the model's weights."
- ),
- "choices": ["auto", "bfloat16", "float16", "float32"],
- },
- )
- low_cpu_mem_usage: bool = field(
- default=False,
- metadata={
- "help": (
-                "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
-                "Setting this to True will benefit LLM loading time and RAM consumption."
- )
- },
- )
-
- def __post_init__(self):
- if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
- raise ValueError(
- "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
- )
-
-
-@dataclass
-class DataTrainingArguments:
- """
- Arguments pertaining to what data we are going to input our model for training and eval.
- """
-
- dataset_name: Optional[str] = field(
- default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
- )
- dataset_config_name: Optional[str] = field(
- default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
- )
- train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
- validation_file: Optional[str] = field(
- default=None,
- metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
- )
- max_train_samples: Optional[int] = field(
- default=None,
- metadata={
- "help": (
- "For debugging purposes or quicker training, truncate the number of training examples to this "
- "value if set."
- )
- },
- )
- max_eval_samples: Optional[int] = field(
- default=None,
- metadata={
- "help": (
- "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
- "value if set."
- )
- },
- )
- streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
- block_size: Optional[int] = field(
- default=None,
- metadata={
- "help": (
- "Optional input sequence length after tokenization. "
- "The training dataset will be truncated in block of this size for training. "
- "Default to the model max input length for single sentence inputs (take into account special tokens)."
- )
- },
- )
- overwrite_cache: bool = field(
- default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
- )
- validation_split_percentage: Optional[int] = field(
- default=5,
- metadata={
- "help": "The percentage of the train set used as validation set in case there's no validation split"
- },
- )
- preprocessing_num_workers: Optional[int] = field(
- default=None,
- metadata={"help": "The number of processes to use for the preprocessing."},
- )
- keep_linebreaks: bool = field(
- default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
- )
-
- def __post_init__(self):
- if self.streaming:
- require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
-
- if self.dataset_name is None and self.train_file is None and self.validation_file is None:
- raise ValueError("Need either a dataset name or a training/validation file.")
- else:
- if self.train_file is not None:
- extension = self.train_file.split(".")[-1]
- assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
- if self.validation_file is not None:
- extension = self.validation_file.split(".")[-1]
- assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
-
-
-def main():
- # See all possible arguments in src/transformers/training_args.py
- # or by passing the --help flag to this script.
- # We now keep distinct sets of args, for a cleaner separation of concerns.
-
- parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
- if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
- # If we pass only one argument to the script and it's the path to a json file,
- # let's parse it to get our arguments.
- model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
- else:
- model_args, data_args, training_args = parser.parse_args_into_dataclasses()
-
- # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
- # information sent is the one passed as arguments along with your Python/PyTorch versions.
- send_example_telemetry("run_clm", model_args, data_args)
-
- # Setup logging
- logging.basicConfig(
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
- datefmt="%m/%d/%Y %H:%M:%S",
- handlers=[logging.StreamHandler(sys.stdout)],
- )
-
- if training_args.should_log:
- # The default of training_args.log_level is passive, so we set log level at info here to have that default.
- transformers.utils.logging.set_verbosity_info()
-
- log_level = training_args.get_process_log_level()
- logger.setLevel(log_level)
- datasets.utils.logging.set_verbosity(log_level)
- transformers.utils.logging.set_verbosity(log_level)
- transformers.utils.logging.enable_default_handler()
- transformers.utils.logging.enable_explicit_format()
-
- # Log on each process the small summary:
- logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
- + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
- )
- logger.info(f"Training/evaluation parameters {training_args}")
-
- # Detecting last checkpoint.
- last_checkpoint = None
- if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
- last_checkpoint = get_last_checkpoint(training_args.output_dir)
- if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
- raise ValueError(
- f"Output directory ({training_args.output_dir}) already exists and is not empty. "
- "Use --overwrite_output_dir to overcome."
- )
- elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
- logger.info(
- f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
- "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
- )
-
- # Set seed before initializing model.
- set_seed(training_args.seed)
-
- # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
- # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
- # (the dataset will be downloaded automatically from the datasets Hub).
- #
- # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
- # 'text' is found. You can easily tweak this behavior (see below).
- #
- # In distributed training, the load_dataset function guarantee that only one local process can concurrently
- # download the dataset.
- if data_args.dataset_name is not None:
- # Downloading and loading a dataset from the hub.
- raw_datasets = load_dataset(
- data_args.dataset_name,
- data_args.dataset_config_name,
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- streaming=data_args.streaming,
- )
- if "validation" not in raw_datasets.keys():
- raw_datasets["validation"] = load_dataset(
- data_args.dataset_name,
- data_args.dataset_config_name,
- split=f"train[:{data_args.validation_split_percentage}%]",
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- streaming=data_args.streaming,
- )
- raw_datasets["train"] = load_dataset(
- data_args.dataset_name,
- data_args.dataset_config_name,
- split=f"train[{data_args.validation_split_percentage}%:]",
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- streaming=data_args.streaming,
- )
- else:
- data_files = {}
- dataset_args = {}
- if data_args.train_file is not None:
- data_files["train"] = data_args.train_file
- if data_args.validation_file is not None:
- data_files["validation"] = data_args.validation_file
- extension = (
- data_args.train_file.split(".")[-1]
- if data_args.train_file is not None
- else data_args.validation_file.split(".")[-1]
- )
- if extension == "txt":
- extension = "text"
- dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
- raw_datasets = load_dataset(
- extension,
- data_files=data_files,
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- **dataset_args,
- )
- # If no validation data is there, validation_split_percentage will be used to divide the dataset.
- if "validation" not in raw_datasets.keys():
- raw_datasets["validation"] = load_dataset(
- extension,
- data_files=data_files,
- split=f"train[:{data_args.validation_split_percentage}%]",
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- **dataset_args,
- )
- raw_datasets["train"] = load_dataset(
- extension,
- data_files=data_files,
- split=f"train[{data_args.validation_split_percentage}%:]",
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- **dataset_args,
- )
-
- # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
- # https://huggingface.co/docs/datasets/loading_datasets.html.
-
- # Load pretrained model and tokenizer
- #
- # Distributed training:
- # The .from_pretrained methods guarantee that only one local process can concurrently
- # download model & vocab.
-
- config_kwargs = {
- "cache_dir": model_args.cache_dir,
- "revision": model_args.model_revision,
- "use_auth_token": True if model_args.use_auth_token else None,
- }
- if model_args.config_name:
- config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
- elif model_args.model_name_or_path:
- config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
- else:
- config = CONFIG_MAPPING[model_args.model_type]()
- logger.warning("You are instantiating a new config instance from scratch.")
- if model_args.config_overrides is not None:
- logger.info(f"Overriding config: {model_args.config_overrides}")
- config.update_from_string(model_args.config_overrides)
- logger.info(f"New config: {config}")
-
- tokenizer_kwargs = {
- "cache_dir": model_args.cache_dir,
- "use_fast": model_args.use_fast_tokenizer,
- "revision": model_args.model_revision,
- "use_auth_token": True if model_args.use_auth_token else None,
- }
- if model_args.tokenizer_name:
- tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
- elif model_args.model_name_or_path:
- tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
- else:
- raise ValueError(
- "You are instantiating a new tokenizer from scratch. This is not supported by this script."
- "You can do it from another script, save it, and load it from here, using --tokenizer_name."
- )
-
- if model_args.model_name_or_path:
- torch_dtype = (
- model_args.torch_dtype
- if model_args.torch_dtype in ["auto", None]
- else getattr(torch, model_args.torch_dtype)
- )
- model = AutoModelForCausalLM.from_pretrained(
- model_args.model_name_or_path,
- from_tf=bool(".ckpt" in model_args.model_name_or_path),
- config=config,
- cache_dir=model_args.cache_dir,
- revision=model_args.model_revision,
- use_auth_token=True if model_args.use_auth_token else None,
- torch_dtype=torch_dtype,
- low_cpu_mem_usage=model_args.low_cpu_mem_usage,
- )
- else:
- model = AutoModelForCausalLM.from_config(config)
- n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
- logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
-
- # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
- # on a small vocab and want a smaller embedding size, remove this test.
- embedding_size = model.get_input_embeddings().weight.shape[0]
- if len(tokenizer) > embedding_size:
- model.resize_token_embeddings(len(tokenizer))
-
- # Preprocessing the datasets.
- # First we tokenize all the texts.
- if training_args.do_train:
- column_names = list(raw_datasets["train"].features)
- else:
- column_names = list(raw_datasets["validation"].features)
- text_column_name = "text" if "text" in column_names else column_names[0]
-
-    # Since this function will be pickled (to avoid a _LazyModule error in Hasher), force the logger to load before tokenize_function.
- tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
-
- def tokenize_function(examples):
- with CaptureLogger(tok_logger) as cl:
- output = tokenizer(examples[text_column_name])
- # clm input could be much much longer than block_size
- if "Token indices sequence length is longer than the" in cl.out:
- tok_logger.warning(
- "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
- " before being passed to the model."
- )
- return output
-
- with training_args.main_process_first(desc="dataset map tokenization"):
- if not data_args.streaming:
- tokenized_datasets = raw_datasets.map(
- tokenize_function,
- batched=True,
- num_proc=data_args.preprocessing_num_workers,
- remove_columns=column_names,
- load_from_cache_file=not data_args.overwrite_cache,
- desc="Running tokenizer on dataset",
- )
- else:
- tokenized_datasets = raw_datasets.map(
- tokenize_function,
- batched=True,
- remove_columns=column_names,
- )
-
- if data_args.block_size is None:
- block_size = tokenizer.model_max_length
- if block_size > 1024:
- logger.warning(
- "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
- " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
- " override this default with `--block_size xxx`."
- )
- block_size = 1024
- else:
- if data_args.block_size > tokenizer.model_max_length:
- logger.warning(
- f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
-                f" ({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
- )
- block_size = min(data_args.block_size, tokenizer.model_max_length)
-
- # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
- def group_texts(examples):
- # Concatenate all texts.
- concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
- total_length = len(concatenated_examples[list(examples.keys())[0]])
-        # We drop the small remainder; we could pad instead of dropping if the model supported it. You can
-        # customize this part to your needs.
- if total_length >= block_size:
- total_length = (total_length // block_size) * block_size
- # Split by chunks of max_len.
- result = {
- k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
- for k, t in concatenated_examples.items()
- }
- result["labels"] = result["input_ids"].copy()
- return result
-
- # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
- # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
- # to preprocess.
- #
- # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
- # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
-
- with training_args.main_process_first(desc="grouping texts together"):
- if not data_args.streaming:
- lm_datasets = tokenized_datasets.map(
- group_texts,
- batched=True,
- num_proc=data_args.preprocessing_num_workers,
- load_from_cache_file=not data_args.overwrite_cache,
- desc=f"Grouping texts in chunks of {block_size}",
- )
- else:
- lm_datasets = tokenized_datasets.map(
- group_texts,
- batched=True,
- )
-
- if training_args.do_train:
- if "train" not in tokenized_datasets:
- raise ValueError("--do_train requires a train dataset")
- train_dataset = lm_datasets["train"]
- if data_args.max_train_samples is not None:
- max_train_samples = min(len(train_dataset), data_args.max_train_samples)
- train_dataset = train_dataset.select(range(max_train_samples))
-
- if training_args.do_eval:
- if "validation" not in tokenized_datasets:
- raise ValueError("--do_eval requires a validation dataset")
- eval_dataset = lm_datasets["validation"]
- if data_args.max_eval_samples is not None:
- max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
- eval_dataset = eval_dataset.select(range(max_eval_samples))
-
- def preprocess_logits_for_metrics(logits, labels):
- if isinstance(logits, tuple):
- # Depending on the model and config, logits may contain extra tensors,
- # like past_key_values, but logits always come first
- logits = logits[0]
- return logits.argmax(dim=-1)
-
- metric = evaluate.load("accuracy")
-
- def compute_metrics(eval_preds):
- preds, labels = eval_preds
- # preds have the same shape as the labels, after the argmax(-1) has been calculated
- # by preprocess_logits_for_metrics but we need to shift the labels
- labels = labels[:, 1:].reshape(-1)
- preds = preds[:, :-1].reshape(-1)
- return metric.compute(predictions=preds, references=labels)
-
- # Initialize our Trainer
- trainer = Trainer(
- model=model,
- args=training_args,
- train_dataset=train_dataset if training_args.do_train else None,
- eval_dataset=eval_dataset if training_args.do_eval else None,
- tokenizer=tokenizer,
- # Data collator will default to DataCollatorWithPadding, so we change it.
- data_collator=default_data_collator,
- compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
- preprocess_logits_for_metrics=preprocess_logits_for_metrics
- if training_args.do_eval and not is_torch_tpu_available()
- else None,
- )
-
- # Training
- if training_args.do_train:
- checkpoint = None
- if training_args.resume_from_checkpoint is not None:
- checkpoint = training_args.resume_from_checkpoint
- elif last_checkpoint is not None:
- checkpoint = last_checkpoint
- train_result = trainer.train(resume_from_checkpoint=checkpoint)
- trainer.save_model() # Saves the tokenizer too for easy upload
-
- metrics = train_result.metrics
-
- max_train_samples = (
- data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
- )
- metrics["train_samples"] = min(max_train_samples, len(train_dataset))
-
- trainer.log_metrics("train", metrics)
- trainer.save_metrics("train", metrics)
- trainer.save_state()
-
- # Evaluation
- if training_args.do_eval:
- logger.info("*** Evaluate ***")
-
- metrics = trainer.evaluate()
-
- max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
- metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
- try:
- perplexity = math.exp(metrics["eval_loss"])
- except OverflowError:
- perplexity = float("inf")
- metrics["perplexity"] = perplexity
-
- trainer.log_metrics("eval", metrics)
- trainer.save_metrics("eval", metrics)
-
- kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
- if data_args.dataset_name is not None:
- kwargs["dataset_tags"] = data_args.dataset_name
- if data_args.dataset_config_name is not None:
- kwargs["dataset_args"] = data_args.dataset_config_name
- kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
- else:
- kwargs["dataset"] = data_args.dataset_name
-
- if training_args.push_to_hub:
- trainer.push_to_hub(**kwargs)
- else:
- trainer.create_model_card(**kwargs)
-
-
-def _mp_fn(index):
- # For xla_spawn (TPUs)
- main()
-
-
-if __name__ == "__main__":
- main()
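Since run_clm.py builds its configuration entirely from HfArgumentParser flags, a quick smoke run can also be launched programmatically; the model, dataset, and output directory below are the stock example values, not anything taken from this diff:

import sys

import run_clm  # assumes the deleted script is on the path

# HfArgumentParser reads sys.argv, so the usual CLI flags can be injected before calling main().
sys.argv = [
    "run_clm.py",
    "--model_name_or_path", "gpt2",
    "--dataset_name", "wikitext",
    "--dataset_config_name", "wikitext-2-raw-v1",
    "--do_train", "--do_eval",
    "--output_dir", "/tmp/test-clm",
]
run_clm.main()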
diff --git a/spaces/chenyangqi/FateZero/FateZero/test_fatezero_dataset.py b/spaces/chenyangqi/FateZero/FateZero/test_fatezero_dataset.py
deleted file mode 100644
index 95d777ff31161bed2ad54c06ec81e47d5065c664..0000000000000000000000000000000000000000
--- a/spaces/chenyangqi/FateZero/FateZero/test_fatezero_dataset.py
+++ /dev/null
@@ -1,52 +0,0 @@
-
-
-from test_fatezero import *
-from glob import glob
-import copy
-
-@click.command()
-@click.option("--edit_config", type=str, default="config/supp/style/0313_style_edit_warp_640.yaml")
-@click.option("--dataset_config", type=str, default="data/supp_edit_dataset/dataset_prompt.yaml")
-def run(edit_config, dataset_config):
- Omegadict_edit_config = OmegaConf.load(edit_config)
- Omegadict_dataset_config = OmegaConf.load(dataset_config)
-
-    # Go through all data samples
- data_sample_list = sorted(Omegadict_dataset_config.keys())
- print(f'Datasample to evaluate: {data_sample_list}')
- dataset_time_string = get_time_string()
- for data_sample in data_sample_list:
- print(f'Evaluate {data_sample}')
-
- for p2p_config_index, p2p_config in Omegadict_edit_config['validation_sample_logger_config']['p2p_config'].items():
- edit_config_now = copy.deepcopy(Omegadict_edit_config)
- edit_config_now['train_dataset'] = copy.deepcopy(Omegadict_dataset_config[data_sample])
- edit_config_now['train_dataset'].pop('target')
- if 'eq_params' in edit_config_now['train_dataset']:
- edit_config_now['train_dataset'].pop('eq_params')
- # edit_config_now['train_dataset']['prompt'] = Omegadict_dataset_config[data_sample]['source']
-
- edit_config_now['validation_sample_logger_config']['prompts'] \
- = copy.deepcopy( [Omegadict_dataset_config[data_sample]['prompt'],]+ OmegaConf.to_object(Omegadict_dataset_config[data_sample]['target']))
- p2p_config_now = dict()
- for i in range(len(edit_config_now['validation_sample_logger_config']['prompts'])):
- p2p_config_now[i] = p2p_config
- if 'eq_params' in Omegadict_dataset_config[data_sample]:
- p2p_config_now[i]['eq_params'] = Omegadict_dataset_config[data_sample]['eq_params']
-
- edit_config_now['validation_sample_logger_config']['p2p_config'] = copy.deepcopy(p2p_config_now)
- edit_config_now['validation_sample_logger_config']['source_prompt'] = Omegadict_dataset_config[data_sample]['prompt']
- # edit_config_now['validation_sample_logger_config']['source_prompt'] = Omegadict_dataset_config[data_sample]['eq_params']
-
-
- # if 'logdir' not in edit_config_now:
- logdir = edit_config.replace('config', 'result').replace('.yml', '').replace('.yaml', '')+f'_config_{p2p_config_index}'+f'_{os.path.basename(dataset_config)[:-5]}'+f'_{dataset_time_string}'
- logdir += f"/{data_sample}"
- edit_config_now['logdir'] = logdir
- print(f'Saving at {logdir}')
-
- test(config=edit_config, **edit_config_now)
-
-
-if __name__ == "__main__":
- run()
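The deleted runner is a click command, so it can also be exercised in-process with click's test runner; the two YAML paths below are simply the defaults declared in the options above:

from click.testing import CliRunner

from test_fatezero_dataset import run  # assumes the deleted module is importable

result = CliRunner().invoke(run, [
    "--edit_config", "config/supp/style/0313_style_edit_warp_640.yaml",
    "--dataset_config", "data/supp_edit_dataset/dataset_prompt.yaml",
])
print(result.exit_code)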
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/utils/data.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/utils/data.py
deleted file mode 100644
index 28e66bfab5764fe58e19fb339b2cdf8ad9d510b4..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/utils/data.py
+++ /dev/null
@@ -1,299 +0,0 @@
-import json
-import os
-import random
-import hashlib
-import warnings
-
-import pandas as pd
-from toolz import curried
-from typing import Callable
-
-from .core import sanitize_dataframe
-from .core import sanitize_geo_interface
-from .deprecation import AltairDeprecationWarning
-from .plugin_registry import PluginRegistry
-
-
-# ==============================================================================
-# Data transformer registry
-# ==============================================================================
-DataTransformerType = Callable
-
-
-class DataTransformerRegistry(PluginRegistry[DataTransformerType]):
- _global_settings = {"consolidate_datasets": True}
-
- @property
- def consolidate_datasets(self):
- return self._global_settings["consolidate_datasets"]
-
- @consolidate_datasets.setter
- def consolidate_datasets(self, value):
- self._global_settings["consolidate_datasets"] = value
-
-
-# ==============================================================================
-# Data model transformers
-#
-# A data model transformer is a pure function that takes a dict or DataFrame
-# and returns a transformed version of a dict or DataFrame. The dict objects
-# will be the Data portion of the VegaLite schema. The idea is that a user can
-# pipe a sequence of these data transformers together to prepare the data before
-# it hits the renderer.
-#
-# In this version of Altair, renderers only deal with the dict form of a
-# VegaLite spec, after the Data model has been put into a schema compliant
-# form.
-#
-# A data model transformer has the following type signature:
-# DataModelType = Union[dict, pd.DataFrame]
-# DataModelTransformerType = Callable[[DataModelType, KwArgs], DataModelType]
-# ==============================================================================
-
-
-class MaxRowsError(Exception):
- """Raised when a data model has too many rows."""
-
- pass
-
-
-@curried.curry
-def limit_rows(data, max_rows=5000):
- """Raise MaxRowsError if the data model has more than max_rows.
-
- If max_rows is None, then do not perform any check.
- """
- check_data_type(data)
- if hasattr(data, "__geo_interface__"):
- if data.__geo_interface__["type"] == "FeatureCollection":
- values = data.__geo_interface__["features"]
- else:
- values = data.__geo_interface__
- elif isinstance(data, pd.DataFrame):
- values = data
- elif isinstance(data, dict):
- if "values" in data:
- values = data["values"]
- else:
- return data
- elif hasattr(data, "__dataframe__"):
- values = data
- if max_rows is not None and len(values) > max_rows:
- raise MaxRowsError(
- "The number of rows in your dataset is greater "
- f"than the maximum allowed ({max_rows}).\n\n"
- "See https://altair-viz.github.io/user_guide/large_datasets.html "
- "for information on how to plot large datasets, "
- "including how to install third-party data management tools and, "
- "in the right circumstance, disable the restriction"
- )
- return data
-
-
-@curried.curry
-def sample(data, n=None, frac=None):
- """Reduce the size of the data model by sampling without replacement."""
- check_data_type(data)
- if isinstance(data, pd.DataFrame):
- return data.sample(n=n, frac=frac)
- elif isinstance(data, dict):
- if "values" in data:
- values = data["values"]
- n = n if n else int(frac * len(values))
- values = random.sample(values, n)
- return {"values": values}
- elif hasattr(data, "__dataframe__"):
- # experimental interchange dataframe support
- pi = import_pyarrow_interchange()
- pa_table = pi.from_dataframe(data)
- n = n if n else int(frac * len(pa_table))
- indices = random.sample(range(len(pa_table)), n)
- return pa_table.take(indices)
-
-
-@curried.curry
-def to_json(
- data,
- prefix="altair-data",
- extension="json",
- filename="{prefix}-{hash}.{extension}",
- urlpath="",
-):
- """
- Write the data model to a .json file and return a url based data model.
- """
- data_json = _data_to_json_string(data)
- data_hash = _compute_data_hash(data_json)
- filename = filename.format(prefix=prefix, hash=data_hash, extension=extension)
- with open(filename, "w") as f:
- f.write(data_json)
- return {"url": os.path.join(urlpath, filename), "format": {"type": "json"}}
-
-
-@curried.curry
-def to_csv(
- data,
- prefix="altair-data",
- extension="csv",
- filename="{prefix}-{hash}.{extension}",
- urlpath="",
-):
- """Write the data model to a .csv file and return a url based data model."""
- data_csv = _data_to_csv_string(data)
- data_hash = _compute_data_hash(data_csv)
- filename = filename.format(prefix=prefix, hash=data_hash, extension=extension)
- with open(filename, "w") as f:
- f.write(data_csv)
- return {"url": os.path.join(urlpath, filename), "format": {"type": "csv"}}
-
-
-@curried.curry
-def to_values(data):
- """Replace a DataFrame by a data model with values."""
- check_data_type(data)
- if hasattr(data, "__geo_interface__"):
- if isinstance(data, pd.DataFrame):
- data = sanitize_dataframe(data)
- data = sanitize_geo_interface(data.__geo_interface__)
- return {"values": data}
- elif isinstance(data, pd.DataFrame):
- data = sanitize_dataframe(data)
- return {"values": data.to_dict(orient="records")}
- elif isinstance(data, dict):
- if "values" not in data:
- raise KeyError("values expected in data dict, but not present.")
- return data
- elif hasattr(data, "__dataframe__"):
- # experimental interchange dataframe support
- pi = import_pyarrow_interchange()
- pa_table = pi.from_dataframe(data)
- return {"values": pa_table.to_pylist()}
-
-
-def check_data_type(data):
- """Raise if the data is not a dict or DataFrame."""
- if not isinstance(data, (dict, pd.DataFrame)) and not any(
- hasattr(data, attr) for attr in ["__geo_interface__", "__dataframe__"]
- ):
- raise TypeError(
- "Expected dict, DataFrame or a __geo_interface__ attribute, got: {}".format(
- type(data)
- )
- )
-
-
-# ==============================================================================
-# Private utilities
-# ==============================================================================
-
-
-def _compute_data_hash(data_str):
- return hashlib.md5(data_str.encode()).hexdigest()
-
-
-def _data_to_json_string(data):
- """Return a JSON string representation of the input data"""
- check_data_type(data)
- if hasattr(data, "__geo_interface__"):
- if isinstance(data, pd.DataFrame):
- data = sanitize_dataframe(data)
- data = sanitize_geo_interface(data.__geo_interface__)
- return json.dumps(data)
- elif isinstance(data, pd.DataFrame):
- data = sanitize_dataframe(data)
- return data.to_json(orient="records", double_precision=15)
- elif isinstance(data, dict):
- if "values" not in data:
- raise KeyError("values expected in data dict, but not present.")
- return json.dumps(data["values"], sort_keys=True)
- elif hasattr(data, "__dataframe__"):
- # experimental interchange dataframe support
- pi = import_pyarrow_interchange()
- pa_table = pi.from_dataframe(data)
- return json.dumps(pa_table.to_pylist())
- else:
- raise NotImplementedError(
- "to_json only works with data expressed as " "a DataFrame or as a dict"
- )
-
-
-def _data_to_csv_string(data):
- """return a CSV string representation of the input data"""
- check_data_type(data)
- if hasattr(data, "__geo_interface__"):
- raise NotImplementedError(
- "to_csv does not work with data that "
- "contains the __geo_interface__ attribute"
- )
- elif isinstance(data, pd.DataFrame):
- data = sanitize_dataframe(data)
- return data.to_csv(index=False)
- elif isinstance(data, dict):
- if "values" not in data:
- raise KeyError("values expected in data dict, but not present")
- return pd.DataFrame.from_dict(data["values"]).to_csv(index=False)
- elif hasattr(data, "__dataframe__"):
- # experimental interchange dataframe support
- pi = import_pyarrow_interchange()
- import pyarrow as pa
- import pyarrow.csv as pa_csv
-
- pa_table = pi.from_dataframe(data)
- csv_buffer = pa.BufferOutputStream()
- pa_csv.write_csv(pa_table, csv_buffer)
- return csv_buffer.getvalue().to_pybytes().decode()
- else:
- raise NotImplementedError(
- "to_csv only works with data expressed as " "a DataFrame or as a dict"
- )
-
-
-def pipe(data, *funcs):
- """
- Pipe a value through a sequence of functions
-
- Deprecated: use toolz.curried.pipe() instead.
- """
- warnings.warn(
- "alt.pipe() is deprecated, and will be removed in a future release. "
- "Use toolz.curried.pipe() instead.",
- AltairDeprecationWarning,
- stacklevel=1,
- )
- return curried.pipe(data, *funcs)
-
-
-def curry(*args, **kwargs):
- """Curry a callable function
-
- Deprecated: use toolz.curried.curry() instead.
- """
- warnings.warn(
- "alt.curry() is deprecated, and will be removed in a future release. "
- "Use toolz.curried.curry() instead.",
- AltairDeprecationWarning,
- stacklevel=1,
- )
- return curried.curry(*args, **kwargs)
-
-
-def import_pyarrow_interchange():
- import pkg_resources
-
- try:
- pkg_resources.require("pyarrow>=11.0.0")
- # The package is installed and meets the minimum version requirement
- import pyarrow.interchange as pi
-
- return pi
- except pkg_resources.DistributionNotFound as err:
- # The package is not installed
- raise ImportError(
- "Usage of the DataFrame Interchange Protocol requires the package 'pyarrow', but it is not installed."
- ) from err
- except pkg_resources.VersionConflict as err:
- # The package is installed but does not meet the minimum version requirement
- raise ImportError(
- "The installed version of 'pyarrow' does not meet the minimum requirement of version 11.0.0. "
- "Please update 'pyarrow' to use the DataFrame Interchange Protocol."
- ) from err
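The transformers above are curried with toolz, so they compose into a small pipeline. A sketch with a made-up DataFrame: limit_rows guards against oversized data, and to_values produces the {"values": [...]} form a Vega-Lite spec embeds:

import pandas as pd
from toolz.curried import pipe

from altair.utils.data import limit_rows, to_values

df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})

# limit_rows raises MaxRowsError past the threshold; to_values converts the DataFrame
# into the records-based dict that ends up in the chart specification.
data = pipe(df, limit_rows(max_rows=5000), to_values)
print(data["values"][0])  # {'x': 1, 'y': 4}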
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/db/impl/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/db/impl/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/fernet.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/fernet.py
deleted file mode 100644
index ad8fb40b9d44b55867750b4f7d161a4e9ce750ff..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/fernet.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from __future__ import annotations
-
-import base64
-import binascii
-import os
-import time
-import typing
-
-from cryptography import utils
-from cryptography.exceptions import InvalidSignature
-from cryptography.hazmat.primitives import hashes, padding
-from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
-from cryptography.hazmat.primitives.hmac import HMAC
-
-
-class InvalidToken(Exception):
- pass
-
-
-_MAX_CLOCK_SKEW = 60
-
-
-class Fernet:
- def __init__(
- self,
- key: typing.Union[bytes, str],
- backend: typing.Any = None,
- ) -> None:
- try:
- key = base64.urlsafe_b64decode(key)
- except binascii.Error as exc:
- raise ValueError(
- "Fernet key must be 32 url-safe base64-encoded bytes."
- ) from exc
- if len(key) != 32:
- raise ValueError(
- "Fernet key must be 32 url-safe base64-encoded bytes."
- )
-
- self._signing_key = key[:16]
- self._encryption_key = key[16:]
-
- @classmethod
- def generate_key(cls) -> bytes:
- return base64.urlsafe_b64encode(os.urandom(32))
-
- def encrypt(self, data: bytes) -> bytes:
- return self.encrypt_at_time(data, int(time.time()))
-
- def encrypt_at_time(self, data: bytes, current_time: int) -> bytes:
- iv = os.urandom(16)
- return self._encrypt_from_parts(data, current_time, iv)
-
- def _encrypt_from_parts(
- self, data: bytes, current_time: int, iv: bytes
- ) -> bytes:
- utils._check_bytes("data", data)
-
- padder = padding.PKCS7(algorithms.AES.block_size).padder()
- padded_data = padder.update(data) + padder.finalize()
- encryptor = Cipher(
- algorithms.AES(self._encryption_key),
- modes.CBC(iv),
- ).encryptor()
- ciphertext = encryptor.update(padded_data) + encryptor.finalize()
-
- basic_parts = (
- b"\x80"
- + current_time.to_bytes(length=8, byteorder="big")
- + iv
- + ciphertext
- )
-
- h = HMAC(self._signing_key, hashes.SHA256())
- h.update(basic_parts)
- hmac = h.finalize()
- return base64.urlsafe_b64encode(basic_parts + hmac)
-
- def decrypt(
- self, token: typing.Union[bytes, str], ttl: typing.Optional[int] = None
- ) -> bytes:
- timestamp, data = Fernet._get_unverified_token_data(token)
- if ttl is None:
- time_info = None
- else:
- time_info = (ttl, int(time.time()))
- return self._decrypt_data(data, timestamp, time_info)
-
- def decrypt_at_time(
- self, token: typing.Union[bytes, str], ttl: int, current_time: int
- ) -> bytes:
- if ttl is None:
- raise ValueError(
- "decrypt_at_time() can only be used with a non-None ttl"
- )
- timestamp, data = Fernet._get_unverified_token_data(token)
- return self._decrypt_data(data, timestamp, (ttl, current_time))
-
- def extract_timestamp(self, token: typing.Union[bytes, str]) -> int:
- timestamp, data = Fernet._get_unverified_token_data(token)
- # Verify the token was not tampered with.
- self._verify_signature(data)
- return timestamp
-
- @staticmethod
- def _get_unverified_token_data(
- token: typing.Union[bytes, str]
- ) -> typing.Tuple[int, bytes]:
- if not isinstance(token, (str, bytes)):
- raise TypeError("token must be bytes or str")
-
- try:
- data = base64.urlsafe_b64decode(token)
- except (TypeError, binascii.Error):
- raise InvalidToken
-
- if not data or data[0] != 0x80:
- raise InvalidToken
-
- if len(data) < 9:
- raise InvalidToken
-
- timestamp = int.from_bytes(data[1:9], byteorder="big")
- return timestamp, data
-
- def _verify_signature(self, data: bytes) -> None:
- h = HMAC(self._signing_key, hashes.SHA256())
- h.update(data[:-32])
- try:
- h.verify(data[-32:])
- except InvalidSignature:
- raise InvalidToken
-
- def _decrypt_data(
- self,
- data: bytes,
- timestamp: int,
- time_info: typing.Optional[typing.Tuple[int, int]],
- ) -> bytes:
- if time_info is not None:
- ttl, current_time = time_info
- if timestamp + ttl < current_time:
- raise InvalidToken
-
- if current_time + _MAX_CLOCK_SKEW < timestamp:
- raise InvalidToken
-
- self._verify_signature(data)
-
- iv = data[9:25]
- ciphertext = data[25:-32]
- decryptor = Cipher(
- algorithms.AES(self._encryption_key), modes.CBC(iv)
- ).decryptor()
- plaintext_padded = decryptor.update(ciphertext)
- try:
- plaintext_padded += decryptor.finalize()
- except ValueError:
- raise InvalidToken
- unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
-
- unpadded = unpadder.update(plaintext_padded)
- try:
- unpadded += unpadder.finalize()
- except ValueError:
- raise InvalidToken
- return unpadded
-
-
-class MultiFernet:
- def __init__(self, fernets: typing.Iterable[Fernet]):
- fernets = list(fernets)
- if not fernets:
- raise ValueError(
- "MultiFernet requires at least one Fernet instance"
- )
- self._fernets = fernets
-
- def encrypt(self, msg: bytes) -> bytes:
- return self.encrypt_at_time(msg, int(time.time()))
-
- def encrypt_at_time(self, msg: bytes, current_time: int) -> bytes:
- return self._fernets[0].encrypt_at_time(msg, current_time)
-
- def rotate(self, msg: typing.Union[bytes, str]) -> bytes:
- timestamp, data = Fernet._get_unverified_token_data(msg)
- for f in self._fernets:
- try:
- p = f._decrypt_data(data, timestamp, None)
- break
- except InvalidToken:
- pass
- else:
- raise InvalidToken
-
- iv = os.urandom(16)
- return self._fernets[0]._encrypt_from_parts(p, timestamp, iv)
-
- def decrypt(
- self, msg: typing.Union[bytes, str], ttl: typing.Optional[int] = None
- ) -> bytes:
- for f in self._fernets:
- try:
- return f.decrypt(msg, ttl)
- except InvalidToken:
- pass
- raise InvalidToken
-
- def decrypt_at_time(
- self, msg: typing.Union[bytes, str], ttl: int, current_time: int
- ) -> bytes:
- for f in self._fernets:
- try:
- return f.decrypt_at_time(msg, ttl, current_time)
- except InvalidToken:
- pass
- raise InvalidToken
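For reference, the round trip the deleted Fernet class implements; the message is illustrative:

from cryptography.fernet import Fernet, MultiFernet

key = Fernet.generate_key()            # 32 random bytes, url-safe base64-encoded
f = Fernet(key)
token = f.encrypt(b"attack at dawn")   # version byte 0x80 + timestamp + IV + ciphertext + HMAC, base64-encoded
assert f.decrypt(token, ttl=60) == b"attack at dawn"

# MultiFernet tries each key in order, which is what makes key rotation possible.
rotated = MultiFernet([Fernet(Fernet.generate_key()), f])
assert rotated.decrypt(rotated.rotate(token)) == b"attack at dawn"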
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/pens/areaPen.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/pens/areaPen.py
deleted file mode 100644
index 004bb06b091ceb777cca2c02f8481a2785a46d35..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/pens/areaPen.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Calculate the area of a glyph."""
-
-from fontTools.pens.basePen import BasePen
-
-
-__all__ = ["AreaPen"]
-
-
-class AreaPen(BasePen):
- def __init__(self, glyphset=None):
- BasePen.__init__(self, glyphset)
- self.value = 0
-
- def _moveTo(self, p0):
- self._p0 = self._startPoint = p0
-
- def _lineTo(self, p1):
- x0, y0 = self._p0
- x1, y1 = p1
- self.value -= (x1 - x0) * (y1 + y0) * 0.5
- self._p0 = p1
-
- def _qCurveToOne(self, p1, p2):
- # https://github.com/Pomax/bezierinfo/issues/44
- p0 = self._p0
- x0, y0 = p0[0], p0[1]
- x1, y1 = p1[0] - x0, p1[1] - y0
- x2, y2 = p2[0] - x0, p2[1] - y0
- self.value -= (x2 * y1 - x1 * y2) / 3
- self._lineTo(p2)
- self._p0 = p2
-
- def _curveToOne(self, p1, p2, p3):
- # https://github.com/Pomax/bezierinfo/issues/44
- p0 = self._p0
- x0, y0 = p0[0], p0[1]
- x1, y1 = p1[0] - x0, p1[1] - y0
- x2, y2 = p2[0] - x0, p2[1] - y0
- x3, y3 = p3[0] - x0, p3[1] - y0
- self.value -= (x1 * (-y2 - y3) + x2 * (y1 - 2 * y3) + x3 * (y1 + 2 * y2)) * 0.15
- self._lineTo(p3)
- self._p0 = p3
-
- def _closePath(self):
- self._lineTo(self._startPoint)
- del self._p0, self._startPoint
-
- def _endPath(self):
- if self._p0 != self._startPoint:
- # Area is not defined for open contours.
- raise NotImplementedError
- del self._p0, self._startPoint
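A quick sanity check of the pen above on a hand-drawn contour; BasePen supplies the public moveTo/lineTo/closePath wrappers that forward to the _-prefixed methods, and the coordinates are illustrative:

from fontTools.pens.areaPen import AreaPen

pen = AreaPen()
pen.moveTo((0, 0))
pen.lineTo((10, 0))
pen.lineTo((10, 10))
pen.lineTo((0, 10))
pen.closePath()

# The counter-clockwise 10x10 square accumulates a positive signed area of 100.0.
print(pen.value)  # 100.0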
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/models.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/models.py
deleted file mode 100644
index 954cf87bfa6a6485aaf32052604dc76ef7cd2853..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/models.py
+++ /dev/null
@@ -1,584 +0,0 @@
-"""Variation fonts interpolation models."""
-
-__all__ = [
- "normalizeValue",
- "normalizeLocation",
- "supportScalar",
- "VariationModel",
-]
-
-from fontTools.misc.roundTools import noRound
-from .errors import VariationModelError
-
-
-def nonNone(lst):
- return [l for l in lst if l is not None]
-
-
-def allNone(lst):
- return all(l is None for l in lst)
-
-
-def allEqualTo(ref, lst, mapper=None):
- if mapper is None:
- return all(ref == item for item in lst)
-
- mapped = mapper(ref)
- return all(mapped == mapper(item) for item in lst)
-
-
-def allEqual(lst, mapper=None):
- if not lst:
- return True
- it = iter(lst)
- try:
- first = next(it)
- except StopIteration:
- return True
- return allEqualTo(first, it, mapper=mapper)
-
-
-def subList(truth, lst):
- assert len(truth) == len(lst)
- return [l for l, t in zip(lst, truth) if t]
-
-
-def normalizeValue(v, triple, extrapolate=False):
- """Normalizes value based on a min/default/max triple.
-
- >>> normalizeValue(400, (100, 400, 900))
- 0.0
- >>> normalizeValue(100, (100, 400, 900))
- -1.0
- >>> normalizeValue(650, (100, 400, 900))
- 0.5
- """
- lower, default, upper = triple
- if not (lower <= default <= upper):
- raise ValueError(
- f"Invalid axis values, must be minimum, default, maximum: "
- f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}"
- )
- if not extrapolate:
- v = max(min(v, upper), lower)
-
- if v == default or lower == upper:
- return 0.0
-
- if (v < default and lower != default) or (v > default and upper == default):
- return (v - default) / (default - lower)
- else:
- assert (v > default and upper != default) or (
- v < default and lower == default
- ), f"Ooops... v={v}, triple=({lower}, {default}, {upper})"
- return (v - default) / (upper - default)
-
-
-def normalizeLocation(location, axes, extrapolate=False):
- """Normalizes location based on axis min/default/max values from axes.
-
- >>> axes = {"wght": (100, 400, 900)}
- >>> normalizeLocation({"wght": 400}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": 100}, axes)
- {'wght': -1.0}
- >>> normalizeLocation({"wght": 900}, axes)
- {'wght': 1.0}
- >>> normalizeLocation({"wght": 650}, axes)
- {'wght': 0.5}
- >>> normalizeLocation({"wght": 1000}, axes)
- {'wght': 1.0}
- >>> normalizeLocation({"wght": 0}, axes)
- {'wght': -1.0}
- >>> axes = {"wght": (0, 0, 1000)}
- >>> normalizeLocation({"wght": 0}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": -1}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": 1000}, axes)
- {'wght': 1.0}
- >>> normalizeLocation({"wght": 500}, axes)
- {'wght': 0.5}
- >>> normalizeLocation({"wght": 1001}, axes)
- {'wght': 1.0}
- >>> axes = {"wght": (0, 1000, 1000)}
- >>> normalizeLocation({"wght": 0}, axes)
- {'wght': -1.0}
- >>> normalizeLocation({"wght": -1}, axes)
- {'wght': -1.0}
- >>> normalizeLocation({"wght": 500}, axes)
- {'wght': -0.5}
- >>> normalizeLocation({"wght": 1000}, axes)
- {'wght': 0.0}
- >>> normalizeLocation({"wght": 1001}, axes)
- {'wght': 0.0}
- """
- out = {}
- for tag, triple in axes.items():
- v = location.get(tag, triple[1])
- out[tag] = normalizeValue(v, triple, extrapolate=extrapolate)
- return out
-
-
-def supportScalar(location, support, ot=True, extrapolate=False, axisRanges=None):
- """Returns the scalar multiplier at location, for a master
- with support. If ot is True, then a peak value of zero
- for support of an axis means "axis does not participate". That
- is how OpenType Variation Font technology works.
-
- If extrapolate is True, axisRanges must be a dict that maps axis
- names to (axisMin, axisMax) tuples.
-
- >>> supportScalar({}, {})
- 1.0
- >>> supportScalar({'wght':.2}, {})
- 1.0
- >>> supportScalar({'wght':.2}, {'wght':(0,2,3)})
- 0.1
- >>> supportScalar({'wght':2.5}, {'wght':(0,2,4)})
- 0.75
- >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
- 0.75
- >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False)
- 0.375
- >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
- 0.75
- >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
- 0.75
- >>> supportScalar({'wght':3}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
- -1.0
- >>> supportScalar({'wght':-1}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
- -1.0
- >>> supportScalar({'wght':3}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
- 1.5
- >>> supportScalar({'wght':-1}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
- -0.5
- """
- if extrapolate and axisRanges is None:
- raise TypeError("axisRanges must be passed when extrapolate is True")
- scalar = 1.0
- for axis, (lower, peak, upper) in support.items():
- if ot:
- # OpenType-specific case handling
- if peak == 0.0:
- continue
- if lower > peak or peak > upper:
- continue
- if lower < 0.0 and upper > 0.0:
- continue
- v = location.get(axis, 0.0)
- else:
- assert axis in location
- v = location[axis]
- if v == peak:
- continue
-
- if extrapolate:
- axisMin, axisMax = axisRanges[axis]
- if v < axisMin and lower <= axisMin:
- if peak <= axisMin and peak < upper:
- scalar *= (v - upper) / (peak - upper)
- continue
- elif axisMin < peak:
- scalar *= (v - lower) / (peak - lower)
- continue
- elif axisMax < v and axisMax <= upper:
- if axisMax <= peak and lower < peak:
- scalar *= (v - lower) / (peak - lower)
- continue
- elif peak < axisMax:
- scalar *= (v - upper) / (peak - upper)
- continue
-
- if v <= lower or upper <= v:
- scalar = 0.0
- break
-
- if v < peak:
- scalar *= (v - lower) / (peak - lower)
- else: # v > peak
- scalar *= (v - upper) / (peak - upper)
- return scalar
-
-
-class VariationModel(object):
- """Locations must have the base master at the origin (ie. 0).
-
- If the extrapolate argument is set to True, then values are extrapolated
- outside the axis range.
-
- >>> from pprint import pprint
- >>> locations = [ \
- {'wght':100}, \
- {'wght':-100}, \
- {'wght':-180}, \
- {'wdth':+.3}, \
- {'wght':+120,'wdth':.3}, \
- {'wght':+120,'wdth':.2}, \
- {}, \
- {'wght':+180,'wdth':.3}, \
- {'wght':+180}, \
- ]
- >>> model = VariationModel(locations, axisOrder=['wght'])
- >>> pprint(model.locations)
- [{},
- {'wght': -100},
- {'wght': -180},
- {'wght': 100},
- {'wght': 180},
- {'wdth': 0.3},
- {'wdth': 0.3, 'wght': 180},
- {'wdth': 0.3, 'wght': 120},
- {'wdth': 0.2, 'wght': 120}]
- >>> pprint(model.deltaWeights)
- [{},
- {0: 1.0},
- {0: 1.0},
- {0: 1.0},
- {0: 1.0},
- {0: 1.0},
- {0: 1.0, 4: 1.0, 5: 1.0},
- {0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.6666666666666666},
- {0: 1.0,
- 3: 0.75,
- 4: 0.25,
- 5: 0.6666666666666667,
- 6: 0.4444444444444445,
- 7: 0.6666666666666667}]
- """
-
- def __init__(self, locations, axisOrder=None, extrapolate=False):
-
- if len(set(tuple(sorted(l.items())) for l in locations)) != len(locations):
- raise VariationModelError("Locations must be unique.")
-
- self.origLocations = locations
- self.axisOrder = axisOrder if axisOrder is not None else []
- self.extrapolate = extrapolate
- self.axisRanges = self.computeAxisRanges(locations) if extrapolate else None
-
- locations = [{k: v for k, v in loc.items() if v != 0.0} for loc in locations]
- keyFunc = self.getMasterLocationsSortKeyFunc(
- locations, axisOrder=self.axisOrder
- )
- self.locations = sorted(locations, key=keyFunc)
-
- # Mapping from user's master order to our master order
- self.mapping = [self.locations.index(l) for l in locations]
- self.reverseMapping = [locations.index(l) for l in self.locations]
-
- self._computeMasterSupports()
- self._subModels = {}
-
- def getSubModel(self, items):
- if None not in items:
- return self, items
- key = tuple(v is not None for v in items)
- subModel = self._subModels.get(key)
- if subModel is None:
- subModel = VariationModel(subList(key, self.origLocations), self.axisOrder)
- self._subModels[key] = subModel
- return subModel, subList(key, items)
-
- @staticmethod
- def computeAxisRanges(locations):
- axisRanges = {}
- allAxes = {axis for loc in locations for axis in loc.keys()}
- for loc in locations:
- for axis in allAxes:
- value = loc.get(axis, 0)
- axisMin, axisMax = axisRanges.get(axis, (value, value))
- axisRanges[axis] = min(value, axisMin), max(value, axisMax)
- return axisRanges
-
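A small sketch of the helper above (illustrative values, not from the original source): an axis missing from a location counts as 0, so every computed range includes the origin.

```python
# Sketch: ranges span the most negative to the most positive value seen per axis.
from fontTools.varLib.models import VariationModel

ranges = VariationModel.computeAxisRanges([{"wght": 100}, {"wght": -180}, {"wdth": 0.3}])
assert ranges == {"wght": (-180, 100), "wdth": (0, 0.3)}
```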
- @staticmethod
- def getMasterLocationsSortKeyFunc(locations, axisOrder=[]):
- if {} not in locations:
- raise VariationModelError("Base master not found.")
- axisPoints = {}
- for loc in locations:
- if len(loc) != 1:
- continue
- axis = next(iter(loc))
- value = loc[axis]
- if axis not in axisPoints:
- axisPoints[axis] = {0.0}
- assert (
- value not in axisPoints[axis]
- ), 'Value "%s" in axisPoints["%s"] --> %s' % (value, axis, axisPoints)
- axisPoints[axis].add(value)
-
- def getKey(axisPoints, axisOrder):
- def sign(v):
- return -1 if v < 0 else +1 if v > 0 else 0
-
- def key(loc):
- rank = len(loc)
- onPointAxes = [
- axis
- for axis, value in loc.items()
- if axis in axisPoints and value in axisPoints[axis]
- ]
- orderedAxes = [axis for axis in axisOrder if axis in loc]
- orderedAxes.extend(
- [axis for axis in sorted(loc.keys()) if axis not in axisOrder]
- )
- return (
- rank, # First, order by increasing rank
- -len(onPointAxes), # Next, by decreasing number of onPoint axes
- tuple(
- axisOrder.index(axis) if axis in axisOrder else 0x10000
- for axis in orderedAxes
- ), # Next, by known axes
- tuple(orderedAxes), # Next, by all axes
- tuple(
- sign(loc[axis]) for axis in orderedAxes
- ), # Next, by signs of axis values
- tuple(
- abs(loc[axis]) for axis in orderedAxes
- ), # Next, by absolute value of axis values
- )
-
- return key
-
- ret = getKey(axisPoints, axisOrder)
- return ret
-
- def reorderMasters(self, master_list, mapping):
- # For changing the master data order without
- # recomputing supports and deltaWeights.
- new_list = [master_list[idx] for idx in mapping]
- self.origLocations = [self.origLocations[idx] for idx in mapping]
- locations = [
- {k: v for k, v in loc.items() if v != 0.0} for loc in self.origLocations
- ]
- self.mapping = [self.locations.index(l) for l in locations]
- self.reverseMapping = [locations.index(l) for l in self.locations]
- self._subModels = {}
- return new_list
-
- def _computeMasterSupports(self):
- self.supports = []
- regions = self._locationsToRegions()
- for i, region in enumerate(regions):
- locAxes = set(region.keys())
- # Walk over previous masters now
- for prev_region in regions[:i]:
-             # Masters with extra axes do not participate
- if set(prev_region.keys()) != locAxes:
- continue
- # If it's NOT in the current box, it does not participate
- relevant = True
- for axis, (lower, peak, upper) in region.items():
- if not (
- prev_region[axis][1] == peak
- or lower < prev_region[axis][1] < upper
- ):
- relevant = False
- break
- if not relevant:
- continue
-
-             # Split the box for the new master; split along whichever
-             # direction has the largest range ratio.
-             #
-             # For symmetry, we actually cut across multiple axes
-             # if they share the same (largest) ratio.
- # https://github.com/fonttools/fonttools/commit/7ee81c8821671157968b097f3e55309a1faa511e#commitcomment-31054804
-
- bestAxes = {}
- bestRatio = -1
- for axis in prev_region.keys():
- val = prev_region[axis][1]
- assert axis in region
- lower, locV, upper = region[axis]
- newLower, newUpper = lower, upper
- if val < locV:
- newLower = val
- ratio = (val - locV) / (lower - locV)
- elif locV < val:
- newUpper = val
- ratio = (val - locV) / (upper - locV)
- else: # val == locV
- # Can't split box in this direction.
- continue
- if ratio > bestRatio:
- bestAxes = {}
- bestRatio = ratio
- if ratio == bestRatio:
- bestAxes[axis] = (newLower, locV, newUpper)
-
- for axis, triple in bestAxes.items():
- region[axis] = triple
- self.supports.append(region)
- self._computeDeltaWeights()
-
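To make the box-splitting above concrete, a short sketch (illustrative values, not from the original file): an intermediate master at wght=0.5 narrows the support of the wght=1 master so it only begins where the intermediate master peaks.

```python
# Sketch: how _computeMasterSupports narrows a region around an intermediate master.
from fontTools.varLib.models import VariationModel

model = VariationModel([{}, {"wght": 0.5}, {"wght": 1.0}])
assert model.supports == [
    {},                         # base master: contributes everywhere
    {"wght": (0, 0.5, 1.0)},    # intermediate master: full-width tent
    {"wght": (0.5, 1.0, 1.0)},  # wght=1 master: clipped to start at the 0.5 peak
]
```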
- def _locationsToRegions(self):
- locations = self.locations
- # Compute min/max across each axis, use it as total range.
- # TODO Take this as input from outside?
- minV = {}
- maxV = {}
- for l in locations:
- for k, v in l.items():
- minV[k] = min(v, minV.get(k, v))
- maxV[k] = max(v, maxV.get(k, v))
-
- regions = []
- for loc in locations:
- region = {}
- for axis, locV in loc.items():
- if locV > 0:
- region[axis] = (0, locV, maxV[axis])
- else:
- region[axis] = (minV[axis], locV, 0)
- regions.append(region)
- return regions
-
- def _computeDeltaWeights(self):
- self.deltaWeights = []
- for i, loc in enumerate(self.locations):
- deltaWeight = {}
- # Walk over previous masters now, populate deltaWeight
- for j, support in enumerate(self.supports[:i]):
- scalar = supportScalar(loc, support)
- if scalar:
- deltaWeight[j] = scalar
- self.deltaWeights.append(deltaWeight)
-
- def getDeltas(self, masterValues, *, round=noRound):
- assert len(masterValues) == len(self.deltaWeights)
- mapping = self.reverseMapping
- out = []
- for i, weights in enumerate(self.deltaWeights):
- delta = masterValues[mapping[i]]
- for j, weight in weights.items():
- if weight == 1:
- delta -= out[j]
- else:
- delta -= out[j] * weight
- out.append(round(delta))
- return out
-
- def getDeltasAndSupports(self, items, *, round=noRound):
- model, items = self.getSubModel(items)
- return model.getDeltas(items, round=round), model.supports
-
- def getScalars(self, loc):
- return [
- supportScalar(
- loc, support, extrapolate=self.extrapolate, axisRanges=self.axisRanges
- )
- for support in self.supports
- ]
-
- @staticmethod
- def interpolateFromDeltasAndScalars(deltas, scalars):
- v = None
- assert len(deltas) == len(scalars)
- for delta, scalar in zip(deltas, scalars):
- if not scalar:
- continue
- contribution = delta * scalar
- if v is None:
- v = contribution
- else:
- v += contribution
- return v
-
- def interpolateFromDeltas(self, loc, deltas):
- scalars = self.getScalars(loc)
- return self.interpolateFromDeltasAndScalars(deltas, scalars)
-
- def interpolateFromMasters(self, loc, masterValues, *, round=noRound):
- deltas = self.getDeltas(masterValues, round=round)
- return self.interpolateFromDeltas(loc, deltas)
-
- def interpolateFromMastersAndScalars(self, masterValues, scalars, *, round=noRound):
- deltas = self.getDeltas(masterValues, round=round)
- return self.interpolateFromDeltasAndScalars(deltas, scalars)
-
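Putting the class together, a round-trip sketch (illustrative numbers, same three-master setup as above): getDeltas turns master values into per-master corrections, and interpolateFromMasters recombines them at an arbitrary location.

```python
# Sketch: deltas are what each master adds on top of the masters before it;
# interpolation is a scalar-weighted sum of those deltas.
from fontTools.varLib.models import VariationModel

model = VariationModel([{}, {"wght": 0.5}, {"wght": 1.0}])
masterValues = [0, 4, 10]  # e.g. an advance width measured at each master

deltas = model.getDeltas(masterValues)
# The base value is 0 and the 0.5 master's tent is zero at wght=1,
# so nothing gets subtracted here.
assert deltas == [0, 4, 10]

# Halfway between the intermediate master and the wght=1 master:
assert model.interpolateFromMasters({"wght": 0.75}, masterValues) == 7.0
```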
-
-def piecewiseLinearMap(v, mapping):
- keys = mapping.keys()
- if not keys:
- return v
- if v in keys:
- return mapping[v]
- k = min(keys)
- if v < k:
- return v + mapping[k] - k
- k = max(keys)
- if v > k:
- return v + mapping[k] - k
- # Interpolate
- a = max(k for k in keys if k < v)
- b = min(k for k in keys if k > v)
- va = mapping[a]
- vb = mapping[b]
- return va + (vb - va) * (v - a) / (b - a)
-
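A worked example of the mapping above (a sketch with made-up breakpoints, chosen so the arithmetic is exact): breakpoints map exactly, values between breakpoints interpolate linearly, and values past the outermost breakpoint are shifted by the edge offset.

```python
# Sketch: avar-style piecewise-linear remapping with hypothetical breakpoints.
from fontTools.varLib.models import piecewiseLinearMap

mapping = {-1.0: -1.0, 0.0: 0.0, 0.5: 0.25, 1.0: 1.0}

assert piecewiseLinearMap(0.5, mapping) == 0.25    # exact breakpoint
assert piecewiseLinearMap(0.75, mapping) == 0.625  # linear between (0.5 -> 0.25) and (1.0 -> 1.0)
assert piecewiseLinearMap(2.0, mapping) == 2.0     # past the last breakpoint: shifted by 1.0 - 1.0
```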
-
-def main(args=None):
- """Normalize locations on a given designspace"""
- from fontTools import configLogger
- import argparse
-
- parser = argparse.ArgumentParser(
- "fonttools varLib.models",
- description=main.__doc__,
- )
- parser.add_argument(
- "--loglevel",
- metavar="LEVEL",
- default="INFO",
- help="Logging level (defaults to INFO)",
- )
-
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument("-d", "--designspace", metavar="DESIGNSPACE", type=str)
- group.add_argument(
- "-l",
- "--locations",
- metavar="LOCATION",
- nargs="+",
-         help="Master locations as comma-separated coordinates. One must be all zeros.",
- )
-
- args = parser.parse_args(args)
-
- configLogger(level=args.loglevel)
- from pprint import pprint
-
- if args.designspace:
- from fontTools.designspaceLib import DesignSpaceDocument
-
- doc = DesignSpaceDocument()
- doc.read(args.designspace)
- locs = [s.location for s in doc.sources]
- print("Original locations:")
- pprint(locs)
- doc.normalize()
- print("Normalized locations:")
- locs = [s.location for s in doc.sources]
- pprint(locs)
- else:
- axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
- locs = [
- dict(zip(axes, (float(v) for v in s.split(",")))) for s in args.locations
- ]
-
- model = VariationModel(locs)
- print("Sorted locations:")
- pprint(model.locations)
- print("Supports:")
- pprint(model.supports)
-
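For completeness, a hypothetical invocation of the entry point above (the coordinates are invented): each location string is split on commas and zipped onto axes named A, B, C, ..., and one location must be all zeros so the base master exists.

```python
# Hypothetical: drive the CLI programmatically. The three strings become
# {'A': 0.0}, {'A': 1.0} and {'A': 0.5, 'B': 0.3}; the first is the base master.
from fontTools.varLib.models import main

main(["-l", "0", "1", "0.5,0.3"])  # prints the sorted locations and their supports
```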
-
-if __name__ == "__main__":
- import doctest, sys
-
- if len(sys.argv) > 1:
- sys.exit(main())
-
- sys.exit(doctest.testmod().failed)
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-4247b34c.css b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-4247b34c.css
deleted file mode 100644
index c9ebc3f6e2c7e52a3faf7a41e6ea43f8bc9821d2..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-4247b34c.css
+++ /dev/null
@@ -1 +0,0 @@
-div.svelte-18kevw>*:not(.absolute){border-radius:0!important}div.svelte-18kevw>*:first-child{border-top-right-radius:var(--radius-lg)!important;border-top-left-radius:var(--radius-lg)!important}div.svelte-18kevw>*:last-child{border-top-right-radius:var(--radius-lg)!important;border-top-left-radius:var(--radius-lg)!important}div.svelte-18kevw>*+*:not(.absolute){border-top:none!important}.hide.svelte-18kevw{display:none}
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-43eb8bd8.js b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-43eb8bd8.js
deleted file mode 100644
index 5e11000b177a80cbdafa64dc581b3faa20111e43..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-43eb8bd8.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as r,e as h,s as v,k as g,o as w,z,v as k,x as B,a4 as C,P as R,p as S,R as q,A,F}from"./index-f877dfd5.js";import{a as P}from"./Button-11a87b79.js";import{X}from"./Blocks-adc2d4ca.js";function j(t){let i=t[9](t[3])+"",a;return{c(){a=R(i)},m(e,s){S(e,a,s)},p(e,s){s&520&&i!==(i=e[9](e[3])+"")&&q(a,i)},d(e){e&&A(a)}}}function D(t){let i,a;return i=new P({props:{variant:t[4],elem_id:t[0],elem_classes:t[1],size:t[6],scale:t[7],min_width:t[8],visible:t[2],disabled:t[5]==="static",$$slots:{default:[j]},$$scope:{ctx:t}}}),i.$on("click",t[10]),{c(){g(i.$$.fragment)},m(e,s){w(i,e,s),a=!0},p(e,[s]){const l={};s&16&&(l.variant=e[4]),s&1&&(l.elem_id=e[0]),s&2&&(l.elem_classes=e[1]),s&64&&(l.size=e[6]),s&128&&(l.scale=e[7]),s&256&&(l.min_width=e[8]),s&4&&(l.visible=e[2]),s&32&&(l.disabled=e[5]==="static"),s&2568&&(l.$$scope={dirty:s,ctx:e}),i.$set(l)},i(e){a||(z(i.$$.fragment,e),a=!0)},o(e){k(i.$$.fragment,e),a=!1},d(e){B(i,e)}}}function E(t,i,a){let e;C(t,X,n=>a(9,e=n));let{elem_id:s=""}=i,{elem_classes:l=[]}=i,{visible:m=!0}=i,{value:u}=i,{variant:_="secondary"}=i,{mode:f="dynamic"}=i,{size:o="lg"}=i,{scale:c=null}=i,{min_width:d=void 0}=i;function b(n){F.call(this,t,n)}return t.$$set=n=>{"elem_id"in n&&a(0,s=n.elem_id),"elem_classes"in n&&a(1,l=n.elem_classes),"visible"in n&&a(2,m=n.visible),"value"in n&&a(3,u=n.value),"variant"in n&&a(4,_=n.variant),"mode"in n&&a(5,f=n.mode),"size"in n&&a(6,o=n.size),"scale"in n&&a(7,c=n.scale),"min_width"in n&&a(8,d=n.min_width)},[s,l,m,u,_,f,o,c,d,e,b]}class G extends r{constructor(i){super(),h(this,i,E,D,v,{elem_id:0,elem_classes:1,visible:2,value:3,variant:4,mode:5,size:6,scale:7,min_width:8})}}const K=G,L=["static","dynamic"],M=t=>({type:{payload:"string"},description:{payload:"button label"},example_data:t.value||"Run"});export{K as Component,M as document,L as modes};
-//# sourceMappingURL=index-43eb8bd8.js.map
diff --git a/spaces/cihyFjudo/fairness-paper-search/Apna Desh [1972 FLAC] Discover the Trivia and Facts about this Film Directed by Jambu..md b/spaces/cihyFjudo/fairness-paper-search/Apna Desh [1972 FLAC] Discover the Trivia and Facts about this Film Directed by Jambu..md
deleted file mode 100644
index 67f4a46b2bc7d63be5b2187abc63c4c5350c3835..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Apna Desh [1972 FLAC] Discover the Trivia and Facts about this Film Directed by Jambu..md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Artist - All Album #100 Days (1991)101 Days - (Unreleased) (1992)12 O Clock (1958)13B (2009)15th August (1993)16 December (2002)1920 (2008)1942 A Love Story1947 Earth1971 (2007)23rd March 1931 Shaheed (2002)2nd October - Gandhis India Today (2003)3 Days 4 Nights (2009)3 Idiots (2009)36 China Town42 Kms (2009)5 Rifles (1973)50 Years Of Indian Independence - Vol 17 Days (1995)7.5 Phere (Sarhe Saat Phere)8x10 Tasveer (2009)99 (2009)99.9 FM (2005)AA Wednesday (2008)A Wednesday (2008)Aa Ab Laut ChalenAa Dekhen Zara (2009)Aa Gale Lag Jaa (1973)Aa Gale Lag Jaa (1994)Aaashiq Banaya AapneAabra Ka Daabra (2004)Aadmi (1968)Aadmi (1993)Aadmi Aur Apsara (1991)Aadmi Khilona Hai (1993)Aadmi Sadak Ka (1977)Aadmin Aur Insaan (1969)Aag (1948)Aag Aur Shola (1986)Aag Hi Aag (1987)Aag Ka DariyaAag Ka GolaAag Ka Gola - RelistAag Ka Toofan (1994)Aage Ki Soch (1987)Aagey Se Right (2009)Aaghaaz (2000)Aah (1953)Aahutee (1978)Aaina (1977)Aaina (1993)Aaj Ka Arjun (1990)Aaj Ka Arjun And Kishen KanhaiyaAaj Ka Goonda Raaj (1992)Aaj Ke Angaarey (1988)Aaj Ki Awaaz (1984)Aaja Meri Jaan (1993)Aaja Nachle (2007)Aajaa Sanam (1992)Aakhari Decision (2010)Aakhir KyonAakhree Raasta (1986)Aakhri Adaalat (1988)Aakhri Daku (1978)Aakhri Dao (1959)Aakhri Goli (1977)Aakhri Khat (1966)Aakraman (1975)Aakrosh (1998)Aamdani Atthanni Kharcha Rpaiya (2001)Aamir (2008)Aamne Saamne (1967)Aamne SamneAamras (2009)Aan (1952)Aan - Men At Work (2004)Aan Aur Shaan (1983)Aan Baan (1972)Aan Milo Sajna (1970)AanchAanchal Tera Dhalka Hua (1991)Aandhi (1975)Aandhi Toofan (1985)Aangan Ki Kali (1979)Aankhen (1968)Aankhen (1993)Aankhen (2002)Aankhon Mein Tum HoAansoo Aur Muskan (1970)Aansoo Ban Gaye Phool (1969)Aao Pyar Karen (1964)Aao Pyar Karen (1994)Aao Wish Karein (2009)Aap Aye Bahar Aayi (1971)Aap Kaa Surroor - The Movie (2007) - Vol 1Aap Kaa Surroor - The Movie (2007) - Vol 2Aap Ke Deewane (1980)Aap Ke Sath (1986)Aap Ki Kasam (1974)Aap Ki KhatirAap Ki Khatir (1977)Aap Ki Parchhaiyan (1964)Aap Ki YaadenAap Ko Pehle Bhi Kahin Dekha HaiAap Mujhe Achche Lagne LageAap To Aise Na The (1980)Aapas Ki Baat (1982)Aapke SathAar Paar (1954)Aar Paar (1985)Aar Ya PaarAarambh (1976)Aarti (1962)Aarzoo (1999)Aas Ka Panchhi (1961)Aas Paas (1980)AASHA (1957)Aasha (1980)Aashik Awara (1992)AashiqAashiq (2001)Aashiq (90s)Aashiq Aawara (1993)Aashiq Hoon Baharon Ka (1977)Aashique Mastane (1996)AashiquiAashirwad (1968)Aasma (2009)Aasmaan (1984)Aasman Se Ooncha (1989)Aasra (1966)Aasra Pyaar Da (1983)AatishAatma (2006)Aaya Sawan Jhoom Ke (1969)Aaye Din Bahaar Ke (1966)Aayee Milan Ki Raat (1991)Aayi Milan Ki Bela (1964)AazmayishAb Dilli Door Nahin (1957)Ab E HayatAb Ke BarasAb Kya Hoga (1977)Ab Tumhare Hawale Watan SathiyoAB....BASAbdullah (1980)Abhay (2001)Abhi Abhi (1992)Abhilasha (1968)AbhimaanAbhinetri (1970)Abodh (1984)Abroo (1968)Achanak (1998)Acid Factory (2009)ADA (1951)Ada .. 
A Way of Life (2008)Adalat (1958)Addi TappaaAdharmAdhikar (1971)Adhoora Aadmi (1982)AetbaarAflatoon (1997)Afsaana (1951)Afsana Dilwalon Ka (1996)Afsana Pyar Ka (1991)Agar Tum Na Hote (1983)Aggar (2007)Agneekaal (1993)Agneepath (1990)Agni Sakshi (1996)Agni Varsha (2002)Agnipankh (2004)Agreement (1980)Agyaat (2009)Ahankaar (1995)Ahista (1990)Ahista AhistaAhista Ahista (1981)Ahsaas (1979)Aisa Kyun Hota HaiAishwarya (2009)Aitbaar (1985)AitraazAjab Prem Ki Ghazab Kahani (2009)Ajanabee (1974)AjayAjnabeeAjnabi (1974)Ajooba (1991)Akalmand (1984)Akarshan (1988)Akashdeep (1965)Akayla (1991)Akele Hum Akele TumAkhiri Dao (1959)Akhiyon Ke Jharokon Se (1978)Akhiyon Se Goli Maare (2002)Aks (2001)AksarAl Hilal (1958)Al-Risalah (2008)Aladdin & The Wonderful Lamp (1978)Aladin (2009)Alag (2006)Alag Alag (1985)Albela (1951)Albela (1987)Albela (2001)Albeli (1974)Alibaba And Forty Thieves (1954)Alibaba Aur 40 Chor (1980)Alibaba Aur Chales Choor (Forty Thieves - 1954)All Rounder (1984)All The Best (2009)Allah Rakha (1986)Aloo Chaat (2009)Amaanat (1994)Amaannat (1977)Amaanush (1975)Amanush (1975)Amar (1954)Amar Akbar AnthonyAmar Prem (1971)Amar Shakti (1978)Amardeep (1958)Amardeep (1979)Amba (1990)Ameer Aadmi Gareeb Aadmi (1985)American BlendAmerican Daylight (2005)American Desi (2001)Amir Garib (1974)Amiri Garibi (1990)Amrapali (1966)Amrit (1986)An Evening In Paris (1967)Anaam (1992)Anamika (1973)Anamika (2008)Anand (1970)Anand Ashram (1977)Anand Aur Anand (1984)Anand Math (1952)Anari (1959)Anari (1993)Anari No.1 (1999)Anarkali (1953)Anchal (1980)AndaazAndaaz Apna ApnaAndar Baahar (1984)Andaz (1949)Andaz (1971)Andhaa Kaanoon (1982)AndolanAngaar (1980)Angaar (1992)Angaar (2002)AngaarayAngaaray (1998)Angoor (1982)Angrakshak (1998)Angulimaal (1960)Anhonee (1973)Anita (1967)AnjaamAnjaam Khuda Jaane (1986)Anjaan (2005)Anjaana (1969)AnjaaneAnjaane Rishte (1990)Anjane Mein (1978)AnkaheeAnkahee (1984)Ankhon Mein Tum Ho (1997)Ankush (1985)AnmolAnmol Ghadi (1946)Anmol Moti (1969)Anmol Ratan (1950)Anmol SitareyAnnadata (1972)Annarth (2002)Anokha Pyar (1948)Anokha Rista (1986)Anokhe Amar Shaheed Baba Deep Singh Ji (2005)Anokhi Ada (1948)Anokhi Ada (1973)Anokhi Raat (1968)Anpadh (1962)Ansh (2002)Anth (1994)Anthony Kaun HaiAnubhavAnubhav (1971)Anupama (1966)Anuraag (1956)Anuraag (1972)Anuradha (1960)AnurodhAnwar (2006)Anyay Hi Anyay (1997)ApaharanAparichitApmaan (1982)Apna Aasmaan (2007)Apna Bana Lo (1982)Apna Desh (1972)Apna Khoon (1978)Apna Sapna Money money (2006)Apnapan (1977)Apne (2007)Apne Apne (1987)Apne Dam Par (1996)Apne Paraye (1980)Appu Raja (1990)ApradhApradhi (1992)April Fool (1964)Arab Ka Saudagar (1956)AradhanaArchana (1974)Arjun (1985)Arjun Pandit (1999)ArmaanArmaan (1981)Army (1996) Re-ListAround The World (1967)Arpan (1983)Arth (1982)AryanArzoo (1965)Asa Nu Maan Watan Da (2004)AsambhahavAsha (1957)Asha (1980)Asha JyotiAshanti (1987)Ashiana (1951)Ashirwaad (1969)Asli Naqli (1962)AsokaAstitva (2000)Atishbaz (1990)Atithi (1978)Atithi Tum Kab Jaoge (2010)Aulad (1968)Aulad (1987)Aulad Ke Dushman (1993)Aunty No.1 (1998)Aur Ek Prem Kahani (1995)Aur Kaun (1979)Aur Pyaar Ho GayaAurat Teri Yehi Kahani (1954)AuzaarAvinash (1986)Avishkaar (1973)Avtaar (1983)Awaam (1987)Awaargi (1990)Awaaz (1984)Awara (1951)Awara Baap (1985)Awara Pagal Deewana (2002)Awarapan (2007)Awwal Number (1990)Aye Meri Bekhudi (1993)Ayee Milan Ki Bela (1964)AZAADAzaad (1955)Azaad Desh Ke Ghulam (1989)BBaabarr (2009)Baabul (1986)Baabul (2006)Baadbaan (1954)BaadshahBaadshah (1999)Baaghi (1953)Baaghi (1995)Baaghi (2000)Baaghi - A Rebel for Love 
(1990)Baali Umar Ko SalaamBaap Numbari Beta Dus NumbariBaap Re Baap (1955)Baarish (1957)Baat Ek Raat Ki (1962)Baaton Baaton Mein (1979)Baawri (1982)Baayen Hath Ka Khel (1985)Baaz (1953)Baaz (2003)BaaziBaazi (1950)BaazigarBabar (1960)Babul (1950)Bach Ke Rahna Re BabaBachna Ae Haseeno (2008)Bad Friend (2005)Bada Din (1998)Bada Kabutar (1973)BadalBadal (1951)Badal (1985)Badal Aur Bijli (1956)Badalte Rishte (1978)Bade Dil Wala (1982)Bade Dilwala (1951)Bade Ghar Ki Beti (1989)Bade Miyan Chote MiyanBadhaai Ho Badhaai (2002)Badi Bahen (1949)Badi Maa (1945)Badle Ki Aag (1982)Badmaash (1998)Badnaam (1984)Badshah (1954)Badtameez (1966)Baghawat (1982)BaghbanBahaana (1960)Bahana (1942)Bahar (1951)Bahar Aane TakBaharen Phir Bhi Aayengi (1966)Baharon Ke Sapne (1967)Baharon Ki Manzil (1991)Bahu (1955)Bahu Begum (1967)Bahurani (1963)Bahurani (1989)Baiju Bawra (1952)Bairaag (1976)Bajrang Bali (1956)Bal Bramhachari (1993)Bal Ganesh (2007)Bali Umar Ko Salaam (1994)BalidaanBalika Badhu (1976)BalmaBalmaa (1993)Balwaan (1992)Bambai Ka Babu (1960)Bambai Ka Babu (1994)Bambai Ka Babu (1996)Banana BrothersBanarasBanarasi Babu (1973)Banarasi Babu (1997)Bandhan (1969)Bandhan (1998)Bandhan Anjana (1984)Bandhe Haat (1973)Bandi (1957)Bandini (1963)Bandini (1963)Bandish (1980)Bandish (1996)Bandit Queen (1995)Banjaran (1991)Bank Manager (1959)Banphool (1971)BardashtBari Behen (1949)Barkha (1959)Barood (1976)Barood (1998)BarsaatBarsaat (1949)Barsaat (2001)Barsaat Ki Ek Raat (1981)Barsaat Ki Raat (1960)Barsaat Ki Raat (1998)Barsat (1995)Bas Ek Pal (2006)Bas Itna Sa Khawab HaiBas YunhiBasant Bahar (1956)Baseraa (1981)Baseraa (1982)Batwara (1961)Batwara (1989)Bawandar (2001)Bawarchi (1972)Bawre Nain (1950)Bazaar (1949)Bazar (1982)Be Lagaam (1987)Be Naam (1974)Bedaag (1965)Bedardi (1993)Bees Saal Baad (1962)Bees Saal Baad (1989)Begaana (1963)Begunah (1957)Bekhudi (1992)Bemisaal (1982)Benaam (1974)Benaam Badsha (1991)Bepanaah (1985)BeqabuBesahaara (1986)Beshaque (1981)Besharam (1978)Beta (1991)BetaabBetaabiBetaj Badshah (1994)BetiBeti Bete (1964)Beti No - 1BewafaBewafa SanamBewafa Se Wafa (1992)Bezubaan (1982)Bhabhi (1957)Bhabhi (1991)Bhabhi Ki Chudiyan (1961)Bhagam Bhag (1952)Bhagam Bhag (2006)BhaggmatiBhagwan Dada (1986)Bhai (1997)Bhai BahenBhai Bhai (1956)Bhai Bhai (1970)Bhai Bhai (1995)Bhai Ho To Aisa (1995)Bhai Ka Dushman Bhai (1986)Bhai No. 
1 (1995)Bhairav Dweep (1994)Bhairavi (1996)Bhaiya Dooj (1984)Bhakta Surdas (1942)Bhaktha Kumbara (1985)Bhala Manas (1979)Bhanwar (1976)Bhanwara (1944)Bharat Bhagya Vidhata (2002)Bharosa (1963)Bhavna (1984)Bheegi Palken (1982)Bheegi Raat (1965)Bheema (1984)Bhola Bhala (1978)Bhola in Bollywood (2005)Bhola Shankar (1951)Bhookamp (1993)Bhool Bhulaiya (2007)Bhool Na Jaana (1960)Bhoomika (1977)Bhoot (2003)Bhoot Bangla (1965)Bhoot UncleBhoothnath (2008)Bhopal Express (1999)Bhram (2008)Bhrashtachar (1989)Bhula Na Dena (1980)Bichhoo (2000)Bidaai (1974)Big Brother (2007)Bijlee Aur Toofan (1987)Billu Barber (2009)Bilwamangal (1954)Bin Badal Barsaat (1963)Bin Phere Hum Tere (1979)Bindiya (1955)Bindiya Chamkegi (1983)Biwi Aur Makan (1966)Biwi Ho To Aisi (1988)Biwi No 1Biwi O Biwi (1981)BlackBlack and White (2008)Black Cat (1959)Black Friday (2007)Black MailBlack Mail (1973)Blue (2009)Bluff MasterBluff Master (1963)Bobby (1973)Bol Radha Bol (1992)Bollywood Hollywood (2002)Bolo Ram (2009)Bombai Ka Baabu (1989)BombayBombay 405 Miles (1980)Bombay Boys (2002)Bombay Ka Chor (1962)Bombay TalkiesBombay Talkies (1971)Bombay Talkies (Compilation) - Vol 1Bombay Talkies (Compilation) - Vol 2Bombay To Bangkok (2007)Bombay To Goa (1972)Bombay To Goa (2007)Bombblast (1993)Bond 303 (1985)BoomBoond Jo Ban Gayee Moti (1967)Boondh (2002)Boot Polish (1954)BorderBorder Hindustan Ka (2003)BoseBow Barracks Forever (2007)Boxer (1984)Boy Friend (1961)Boyfriend (1961)Brahma (1994)Brahmachari (1968)Bride and PrejudiceBrides WantedBuddha Mil Gaya (1971)Budtameez (1966)Bulandi (1980)Bulandi (2001)BulletBullet (1976)Bundal Baaz (1976)Bunglow No. 666 (1990)BuniyaadBunty Aur BabliBurning Train (1980)Buzdil (1951)CC Kkompany (2008)C.I.D. (1956)Calcutta MailCamp Rock (2008)Captain Kishore (1957)CaravanCaravan (1971)Cash (2007)Censor (2001)Cha Cha Cha (1964)ChaahatChaahat Ek Nasha (2004)Chaalbaaz (1989)Chabilee (2006)Chacha Bhatija (1977)Chacha Zindabad (1959)Chahoonga Main Tujhe (1993)Chailla Babu (1977)Chain Kulii Ki Main Kulii (2007)Chak De India (2007)Chakk De Phatte (2008)Chakkar Pe Chakkar (1977)Chal Chala Chal (2009)Chal Chalein (2009)Chal Mere BhaiChala Murari Hero Banne (1977)Chalaak (1973)Chalis Din (1958)Challia (1960)Chalo Ishq Larain - IndianChalta Purza (1977)Chalte ChalteChalte Chalte (1976)Chalti Ka Naam GaadiChamatkaar (1992)Chambal Ki Kassam (1979)Chambal Ki Rani (1979)ChameliChameli Ki Shaadi (1985)Chamku (2008)Champakali (1957)Champion (2000)Chance Pe Dance (2009)Chand Bujh Gaya (2004)Chand Ka TukdaChand Ke Paar ChaloChand Sa Roshan ChehraChandaal (1995)Chandan (1958)Chandan Ka Palna (1967)Chandi Das (1934)Chandi Sona (1977)ChandniChandni ChowkChandni Chowk To China (2008)Chandni Raat (1949)ChandrakantaChandramukhi (1960)Chandramukhi (1993)Char Chand (1953)CharasCharas (1976)Charitraheen (1974)Chatpati (1983)Chatran (1988)Chaudhvin Ka Chand (1960)Chauraha (1994)ChausarCheeni Kum (2007)Cheenti Cheenti Bang Bang (2008)Cheetah (1994)ChehraaChetnaChhaila (1996)Chhaila Babu (1977)Chhal (2002)Chhalia (1973)Chhodon Naa Yaar (2007)Chhota Sa Ghar (1995)Chhote Babu (1957)Chhote Nawab (1973)Chhote Sarkar (1996)Chhoti Bahen (1973)Chhoti Bahoo (1994)Chhoti Bahu (1971) - RelistChhoti Si Baat (1975)Chhoti Si Mulaqat (1967)Chhupa Rustam (2001)China Town (1962)ChingaariChintu Ji (2009)Chirag (1969)Chirag Kahan Roshni Kahan (1959)ChitchorChitralekha (1964)ChocalateChooriyan (2007)Chor Aur ChandChor Bazaar (1954)Chor Chor (1996)Chor Ho To Aisa (1978)Chor Machaaye ShorChor Machaye Shor (1974)Chor Machaye Shor (2002)Chor MandliChor Pe 
Mor (1990)Chor Police (1983)Chori ChoriChori Chori (1956)Chori Chori Chupke ChupkeChori Mera Kaam (1975)Chorni (1982)Choron Ki Baarat (1980)Choroon Ka Chor (2003)ChotChote Sarkar (1996)Chup Chup KeChupke Chupke (1975)Chupke SeChuppa Rustam (2001)Chura Lia Hai Tumne (2003)Chura Liya Hai TumneChuralia Nigahon Se (1997)CID (1990)Classic - Dance of Love (2005)Clerk (1989)Click (2010)College Girl (1978)Commando (1988)CompanyContract (2008)Coolie (1983)Coolie No.1CorporateCriminalDD (2005)Daadi Maa (1966)Daag (1952)Daag (1973)Daag - The FireDaata (1989)Daava (1997)Daayra (1996)Dacait (1987)Dada (1979)Dadagiri (1987)Daddy (1991)Daddy Cool (2009)Daera (1953)Dagabaz Balma (1987)Dahek (1998)Dahek (2006)Daisy (1992)Dak Babu (1954)Dak Bungla (1947)Daku Bijlee (1986)Daku Hasina (1986)Dalaal (1993)Dalpati (1991)Daman (1951)Damini (1993)DamnDance Dance (1987)Dance Like A Man (2005)Dance Party (1995)Dancer (1991)Dand Nayak (1998)DanshDaraarDard (1947)Dard (1981)Dard Ka Rishta (1982)DariyadilDarling (2007)Darling Darling (1977)Darmiyaan (1997)Darna Mana Hai (2003)Darna Zaroori HaiDarpan (1970)DarrDarwaza (1954)Dashavtar (2008)Dastak (1970)Dastak (1996)Dastoor (1991)Dasvidaniya (2008)DaudDaulat (1981)Daulat Ke Dushman (1981)Daulat Kee Jung (1992)Dayavan (1988)Dayavan (1988)De Dana Dan (2009)De Taali (2008)Deedar (1951)Deedar (1970)Deedar (1992)Deedar-E-Yaar (1982)Deewaar (1975)DeewanaDeewana (1967)Deewana (1952)Deewana Deewana Pyar KaDeewana Mastana (1997)Deewana Mujhsa Nahin (1990)Deewana Sanam (1994)DeewanapanDeewaneDeewane Hue PagalDeewangiDeewarDeewar (1966)Deh Shiva Bar Mohe (2004)Dekh Bhai Dekh (2009)Dekh Kabira Roya (1957)Delhi Heights (2007)Delhi-6 (2009)Des Disavar (1981)Des Hoya Pardes (2004)Des Pardes (1978)Desh Bhakti Songs (Indian Patriotic Songs)Desh Ke Dushman (1988)Desh Premi (1982)Deshdrohi (2008)Detective (1958)Detective Naani (2009)DevDev.D (2008)Devaki (2005)Devar (1966)Devar Bhabhi (1986)DevdasDevdas (1935)Devta (1978)Dhaal (1997)DhadkanDhadkan (1972)Dhai Akshar Prem KeDhamaal (2007)Dhan Daulat (1980)Dhan Dhana Dhan... 
Goal (2007)Dhanwaan (1993)Dhanwan (1981)Dharam Kanta (1982)Dharam Karam (1975)Dharam VeerDharam Yodha (1992)Dharamyudh (1988)Dharm (2007)Dharm Adhikari (1986)Dharmatma (1975)Dharmputra (1961)Dharti (1970)Dharti Kahe Pukar Ke (1969)Dharti Ki Kasam (1987)Dhartiputra (1993)Dhokha (2007)Dhol (2007)Dhongee (1979)Dhool Ka Phool (1959)DhoomDhoom (2004)Dhoom 2Dhoom Dadakka (2008)Dhoondte Reh Jaaoge (1998)Dhoondte Reh Jaoge (2009)DhoopDhuan (1981)Dhun (1991)Dhund (1973)Dhund - The Fog (2003)DilDil (1990)Dil Aashna Hai (1992)Dil Akhir Dil Hai (1982)Dil Apna Aur Preet Praayi (1960)Dil Apna Punjabi (2006)Dil Aur Mohabbat (1968)Dil Baychara Pyar Ka MaraDil Bole Hadippa (2009)Dil Chahta HaiDil Churaya AapneDil De Ke Dekho (1959)Dil Dhoondta Hai (2001)Dil Diwana (1974)Dil Diya Dard Liya (1966)Dil Diya Hai (2006)Dil Dosti Etc (2007)Dil Ek Mandir (1963)Dil Hai BetaabDil Hai Ke Manta NahinDil Hai Tumhaara (2002)Dil Hai TumharaDil Hi Dil Mein (2000)Dil Hi To Hai (1963)Dil Jo Bhi KaheDil Ka Kya Kasoor (1992)Dil Ka Raaja (1972)Dil Ka RishtaDil Kabaddi (2008)Dil Kahin Hosh KahinDil Ke Jharokhe Mein (1996)Dil Kho Gaya (1998)Dil Ki Baazi (1993)Dil Ki Rahein (1973)Dil Kitna Nadan Hai (1997)Dil Kya Kare (1999)Dil Mangay MoreDil Mera Dhadkan TeriDil Ne Jise Apna KahaDil Ne Phir Yaad Kiya (1966)Dil Ne Phir Yaad Kiya (2000Dil Ne Pukara (1967)Dil Pe Mat Le YaarDil Perdesi HogayaDil SeDil Se Pooch Kidhar Jaana Hai (2006)Dil Tera AashiqDil Tera Deewana (1996)Dil Tera Diwana (1962)Dil To Pagal HaiDil Tujhko Diya (1985)Dil Vil Pyar VyarDil-E-Nadaan (1981)Dilbar (1994)Diljalaa (1987)Diljale (1996)DillagiDillagi (1949)Dilli Ka Thug (1958)Dilruba Tangewali (1988)DilwaalaDilwaleDilwale Dulhania Le JayengeDilwale Kabhi Na Hare (1992)Disco Dancer (1982)Disco DeewaneDivyashakti (1993)DiwanaDo AnjaaneDo Ankhen Barah Haath (1957)Do Aur Do Panch (1980)Do BadanDo Behnen (1958)Do Bigha Zameen (1953)Do Chor (1972)Do Dilon Ka Sangam (1993)Do Dilon Ke Khel Mein (2010)Do Dilon Ki Dastaan (1985)Do Gaz Zamin Ke Neeche (1972)Do Hazaar Ek (2001)Do Jasoos (1975)Do Jhoot (1975)Do Jhooth (1975)Do Kaliyan (1968)Do KhiladiDo Knot Disturb (2009)Do Ladkiyan (1976)Do Matwale (1991)Do Pal (1991)Do Qaidi (1987)Do RaasteDo Waqt Ki Roti (1986)Do Yaron Ki Yaari (1984)DobaraDoli (1969)Doli Saja Ke RakhnaDon (1978)Don (2006)Don Muthu Swami (2008)Dont Stop Dreaming (2007)Doodh Ka Karz (1990)Dooj Ka Chand (1964)Door Ki Awaz (1964)Dooriyan (1979)Doosara Aadmi (1977)Dor (2006)Dost (1944)Dost (1974)Dost (1989)Dost Garibon Ka (1988)DostanaDostana (2008)Dosti (1964)Dosti - Friends ForeverDosti Dushmani (1986)Double CrossDouble Gadbad (1994)Dream Girl (1977)DreamsDrohi (1992)Drona (2008)Dukaan (2004)Dulaara (1994)Dulari (1949)Dulha Bikta Hai (1981)Dulha Mil Gaya (2009)Dulhan Banoo Main TeriDulhan Hum Le JayengeDulhan Wahi Jo Piya Man Bhaaye (1977)Dulhe RajaDumDuniya (1968)Duniya (1984)Duniya Dil Walon KiDuniya Jhukti Hai (1960)Duniya Meri Jeb Mein (1979)Duniyadari (1977)DuplicateDurgaDurjan (1996)Dus (1997)Dus (2005)Dus Kahaniyaan - ClubDus Kahaniyaan - LoungeDus Kahaniyaan - PoemsDus Lakh (1966)Dushman (1939)Dushman (1971)Dushman (1998)Dushman Zamana (1992)Dushmani (1996)Duty (1986)EEeshwar (1989)EhsaasEhsan (1970)EightEk (2009)Ek Aadat (2009)Ek AjnabeeEk Aur Ek GyarahEk Baar Kaho (1980)Ek Bar Muskura Do (1972)Ek Chadar Maili Si (1986)Ek Chalis Ki Last Local (2007)Ek Dhun Pyar Ki (1993)Ek Din Sau Afsane (1963)Ek Dujhe Ke Liye (1981)Ek Hi Bhool (1981)Ek Hi Raasta (1956)Ek Hi Raasta (1993)Ek Hindustani (2002)Ek Huns Ka JoraEk Jaan Hain Hum - (1983)Ek Jind Ek JanEk 
Kali Muskayee (1968)Ek Khiladi Ek HaseenaEk Kunwari Ek Kunwara (1973)Ek Ladka Ek Ladki 1992Ek Mahal Ho Sapno Ka (1975)Ek Main Aur Ek Tu (1986)Ek Main Ek TumEk Musafir Ek Hasina (1962)Ek Naari Do Roop (2001)Ek Nai Paheli (1984)Ek Nari Ek BrahmachariEk Naya Rishta (1986)Ek Nazar (1972)Ek Pari Ke Desh MeinEk Phool Char Kaante (1960)Ek Phool Do Mali (1969)Ek Raaz (1963)Ek RishtaEk Se Badhkar Ek (2004)Ek Se Bhale Do (1985)Ek Se Bure Do (2009)Ek Second (2009)Ek Tha Dil Ek Thi Dhadkan (1998)Ek Vivaah... Aisa Bhi (2008)Ekka Raja RaniEklavya - The Royal Guard (2007)ElaanElaan (1971)EMI (2008)Encounter (2002)English Babu Desi MemEscape From Taliban (2003)FFaagunFaasle (1985)Faisla (1988)Faisla (1998)Fakira (1976)Falak (1987)Family - Ties Of BloodFannaFaqeer Badshah (1986)Faraar (1975)Faraar (1994) UnreleasedFaraib (1983)Fareb (1996)Fareb (2005)Fareb (Old)Farishta Ya Qaatil (1977)Farz (1967)Farz - In The Line of Fire (2000)Fashion (1957)Fashion (2008)Fast Forward (2009)Fateh (1990)Fear (2007)Fida (2004)Fiffty Fiffty (1981)Fight ClubFilhaalFiraaq (2009)First Love Letter (1991)Fitarat (1986)Fiza (2000)Flavors - Song Trailors (2004)FM - Fun Aur MastiFool N Final (2007)FootpathFootpath (1953)Footpath (2003)Fox (2009)Freaky Chakra (2003)Fruit And Nut (2009)FunFun2shhGGaban (1966)GadarGair (1999)Gair Kaanooni (1988)Gaja GaminiGajab Bhayil Rama (2003)Gajab Tamaasa (1992)Galiyon Kaa Badshah (1990)Gaman (1979)Gambler (1971)Gambler (1997)Game (2006)Gandhi My Father (2007)Gang (2000)Ganga Jamuna Saraswati (1988)Ganga Jumuna (1961)Ganga Ka Vachan (1992)Ganga Ki KasamGanga Ki Lahren (1964)Ganga Ki Saugand (1978)Ganga Kinare Mora Gaon (1984)Ganga Meri Maa (1982)Ganga Tera Pani Amrit (1971)Ganga Tere Desh Mein (1988)Gangster (1995)Gangster (2006)Gangvaa (1984)Garam MasalaGardish (1993)Garibon Ka Daata (1988)Garv (2004)Gautam Govinda (2002)Gawaahi (1989)GayabGeet (1970)Geet (1992)Geet Gaaya Pattharon Ne (1964)Geet Gata Chal (1975)Geeta Ki Saugandh (1988)Geeta Mera Naam (1973)Gehra Daag (1963)Gehra Zakham (1981)Gehri Chaal (1973)GentlemanGeraftaar (1985)GhaathGhaav (2002)Ghajini (2008)Ghar (1978)Ghar Aakhir Ghar Hai (1988)Ghar Aaya Mera Pardesi (1993)Ghar Aur Bazaar (1986)Ghar Dwaar (1985)Ghar Ghar Ki Kahani (1988)Ghar Jamai (1992)Ghar Ka Chiraag (1989)Ghar Ka Sukh (1987)Ghar Sansar (1986)Ghar Wali Bahar WaliGharana (1961)Gharaonda (1977)Gharwali Baharwali (1998)Ghatak (1996)Ghatothkach (2008)Ghayal (1990)Ghazal (1964)Ghoonghat (1997)Ghulam (1998)Ghulam-E-Mustafa (1997)Ghulami (1985)Ghungat (1960)Ghunghat (1988)Ghungroo Ki Aawaz (1981)Giraft (1993)Girl FriendGo (2007)God And Gun (1995)God Tussi Great Ho (2008)Godaan (1963)GolmaalGolmaal Returns (2008)Gomti Ke Kinare (1972)Good Boy Bad Boy (2007)Good Luck (2008)Goonj (1974)Goonj Uthi Shehnai (1959)Gopalaa (1992)Gopi (1970)Gopi Kishan (1994)Gora Aur Kala (1972)Goraa (1986)Grahan (2001)Great Robbery (1996)Guddi (1971)Guddu (1995)Gudgudee (1993)GuideGulaal (2009)Gumnaam (1965)Gumnaam (2004)GumrahGumrah (1963)Gumrah (1963) - RelistGunaahGunaah (1993)Gunahgaar (1980)Gunahon Ka Devta (1969)Gunda (2000)Gunda Mawali (1995)Gunda RajGundagardi (1997)Gunehgar Kaun (1991)Gunga Jumna (1961)Gupt (1997)Guru (1989)Guru (2006)Guzare ZamaneGyara Hazar Ladkiyan (1962)HHaadsaa (1983)Haal-E-Dil (2008)Haan Maine Bhi Pyar Kiya (2002)Haasil (2003)Haath Ki Safai (1974)Haathi Mere Saathi (1971)Haathon Ki Lakeeren (1986)Haatim Tai (1990)Hadh (2001)Hadh Kar Di Aapne (2000)Hai Meri Jaan (1991)Haisiyat (1984)Halaal Ki Kamaai (1987)Halaku (1956)Half Ticket (1962)Halla Bol (2007)Hamaare Tumhare 
(1979)Hamar Gharwali (2003)Hamara Dil Aapke Paas HaiHamara Khandan (1987)Hamara SansarHamara Watan (1956)Hamari Manzil (1949)HameshaHamko Tumse Pyar HaiHamraaz (1967)Hamrahi (1963)Hanste Zakhm (1973)Hanuman (2005)Hanuman Returns (2007)HaqeeqatHaqeeqat (1964)Har Dil Jo Pyar KaregaHare Rama Hare Krishna (1971)Harfan Maula (1976)Hari Darshan (1972)Hari Puttar (2008)Harishchandra Taramati (1963)Hariyali Aur RastaHarjaee (1981)HaseenaHaseena Maan Jayegi (1968)Hastey Hastey (2008)Hasti (1993)Hat Trick (2007)Hathkadi (1995) - RelistHathkadi - RelistHathyaar (2002)Hatthkadi (1995)Hatya (1988)Hawas (1974)HawayeinHazaaron Khwaahishen AisiHeenaHeer (1956)Heer Raanjha (1970)Heer Ranjha (1992)Heera (1973)Heera Aur Patthar (1977)Heera Moti (1959)Heera Moti (1959) - RelistHeera Panna (1973)Heeralal Pannalal (1978)Hello (2008)Hello BrotherHennaHera Pheri (1976)Hera Pheri (2000)Hero (1983)Hero HindustaniHero Hiralal (1988)Hero No.1 (1997)Hero Wohi Jo Heroine Le Jaye (2000)Heroes (2008)Hey Johney (1985)Hey Ram (2000)Heyy Babby (2007)Hide And Seek (2010)Hifaazat (1987)Hifazat (1973)High School Musical 2 (2007) Vol - 1High School Musical 2 (2007) Vol - 2Hijack (2008)Hill Station (1957)Himalay Ki God Mein (1965)Himalay Putra (1997)Himalay Se Ooncha (1975)Himmat (1970)Himmat (1994)Himmat Aur Mehanat (1987)Himmatvar (1996)HimmatwalaHindustan (1995)Hindustan Ki Kasam (1973)Hindustan Ki Kasam (1999)HindustaniHip Hip Hurray (1984)Hitler (1998)Hogi Pyar Ki Jeet (1999)Holi Aaee Re (1970)HolidayHome DeliveryHoneymoon (1973)Honeymoon (1992)Honeymoon Travels Pvt Ltd (2007)Hongkong (1962)Horn Ok Pleassss (2008)Hot Money (2006)Hote Hote Pyaar HogayaHotel (1981)House No. 44 (1958)Housefull (2010)Howrah Bridge (1958)Hu Tu Tu (1999)Hukumat (1986)HulchalHulchul (1951)Hulchul (1995)HumHum Aapke Dil Mein Rehte HainHum Aapke Hain KaunHum Bhi Insaan Hai (1948)Hum Dil De Chuke SanamHum Do Hamara EkHum Dono (1961)Hum Dono (1984)Hum Dum (2005)Hum Hai Bemisal (1994)Hum Hain Kamal Ke (1993)Hum Hain Lajwaab (1984)Hum Hain Pyaar MeinHum Hain Rahi Pyar Ke (1993)Hum Hindustani (1960)Hum Ho Gaye AapkeHum Intezaar Karenge (1988)Hum Kisise Kum Naheen (1977)Hum Kissi Se Kam NahiHum Ko Ishq Ne MaraHum Matwale Naujawan (1961)Hum Naujawan (1985)Hum Paanch (1980)Hum Phirr Milein Na Milein (2008)Hum Pyaar Tumhi Se Ker BaitheHum Rahe Na Hum (1984)Hum Saath Saath HainHum Sab Chor Hain (1956)Hum Sab Ustad Hain (1965)Hum Se Hai MuqabalaHum Se Mile Tum (1984)Hum Se Na Jeeta Koi (1983)Hum Se Na Takrana (1988)Hum Shakal (1974)Hum TumHum Tum Aur Ghost (2010)Hum Tumhare Hain SanamHum Tumpe Marte HainHumain Tumse Pyaar Ho Gaya Chupke Chupke (2002)Humjoli (1970)Humkadam (1980)Humko Deewana Kar GayeHumko Ishq Ne MaraHumko Ishq Ne Mara (1997) (TV)Humko Tumse Pyar Hai (2005)HumraazHumsaya (1968)Humse Badhkar Kaun (1981)Humse Hai MuqablaHumsey Hai Jahaan (2008)Humto Mohabbat KaregaHungamaHungama (1971)Husn - Love And Betrayal (2006)Husn Aur Ishq (1966)Hyderabad Blues 2 (2004)Hyderabad Nawabs (2006)II Love You (1985)I Love You (1992)I Proud To Be An Indian (2003)I See YouIjaazat (1988)Ilaaka (1989)Ilzaam (1986)Imaan (1974)Imaandaar (1987)ImtihanInam Dus Hazaar (1987)IndianIndian BabuIndrajeet (1991)Inkaar (1978)InsaafInsaaf (1987)Insaaf (1997)Insaaf Ka Tarazu (1980)Insaaf Ki Awaaz (1986)Insaaf Ki Manzil (1987)Insaaf The Justise (2003)InsaanInsaaniyat Ke Dushman (1986)Insan (2004)Insan Aur Insaan (1985)Insaniyat (1994)Insaniyat Ke Devta (1992)Inspector Kiron (1991)IntaqamIntaqam (1969)Inteha (As Requested)Inteha Pyar Ki (1992)International Khiladi 
(1999)IqbalIqraar - By ChanceIshaara (1964)Ishk Ishk Ishk (1974)Ishq (1997)Ishq Be Parwah (2008)Ishq Hai TumseIshq Ho Gaya Mamu (2008)Ishq Ishq Ishq (1974)Ishq Khuda Hai (1993) - UnreleasedIshq Mein Jeena Ishq Mein Marna (1993)Ishq Par Zor Nahin (1970)Ishq VishqIshqiya (2010)Isi Ka Naam Zindagi (1992)Iss Raat Ki Subah Nahi (1995)ItihaasIttefaq (2001)Izzat Ki Roti (1993)JJaadu Sa Chal Gaya (2006)Jaag Utha Insan (1984)Jaal (1952)Jaal (1967)Jaal -The Trap (2002)Jaali Note (1960)JaalsaazJaanJaan E Wafa (1986)Jaan Ki Kasam (1991)Jaan Pehchan (1990)Jaan Se Pyara (1992)Jaan Tere NaamJaan- E- TamannaJaan-E-Wafa (1990)Jaana - Lets Fall in Love (2006)Jaanam (1992)Jaanam Samjha Karo (1999)Jaane Anjane (1971)Jaane Hoga KyaJaane Jaan (1983)Jaane Kahan Se Aayi Hai (2010)Jaane Tu Ya Jaane Na (2008)Jaaneman (1976)Jaani DushmanJaanwarJaanwar (1965)Jab Dil Kisi Pe Aata Hai (1996)Jab Jab Phool Khile (1965)Jab Jab Pyar Hua (1992)Jab Jab Pyar Hua 1994Jab Pyaar Kissi Se Hota HaiJab Pyar Kisise Hota Hai (1961)Jab We Met (2007)JackpotJadu (1951)Jag Jeondeyan De Mele (2009)Jagh Mahi (1999)JagirJagriti (1954)Jagte Raho (1956)Jahan Ara (1964)Jahan Jaaeyega Hamen Paaeyega (2007)Jahan Tum Le ChaloJai Dakshineshwar Kaali Maa (1996)Jai Kishen (1994)Jai Maa Vaishno Devi (1999)Jai Maa Vindhyavasini (1984)Jai Santoshi Maa (2006)Jai Shiv Shankar (1992)Jai Veeru (2009)Jai Vikraanta (1994)Jail (2009)Jaisi Karni Waisi BharniJal Bin Machhli Nritya Bin Bijli (1971)Jal Mahal (1980)Jalaa Kar Rakh Kar Doon Ga (1988)Jallad (1995)Jallian Wala Bagh (1987)JalwaJamai Raja (1990)JamesJanam Janam (1988)Janam Janam Ke Phere (1957)Janam Kundli (1995)Janam Se Pehle (1992)JanasheenJanbaazJaneman (1976)Janeman (2006)Jannat (2008)Janta Hawaldar (1979)Janta Ki Adalat (1994)Janwar (1965)Jashnn (2009)JasoosJawaani (1984)Jawab (1995)Jawab Hum Denge (1986)Jawan Mohabbat (1971)Jawani DeewaniJawani Deewani - A Youthful RideJawani Diwani (1972)Jawani Zindabad (1990)JeansJee Aayan NuJeena Marna Tere Sang (1992)Jeena Sirf Mere LiyeJeena Teri Gali Mein (1991)Jeena To Hai (2008)Jeene Do (1963)Jeene Ki Arzoo (1981)Jeene Ki Raah (1969)Jeene Nahin Doonga (1984)Jeeo Aur Jeene DoJeeo Shaan Se (1997)Jeet (1972)Jeet (1996)Jeete Hai Shaan SeJeevan Ek Sanghursh (1990)Jeevan Jyoti (1976)Jeevan Mrityu (1970)Jewel Thief (1967)Jhanak Jhanak Payal Baaje (1955)Jhankar BeatsJheel Ke Us Paar (1973)Jhoom Barabar Jhoom (2007)Jhoom JahaanJhooth Bole Kauwa Kaate (1998)Jhoothi (1986)Jhoothi Shaan (1992)Jhuk Gaya Aasman (1968)Jhumroo (1961)Jhutha Sach (1984)Ji Chahta Hai (1964)Jigar (1992)Jigri Dost (1969)JigyaasaJimmy (2008)Jis Desh Mein Ganga Behti Hai (1960)Jis Desh Mein Ganga Rehta haiJism (2003)Jiyaala (1998)Jiyo Aur Jeene DoJo Bole So NihaalJo Jita Wohi SikandarJodhaa Akbar (2008)Jodi No.1 (2001)Jodidar (1997)Joggers ParkJohar Mehmood In Goa (1965)Johar Mehmood In Hong Kong (1971)Johnny Gaddaar (2007)Johnny I Love You (1982)Johny Mera Naam (1970)Joroo Ka Ghulam (1972)Joru Ka GhulamJosh (2000)Joshila (1973)Joshilay (1989)Juaari (1994)Judaai (1997)Judge MujrimJudwaa (1997)JudwaanJugaad (2009)Jugnu (1969)JulieJulie (1975)Jumbo (2008)Jung (1996)Jungbaaz (1989)Jungle (2000)Jungle Love (1990)Jungle Mein Mangal (1972)Junglee (1961)Junoon (2002)Junoon 1993Jurm (1990)Jurm (2005)Jurm (2005)Jurmana (1979)Jurmana (1996)Just Married (2007)Justice Choudhary (1983)Jwala (1985)Jwalamukhi (1980)Jyoti (1981)Jyoti Bane JwalaKKaafilaKaafila (2007)Kaagaz Ke Phool (1959)Kaajal (1965)Kaal (2005)Kaala Pathar (1977)Kaali Ganga (1990)Kaalia (1981)Kaamchor (1982)Kaamyaab (1984)Kaanch Ki Deewar 
(1986)Kaanch Ki Deewar (1986) - RelistKaante (2002)Kaante Remix (2002)Kaarnaama (1989)Kaash (1987)Kaashh... Mere Hote (2008)Kaatilon Ke Kaatil (1981)Kab Kyoon Aur Kahan (1970)Kab Tak Chup Rahungi (1988)Kabhi Ajnabi TheKabhi Alvida Na Kehna (2006)Kabhi Haan Kabhi NaaKabhi KahbieKabhi Khushi Kabhi GhamKabhi Na KabhiKabhie Ajnabi The (1984)Kabul Express (2006)Kabzaa (1988)Kachchi SadakKache DhaageKachhe Dhaage (1973)Kahaani Gudiya Ki (2007)Kahan Hai Kanoon (1989)Kahan Se Aaye Badarva (2007)Kahani Kismat Ki (1973)Kahin Pyaar Na Ho JaayeKaho Naa Pyaar HaiKahtey Hain Mujhko Raja (1975)Kaisay Kahein (2007)Kaise Kahoon (1964)Kaise Kahoon Ke Pyaar HaiKaise Kaise Rishte (1993)Kal Aaj Aur Kal (1971)Kal Ho na HoKal Ki Aawaz (1992)Kal Kissne Dekha (2009)Kala Bazaar (1989)Kala Bazar (1960)Kala Dhanda Goray Log (1986)Kala Pani (1958)Kala Sona (1975)Kalaakaar (1983)Kali Ganga (1988)Kali Ghata (1951)Kali Ghata (1951) - RelistKali Topi Lal Rumal (1959)Kalicharan (1976)Kalpana (1960)KalyugKalyug (1981)Kalyug Aur Ramayan (1987)Kambakkht Ishq (2009)Kaminey (2009)Kanch Ki Gudiya (1961)Kanch Ki Gudiya (1961) - RelistKangan (1971)Kanhiya (1959)Kanoon (1994)Kanoon Apna Apna (1989)Kanoon Ki Hathkadhi (1988)Kanwarlal (1988)Kanyadaan (1993)KaramKaramdaata (1985)Karan ArjunKarate (1983)Kareeb (1998)Karishma Kudrat Kaa (1985)Karishmaa (1984)Karm (1977)KARMA (1986)Karmayogi (1978)Karobaar (2000)Kartavya (1979)Kartavya (1995)Karthik Calling Karthik (2010)Kartoos (1999)Kartoot (1986)KarzKarz (1980)Karz Chukana Hai (1991)Karzzzz (2008) Vol - 1Karzzzz (2008) Vol - 2KasakKasam (1988)Kasam (2001)Kasam Khoon Ki (1977)Kasam Paida Karne Wale Ki (1984)Kasam Suhag Ki (1988)Kasam Teri Kasam (1993)Kasauti (1974)Kash Aap Hamare Hote (2002)Kashmir Ki KaliKasme Vaade (1978)Kasme Vaade (1981)KasoorKatha (1983)Kathputli (1957)Kathputli (1971)Kati PatangKatilon Ke Kaatil (1981)KatputtliKaun Hai Jo Sapnoo Mai AyeKaun Kaisey (1983)Kaun Sachcha Kaun Jhootha (1997)KeematKeemat (1998)Kehkashaa (1988)Kehtaa Hai Dil Baar Baar (2002)KhakeeKhal NaaikaaKhal NayakKhalifa (1976)Khallas (2007)Khallballi (2008)KhamoshKhamoshiKhamoshi (1969)Khandaan (1979)Khandan (1965)Khandan (1965)Khanjar - The Knife (2003)Khanna & Iyer (2007)Khatarnaak (1990)Khatron Ke Khiladi (1988)Khatta Meetha (1978)KhauffKhawahishKhazana (1987)KhelKhel Khel Mein (1975)Khel Khiladi KaKhilaafKhiladiKhiladi 420 (2000)Khiladiyon Ka Khiladi (1996)Khilona (1970)Khilona (1996)Khoj (1989)KhoobsuratKhoobsurat (1980)Khoon Aur Paani (1981)Khoon Bhari Maang (1988)Khoon Ka KarzKhoon Pasina (1977)Khosla Ka Ghosla (2006)Khote Sikkey (1998)Khoya Khoya Chand (2007)Khuda Gawah (1992)Khuda Kasam (1981)Khuddar (1982)Khuddar (1994)Khudgarz (1987)Khujli (2006)Khule-Aam (1992)Khullam Khulla Pyaar Karen (2001)Khush Naseeb (1982)Khushboo (1975)Khushboo (2008)KhushiKhwahishKidnap (2008)Kinara (1977)King Of BollywoodKing Uncle (1993)Kiraydaar (1986)Kirkit (2009)Kisaan (1987)Kishan Kanhaiya (1990)Kisi Se Dil Lagake Dekho (1996)KismatKismat (1943)Kismat (1968)Kismat (1995)Kismat Konnection (2008)Kismetwala (1985)KisnaKiss Kis Ko (2004)Kissan (2009)Kisse Pyaar Karoon (2009)Kites (2010)Kitne Door Kitne PaasKohinoor (1960)Kohraa (1964)KohramKoi App SaKoi Mere Dil Mein HaiKoi Mere Dil Se PoocheKoi Mil GayaKoi Na Jane Re (1986)Koi Tujh Sa Kahan (2005)Kool Nahin Hot Hain Hum (2008)Kora Kagaz (1974)Kotwal Saheb (1977)Koyal (1993)Koyla 1997Kranti (1981)Kranti (2002)Kranti Kshetra (1994)KrantiveerKrazzy 4 (2008)Krishan Avtaar (1993)Krishna (1996)Krishna (2006)Krishna CottageKrodhi (1981)Krrish (2006)Krrish - 
TamilKrrish - TeluguKshatriya (1993)Kubzaa (1988)Kuch Dil Ne Kaha (2002)Kuch Khatti Kuch MeethiKuch Kuch Hota HaiKuch Na KahoKuch To HaiKuch Tum Kaho Kuch Hum KahainKuchh Kaha Aapne (2004)Kuchh Meetha Ho JayeKuchh To Gadbad HaiKudart (1981)Kudiyon Ka Hai Zamana (2006)KudratKudrat Ka Faisla (1989)Kudrat Ka Kanoon (1990)KunwaraKunwara Baap (1974)Kunwara Badan (1973)Kurbaan (1991)Kurbaan (2009)KurukshetraKya Dil Ne KahaKya KehnaKya Kool Hain HumKya Love Story Hai (2007)Kya Yehi Pyaar HaiKyon (2003)Kyon Kii Main Jhoot Nahi BoltaKyoo Ho Gaya NaKyun ki - Its FateLLaadlaLaaga Chunari Mein Daag (2007)Laal Dupatta Malmal Ka (1989)Laal KothiLaat Saab (1967)Laat Saab (1992)LaawarisLaawaris (1981)Ladies Tailor (1981)Ladka Ladki (1966)Ladki (1953)LaganLage Raho Munna bhai (2006)Lahoo Ke Do Rang (1997)Lahore (2010)Lahu Ke Do Rang (1979)Laila (1984)Laila MajnuLajjaLajwanti (1958)LakeerLakh Pardesi Hoiye (2008)Lakhon Mein Ek (1971)LakshyaLal BaadshahLal Bangla (1966)Lal Dupatta Malmal KaLal Haveli (1944)Lal Pari (1954)Lal Patthar (1971)Lal Quila (1960)Lal Salaam (2002)Lala Rukh (1958)Lalkar (1972)Laloo Prasad YadavLamheLamhe (1991)Laparwah (1981)Laqshya (1994)Lashkar (1989)Lava (1984)Le Chal Apne Sang (2000)LeaderLeader (1964)Leela (2002)Lekin (1991)Let's Dance (2009)Lets Enjoy (2004)Libaas (1988)Life In A... Metro (2007)Life Mein Kabhie Kabhiee (2007)Life Partner (2009)Little Zizou (2009)Lo Main Aagayaa (1999)LoaferLoafer (1973)Loafer (1996)LOC KargilLocket (1986)London Dreams (2009)Long Da Lishkara (1986)Lootera Sultan (1990)Lootere (1993)Lootmaar (1980)Lottery (2009)Love (1991)Love 86 (1986)Love Aaj Kal (2009)Love at Times Square (2003)Love First Love LetterLove Guru - 2009Love In Goa (1983)Love In NepalLove In Simla (1960)Love In Tokyo (1966)Love Ka Tadka (2009)Love Ke Chakkar Mein (2006)Love Ke Liye Kuch Bhi Karega (2001)Love Khichdi (2009)Love Love Love (1989)Love MarriageLove Marriage (1984)Love Sex Aur Dhoka (2010)Love Songs (2008)Love Story (1981)Love Story (1998)Love Story 2050 (2008)Love You HameshaLover Boy (1985)Lovers (1983)Luck (2009)Luck By Chance (2009)LuckyMMaa (1977)Maa (1991)Maa Aur Mamta (1970)Maa Baap (1960)Maa Baap (1988)Maa Ka Aanchal (1970)Maa Tujhe SalaamMaachisMaalaMaalMaan Abhiman (1980)Maan Gaye Mughal-E-Azam (2008)Maan Gaye Ustaad (1982)Maang Bharo Sajana (1980)Maar Dhaad (1988)Madadgar (1987)Madam X (1994)Madari (1959)Madhosh (1951)Madhosh (1974)Madhosh (1994)MadhoshiMadhubalaMadhumati (1958)Maha Chor (1976)Maha Sangram (1990)Mahaadev (1987)MahaanMahaanta (1997)Mahal (1949)Mahal (1990) UnreleasedMaharajaMaharaja (1970)Mahaveera (1987)Mahima Kashi Vishwanath Ki (2002)Mahiya (2006)Mahua (1969)Mai Baap (1957)Maidan-E-Jung (1995)Main Aisa Hi HoonMain Aur Meri Tanhai (1980)Main Aurr Mrs Khanna (2009)Main Awara Hoon (1983)Main Azaad Hoon (1989)Main Balwan (1986)Main Chup Rahungi (1962)Main Hoon NaMain Khiladi Tu AnariMain Madhuri Dixit Banna Chahti Hoon (2003)Main Meri Patni Aur WohMain Nashe Mein Hoon (1959)Main Prem Ki Dewani Hoon (2003)Main Solah Baras Ki (1998)Main Tera Aashiq (1994)Main Tera Dushman (1988)Main Tere Liye (1988)Main Tu Assi Tussi (Punjabi Movie) (2006)Main Tulsi Tere Angan Ki (1978)Main Wohi Hoon (1966)Maina (1992)Maine Dil Tujhko Diya (2002)Maine Jeena Seekh Liya (1982)Maine Payal Hai ChhankaiMaine Pyar KiyaMaine Pyar Kiya - When Love Calls (English Version)Maine Pyar Kyon KiyaMajaajan (2008)Majaal (1987)Majboor (1974)Majboor (1989)Majhdaar (1994)Major SaabMakdee (2002)Makkhee Choos (1956)Malamaal Weekly (2006)MalharMamta (1966)Mamta Ki Chhaon Mein (1988)Man 
Mandir (1971)Manav Hatya (1986)Manchalaa (1999)Manchali (1973)Mangal Pandey - The RisingManmauji (1962)MannMann Ka Meet (1968)Mann Ki Aankhen (1970)Mann Mauji (1962)Mann Pasand (1980)Mannat (Punjabi Movie)Manokaamnaa (1980)Manorama - Six Feet Under (2007)Manoranjan (1975)Manoranjan (2006)Manzil (1979)Manzil ManzilMaqbool (2004)Maqsad (1984)Mard (1985)Mard Ki Zabaan (1988)Mardon Wali Baat (1988)Marega Salaa (2009)Marigold (2007)MarketMarte Dam Tak (1987)Maruti Mere Dosst (2009)MaseehaMashaal (1984)MashookaMashooqMasoom (1982)Masoom (1996)Mast (1999)Mast Kalandar (1991)Mast Qalandar (1955)Mastana (1970)Master Of Magic (2008)MastiMausam (1975)Maut Ki Sazaa (1990)Mawaali (1982)Maya (1961)Maya Machhinder (1932)Maya Memsahab (1993)Mayurpankh (1954)Mazaa Mazaa (2005)Mazdoor (1983)Mazloom (1986)Mee Shivaj Raje Bhosle Boltoy (2009)Meena Bazar (1991)MeenaxiMeera (1979)Meera Ka Mohan (1992)Meerabai Not Out (2008)Meet Mere Man Ke (1991)Meetha Zehar (1985)Mehandi Rang Layegi (1982)Mehboob Ki Mehndi (1971)Mehboob Mere Mehboob (1992)Mehbooba (1976)Mehbooba (2008)Meherbaan (1993)Mehfal Mitran DeeMehndi (1998)Mehndi Wale Hath (Pakistani Movie)Mein Khiladi Tu AnariMein Prem Ki Deewani HoonMelaMela (1948)Mela (1971)Mela (2000)Memsahib (1956)Men Not AllowedMera Dharam (1986)Mera Dil Leke Dekho (2006)Mera Dil Tere Liye (1991)Mera Faisla (1984)Mera Farz (1988)Mera Gaon Mera Desh (1971)Mera Jawaab (1985)Mera Lahoo (1987)Mera Muqaddar (1988)Mera Naam JokerMera Pati Sirf Mera Hai (1990)Mera Pehla Pehla Pyaar (2007)Mera Pind (Punjabi Movie) (2008)Mera Saathi (1985)Mera Saaya (1966)Mera Salaam (1957)Mera Shikar (1988)Mera Suhag (1987)Mera Vachan Geeta Ki Qasam (1977)Mera Yaar Mera Dushman (1987)Meraa Ghar Mere Bachche (1985)Mere Apne (1971)Mere Baap Pehle Aap (2008)Mere Do Anmol Ratan (1998)Mere Gharib Nawaz (1973)Mere Humdum Mere Dost (1968)Mere Humsafar (1970)Mere Huzoor (1968)Mere Jeevan SaathiMere Jeevan Saathi (1972)Mere Khwabon Mein Jo Aaye (2009)Mere Mehboob (1963)Mere Sajana Saath Nibhana (1992)Mere SajnaMere Sanam (1965)Mere Sapno Ki Rani (1997)Mere Yaar Ki ShaadiMere Yaar Ki Shaadi HaiMeri Adalat (1984)Meri Behen (1944)Meri Biwi Ka Jawab Nahin (2004)Meri Jung (1985)Meri Jung (2006)Meri Padosan (2008)Meri Surat Teri Ankhen (1963)Meri Zabaan (1988)Merie Sapnun Ki RaniMetroMil Gayee Manzil Mujhe (1989)Mil Gayee Manzil Mujhe (1998)MilanMilan (1965)Milap (1955)MiliMini Punjab (Movie) (2009)Mirza Ghalib (1954)Mirza Sahiba (1947)Mirza Sahiba (2006)Mismatch (2009)Miss 420 (1998)Miss Mary (1957)Mission Istaanbul (2008)Mission KashmirMission The Last War (2008)Mitr (2002)Mitr My Friend (2002)Mittal Vs Mittal (2010)Mitti (2001)Mitti Aur Sona (1989)Mitti Mein Sona (1960)Mitti Wajaan Maardi (2007)Miya Biwi Aur SaaliMobile Phone (2006)MohabbatMohabbat (1985)Mohabbat Ho Gayee Hai TumseMohabbat Isko Kahete Hai (1965)Mohabbat Ka Paigham (1988)Mohabbat Ke Dushman (1987) - RelistMohabbat Ki Arzoo (1994)Mohabbatein (2000)Mohandas (2009)Mohar (1959)MohraMokshaMome ki Gudiya (1972)Money Hain To Honey Hain (2008)Monsoon WeddingMorchha (1980)Morning Raga (2004)Morning Walk (2009)Mother (1998)Mother India (1957)Mr Azaad (1994)Mr Bechara (1996)Mr Bond (1992)Mr Prime MinisterMr Romeo (1974)Mr Romeo (1996)Mr White Mr Black (2008)Mr Ya MissMr. & Mrs. 55 (1955)Mr. & Mrs. KhiladiMr. 100% (2006)Mr. Hot Mr. Kool (2007)Mr. India (1987)Mr. KhujiliMr. NatwarlaalMr. 
X In Bombay (1964)Mr.AashiqMrityudaata (1997)Mrityudand (1997)Mudda - The Issue (2003)Muddat (1986)Mughal-E-AzamMujh Sai Shaadi KarogiMujhay Kuch Kehna Hai (2001)Mujhe Insaaf Chahiye (1983)Mujhe Jeene Do (1963)Mujhe Meri Biwi Se Bachaao (2001)Mujhse Dosti KarogeMujhse Dosti Karogi (2005)Mujhse Shaadi KarogiMujrim (1958)Mujrim (1989)Mukesh Duggals MilanMukhbiir (2008)Mukka (1996)Mukti (1977)Mulaqat (2001)Mulzim (1988)Mumbai Matinee (2003)Mumbai Salsa (2007)Mumbai Se Aaya Mera DostMumbai XpressMummy Ji (2007)Munimji (1955)Munna Bhai M.B.B.SMuqabla (1979)Muqabla II (1995)MuqadarMuqaddar Ka SikandarMurderMusafirMusafir (1940)Musafir (1957)Musafir - Club & LoungeMuskaanMuskurahat (1992)Muskurake Dekh Zara (2010)My Brother NikhilMy Friend Ganesha (2007)My Love (1970)My Name Is Anthony Gonsalves (2007)My Name Is Khan (2010)My Story (1993)NNa Ghar Ke Na Ghaat Ke (2010)Na Tum Jano Na HumNaa-Mumkin (1988)NaachNaach Govinda Naach (1990)Naach Utha Sansar(1978)Naache Mayuri (1986)Naag Mani (1991)NaajayazNaamNaam Gum Jaye GaNaam Kya Hai (1999)Naam O Nishan (1987)Naaraaz (1994)Nache Mayuri (1986)Nachnewale Gaanewale (1990)Nadiya Ke Paar (1948)Nadiya Ke Paar (1982)Nafrat Ki Aandhi (1988)Nagin (1954)Nagin (1976)Nagin Aur Nagina (1987)Nagina (1989)Nai Roshni (1967)Nai Umar Ki Nai Fasal (1965)Naiyya (1979)NajayazNaka BandiNakhuda (1981)Nakli Nawab (1962)Naksha (2006)Nalaik (2005)Namak (1996)Namak Halal (1982)Namak Haram (1973)Namaste (2002)Namaste Ji (1965)Namaste London (2007) Vol - 1Namaste London (2007) Vol - 2Namkeen (1982)Namoona (1949)Nanak Naam Chardi Kalla (2007)Nanhe Jaisalmer (2007)Naqaab (2007)Naqab (1988)NaraazNaram Garam (1981)Nargis (1992)Narsimha (1991)NaseebNaseeb (1998)Nastik (1954)Nastik (1983)Nau Do Gyarah (1957)Naughty Boy And LoveguruNaukar Biwi Ka (1983)Naukari (1954)Nauker (1979)Naunihal (1967)Nausherwan E Adil (1957)Navrang (1959)Navrang Chundadi (1990)Naya AadmiNaya Andaz (1956)Naya Daur (1954)Naya Nasha (1973)Naya Raasta (1970)Naya SansarNayak - The Real HeroNayak - The Real Hero (2001)Nayee PadosanNazarNazar Ke Samne (1995)Nazarein Milane Wali (2009)Nazraana (1961)Neal n NikkiNeel Kamal (1968)Neela Aakash (1965)Neend Hamari Khwab Tumhare (1966)Nehle Pe Dehla (2007)Netaji Subhas Chandra Bose (2005)New Delhi (1956)New York (2009)Nigahen (1989)Nigehbaan - The Third Eye (2005)Night In London (1967)Nikaah (1982)Nikamma (1983)Nishaan (1982)Nishabd (2007)Nishana (1996)Nishanebaaz (1989)Nishchaiy (1992)No EntryNo Smoking (2007)Noor Jahan (1967)Noorie (1979)Nyay Anyay (1990)OO Meri MehboobaO Tera Kya Kehna (1959)Officer (2001)Oh Darling Yeh Hai India (1995)Om - The Ultimate Power (2003)Om Jai Jagdesh (2002)Om Shanti Om (2007)OmkaraOne 2 Ka 4 (2000)One Fine Monday (2008)One Two Three (2008)Oonch Neech Beech (1989)Oonche LogOoopsOpera House (1961)Out Of Control (2003)Oye Lucky Lucky Oye (2008)PPaa (2009)PaagalpanPaanch (2002)Paandav (1995)PaapPaap Ki DuniyaPaap Ki Sazaa (1988)Paapi Devta (1995)Paathshaala (2010)Paayal (1992)Padosan (1968)Page 3Pagla Kahin Ka (1970)PaheliPainter Babu (1983)Paisa VasoolPakeezah (1971)PalPal Pal Dil Ke Ssaat (2009)Paley Khan (1986)Palki (1967)Palkon Ki Chhaon Mein (1977)Panaah (1992)Panga Naa Lo (2007)Pangaa Gang (2009)Pankh (2009)Papa Kehte Hain (1996)Papa the Great (2000)Papi Gudia (1994)Paramaatma (1994)Parampara (1992)Paramveer Chakra (1994)Paras (1949)Parasmani (1963)Paraya Dhan (1971)Parayaa Ghar (1988)Parchhaiyan (1952)PardesPardesi (1992)Pardesi Babu (1998)Parichay (1972)Parinay (1974)ParindaParineetaParivar (1956)Parki Thapan (1982)Parmaatma 
(1994)Partner (2007)ParvarishParvarish (1958)ParwanaParwana (1947)Parwana (1971)Parwane (1992)Pataal Bhairavi (1985)Patang (1960)Patanga (1949)Patanga (1949) All SongsPathar Ke Insan (1991)Pathar Ke PhoolPathar Ke Sanam (1967)Pathreela Raasta (1995)Pati Parmeshwar (1987)Pati Patni (1966)Pati Patni Aur Tawaif (1990)Patita (1953)Patita (1980)Patthar (1985)Patthar Aur Payal (1974)Patthar Ke Khwab (1969)Patthar Ke Phool (1991)Patthar Ke Sanam (1967)Payal (1957)Payal Ki Jhankaar (1980)Paying GuestPaying Guests (2009)Peechha Karro (1986)Pehchaan (1993)Pehchan (1970)Pehla NashaPehla Pehla Pyar (1994)Pehli Jhalak (1954)Pehli Mohabbat (1991)Pehli Nazar (1945)Pehli Nazar Ka Pehla PyarPehli Nazar Mein (1996)Phaansi Ke Baad (1985)Phagun (1958)Phande Baaz (1978)Phir Bhi (1971)Phir Bhi Dil Hai Hindustani (2000)Phir Hera PheriPhir Kab Milogi (1974)Phir Kabhi (2009)Phir MilengePhir Subah Hogi (1958)Phir Teri Kahani Yaad AyeePhir Teri Yaad AayiPhir Wahi Dil Laya Hoon (1963)Phir Wahi Raat (1980)Phool (1993)Phool (1993) - RelistPhool Aur AngaarPhool Aur KaantePhool Aur PathorPhool Bane AngarayPhool Bane Angare (1963) - RelistPhool Bane Angarey (1963)Phool Khile Hain Gulshan Gulshan (1978)Phulwari (1983)Pighalta Aasman (1985)Pind Di KudiPinjarPitaah (2001)Piya Ka Ghar(1972)Piya Se Milke Aaye NainPlanPlatform (1993)Pocket Maar (1974)Police (1958)Police Aur Mujrim (1992)Police Force - An Inside Story (2004)Policewala Gunda (1995)Ponga Pandit (1975)Pooja Ke Phool (1964)Poonam (1952)Poonam Ki Raat (1965)Popcorn Khao Mast Ho JaoPraan Jaye Par Shaan Na Jaye (2002)Prabhat (1941)Prabhu Ka GharPrahaar (1991)Pran Jaye Par Vachan Na Jaye (1974)Pranali (2008)Prarthana (1943)PrashnachinhaPratha (2002)Prati Bandh (1990)Pratigya (1975)Pratigyabadh (1991)Pratikar (1991)Pratiksha (1991)Prem (1995)Prem AganPrem Bandhan (1978)Prem Deewane (1992)Prem GeetPrem Granth (1996)Prem Jal (1986)Prem Kaa Game (2010)Prem Kahani (1975)Prem Nagar (1974)Prem Parbat (1973)Prem Patra (1962)Prem Pratigyaa (1989)Prem Pujari (1970)Prem Quaidi (1991)Prem RogPrem Shastra (1974)Prem Tapasya (1983)Prem Yog (1994)Premnagar (1974)Prince (1969)Prince No. 
1 (2001)Prithvi (1997)Private Secretary (1962)Priyanka (1995)Priyatama (1977)Professor (1962)Professor Ki Padosan (1993)Professor Pyarelal (1981)Provoked (2006)PukaarPukaar (1983)Purab Aur Paschim (1970)Puran Bhagat (1933)Purana Mandir (1984)Purani Pehchan (1971)Purnima (1965)Pushpanjali (1970)Putt Jattan De (1981)Pyaar Diwana Hota Hai (2002)Pyaar Impossible (2010)Pyaar Ka MandirPyaar Ka Saagar (1961)Pyaar Ka Saaya (1991)Pyaar Ka Suadagar (1991)Pyaar Karke Dekho (1987)Pyaar Kiya Nahi Jaata (2002)Pyaar Kiya Nahin JaataPyaar Kiya To Darna KiyaPyaar Kiye JaaPyaar Kiye Jaa (1966)Pyaar Koi Khel NahiPyaar To Hona Hi ThaPyaar Tune Kya KiyaPyaar Zindagi Hai (2001)Pyaara Dushman (1980)Pyaas (1982)PyaasaPyaasa (1957)Pyaasa Sawan (1981)Pyaasi Mamta (1986)Pyar Bhara Dil (1991)Pyar Hi Pyar (1969)Pyar Ho Gaya (1985)Pyar Hua Badnaam (1992)Pyar Hua Chori Chori (1991)Pyar Ishq Aur MohabatPyar Jhukta Nahin (1985)Pyar Ka Devta (1991)Pyar Ka Karz (1990)Pyar Ka Mandir (1987)Pyar Ka Mausam (1969)Pyar Ka Rog (1994)Pyar Ka Sapna (1969)Pyar Ka Taraana (1993)Pyar Karke Dekho (1987)Pyar Ke Do Pal (1986)Pyar Ke GeetPyar Ke Insaan (1991)Pyar Ke Naam Qurban (1990)Pyar ke Side EffectsPyar Ki Baaten (1951)Pyar Ki Dhun (2002)Pyar Ki Jeet (1948)Pyar Ki Rahen (1959)Pyar Kiya Hai Pyar Karenge (1986)Pyar Kiya To Darna Kya (1998)Pyar Main Khabi KhabiPyar Mein TwistPyar Mohabat (1988)Pyar Mohabbat (1966)Pyar Mohabbat (1998)Pyar Pyar (1990)Pyar To Hona Hi Tha (1998)Pyara Dushman (1980)Pyare MohanPyari Bhabhi (1986)Pyase PanchiPyasi Shaam (1969)QQahar (1997)Qaidi (1984)Qaidi No 36 (1994)Qat'l (1986)QayamatQayamat Se Qayamat TakQila (1998)Quick Gun Murugun (2009)QurbaniQurbani - What A Wonder (English Version) (1980)RRaagini (1958)Raagrang (1952)Raaj TilakRaajaRaam BharoseRaampur Ka Lakshman (1972)Raaste Ka Patthar (1972)Raaste Pyar Ke (1982)Raat Aur Din (1967)Raat Gayi Baat Gayi (2009)Raat Ke Andhere Mein (1986)Raaton Ka Raja (1970)Raavan (1984)Raaz (1967)Raaz (2001)Raaz - The Mystery Continues (2008)Rab Ne Bana Di Jodi (2008)Race (2008) - Vol 1Race (2008) - Vol 2Radha Ka Sangam (1992)Radio (2009)Rafoo Chakkar (1975)Rafta RaftaRaghuveer (1995)Rahgir (1968)Rahi Badal Gaye (1985)RahulRailway Platform (1955)RainRaincoatRaiszaada (1989)Raj Hath (1956)Raj KumarRaj Kumar (1964)Raj Tilak (1984)RajaRaja Aur Runk (1968)Raja BabuRaja Bhaiya (2003)Raja HindustaniRaja Jani(1972)Raja Ji (1999)Raja Ki Aayegi Baarat (1997)Raja Ko Rani Se Pyar Ho Gaya (2000)Raja Rani (1973)Raja Saab (1969)Rajhath (1956)Rajkumar (1964)Rajkumar (2)Rajnigandha (1974)Rajput (1982)Raju Bangaya GentlemanRaju ChachaRakhi (1962)Rakhi Aur Hathkadi (1972)RakhtRakhwalaRakhwala (1971)Rakhwale (1994)Raksha (1981)RakShakRam Aur Shyam (1967)Ram Avtar (1988)Ram BalramRam Gopal Varma Ki Aag (2007)Ram JaaneRam LakhanRam Shastra (1995)Ram Teri Ganga Maili (1985)Ram VivahRama O Rama (1988)Rama Rama Kya Hai Dramaa (2007)Ramji London WaleyRamnagri (1982)Rampur Ka Lakshman (1972)Ramu To Diwana Hai (1980)RangRang (1993)Rang De BasantiRang De Basanti RemixesRangeelaRangeen RaatenRangoli (1961)Rann (2010)RanoRaqeeb (2007)Raqeeb Rivals In Love (2007)Ratnadeep (1979)Raton Ka Raja (1970)Rattan (1944)Ravan Raaj (1985)Razia Sultan - OldRed (2007)Red Swastik (2007)RefugeeRehgurzarRehna Hai Tere Dil MeinResham Ki Dori (1974)Reshma (Movie)Reshma Aur Shera (1971)Reshmi Roomal (1961)Return Of JewelthiefRevatiRickshawala (1973)Right Yaaa Wrong (2010)Rishte Naate (1965)RishteyRishtey (2002)Risk (2007)RoadRoad Movie (2010)Road To Sangam (2010)Roadside Romeo (2008)Rock Dancer (1994)Rock On 
(2008)Rocket Singh (2009)Rockford (1999)Rockin Meera (2009)Rocky (1981)Rocky (2006)Rog (2004)RojaRok Sako To Rok Lo (2004)Romance (1983)Roop Ki Rani Choron Ka Raja (1993)Roop Tera Mastana (1972)Roshni (2002)Roti (1974)Roti Kapada Aur Makaan (1974)Royal Utsav (2008)Ru-Ba-Ru (2008)RudaaliRudrakshRukshat (1988)RulesRun (2004)Runway (2009)Rupali Dalal - Badra (2008)Ruslaan (2009)Rustam E Sohrab (1963)SSaagarSaahas (1979)Saaheb (1985)Saahibaa (1993)Saajan (1969)Saajan (1991)Saajan Chale Sasural (1996)Saajan Ka GharSaajan Ki Bahon MeinSaajan Ki Saheli (1981)Saala Bigda JayeSaanch Ko Aanch Nahin (1979)Saanson Ki Sargam (1991)Saas Bahu Aur Sensex (2008)Saas Bhi Kabhi Bahu Thi (1970)Saat Rang Ke SapneSaath SaathSaathi (1968)Saathi (1991)SaathiyaSaatwaan Asmaan (1992)Saawan - The love seasonSaawariya (2007)SaayaSaaz (1998)Saazish (1988)Saazish (1998)Saboot (1980)Sabse Bada Khiladi (1995)Sabse Bada Rupaiya (1976)Sachaa Jhutha (1970)Sachaai (1969)Sachcha Pyar (1994)Sachche Ka Bol Bala (1989)Sacred Evil (2006)Sacred Evil (A True Story)Sadaa Suhagan (1986)SadakSadhana (1958)Sadhu Aur Shaitan (1968)Sadiyaan (2010)SadmaSafarSafari (1999)Sagai (1951)Sagar Sangam (1986)Sagina (1974)Sahebzaade (1992)Sahebzaade (1992) (Relist)Sahhas (1981)Sahib Bahadur (1980)Sahib Bibi Aur Ghulam (1962)SahibaanSailaab(1990)SainikSajan Bina Suhagan (1978)SajjanSajna Saath Nibhana (1987)Sajna Ve Sajna (2007)SakuraSalaakhen (1975)Salaakhen (1998)Salaam Bombay (1988)Salaam Memsaab(1979)Salaam-e-Ishq (2006)Salam NamasteSalam-E-MohabbatSalamiSalma Pe Dil Aa Gaya (1997)Samadhi (1972)Samay - When Time Strikes (2003)Samband (1968) - RelistSambandh (1969)Sambandh (1996)Samjhauta (1973)Samraat (1982)Samrat Chandragupta (1958)Samundar (1986)Sanam (1997)Sanam Bewafa (1991)Sanam Harjai (1995)Sanam Tere Hain Hum (2000)Sanam Teri KasamSandhya (2003)Sangam (1964)Sangdil Sanam (1994)Sangeet (1992)Sangeet Samrat Tansen (1962)Sangraam (1993)Sangram (1976)Sanjay (1995)Sanjh Aur Savera (1964)Sanjog (1961)Sanjog (1985)Sanjog 1972Sankat City (2009)Sansani (1980)Sansar (1987)Sanskar (1952)Sant Gyaneshwar (2000)Sant Janabai (1949)Santaan (1993)Santosh (1984)Santoshi Maa Ki Mahima (1985)SapnaSapnaySapne Sajan Ke (1992)Sapne Suhane (1961)Sapnon Ka Mandir (1991)Sapoot (1996)Sar Utha Ke Jiyo (1998)Saranga (1960)Saraswati Chandra (1968)Sardari Begum (1996)Sarfrosh (1999) Re-ListSargam (1950)Sargam (1981)Sarhad (1995)Sarhad Paar (2007)SarkarSarkar Raj (2008)SarpanchSarphira (1992)Sasural (1961)Sat Sri Akal (2008)SathiSati Naagkanya (1956)Sati Naari (1965)Sati SavitriSati Savitri (1964)Satta - The Game of Power (2002)Satta Baazar (1959)Satte Pe SattaSatyaSatyakam (1969)Satyam Shivam Sundaram (1978)Satyamev Jayate (1987)Sau Crore (1991) RelistSau Saal Baad (1989)Sauda (1995)Sauda - The Deal (2005)SaudagarSaudagar (1991)Saugandh (1990)Sautela (1999)Sautela Bhai (1996)Sautela Pati (1985)SautenSavalee (2007)Savera (1973)Savere Wali Gaadi (1985)Sawaal (1982)Sawan Bhadon (1970)Sawan Ki Ghata (1966)Sawan Ko Aane Do (1979)Say Salaam India (2007)Sayesha (1994)Sazaa (1951)Seema (1955)Seeta Aur Geeta (1972)SeharSehra (1963)Shaabash You Can Do It (2009)Shaadi Ka Laddoo (2004)Shaadi Ker Ke Phas Gaya YaarShaadi No.1Shaadi Se Pehle (2006)ShaanShaan (1980)Shaapit (2010)Shabab (1954)ShabdShabnam (1964)Shabnam (1993)Shabnam MausiShadow (2009)Shagird (1967)Shagird (1967)Shagoon (1964)Shaheed (1965)Shaheed Bhagat SinghShaheed E AzamShahenshah (1988)Shair (1949)Shaka Laka Boom BoomShakkaShaktiShakti (1982)ShaktimanShalimarSham Ghansham (1998)Shama (1961)Shama Parwana 
(1954)Shandaar (1990)Shanhenshah Khoon Bhari MaangShankar Hussain (1977)Shapath (1984)SharabiSharada (1957)Sharada (1981)Sharara (1984)ShararatShareef Badmaash (1973)Sharmilli (1971)ShartShart - RelistShastra (1996)Shatranj (1993)Shatru (1989)Shaukeen (1981)Shaurya (2008)Sheela (1987)Sheen (2004)SheeshaShehnaiShehnai (1947)Shehnai (1964)Shehzaade (1989)ShehzadaShera Shamshera (1989)Sherdil (1988)Sheshnaag (1990)Shikaar (2004)Shikari (1963)Shikari (1963) - RelistShikari (1991)Shikari (2000)ShikharShikshaa (1979)Shirdi Ke Sai Baba (1977)Shirin Farhad (1956)Shiv Mahima (1992)ShivaShohrat (1996)Shola Aur Shabnam (1992)SholayShool (1999)Shootout At Lokhandwala (2007)Shor (1972)Shortkut (2009)Showbiz (2007)Shree 420 (1955)Shreeman Aashique (1993)Shubh Kaamna (1983)Shudhu Tumi (2004)ShukriyaShuruaat (1987)Sikandar (2009)Sikandar-E-Azam (1965)Sikka (1989)Silsala Hai Pyar Ka (1999)SilsiilaySilsilaSilsila (1981)Sindoor (1987)Singapore (1960)Singh Is Kinng (2008)Sir (1993)Sirf (2008)Sirf TumSitam (2005)SitamgarSitapur Ki Geeta (1987)Sitara (1980)Sivaji The Boss (2010)Slumdog Millionaire (2008)Smile PleaseSmugglerSnip (2000)SochSocha Na ThaSohni Mahiwal (1958)Sohni Mahiwal (1984)Solah Satra (1990)SoldierSolva Saal (1958)Son Of IndiaSone Ki Chidiya (1958)Sone Ki Zanjeer (1991)Sone Pe Suhaaga (1988)Soniye I Love YouSoorma Bhopali (1987)SooryavanshamSorry Bhai (2008)Souten (1983)Souten (2006)Souten Ki BetiSouten Ki Beti (1989)Speed (2007)Srinivasa Kalyana (1999)SssshhhStar (1982)Stop(2004)Straight (2009)Strangers (2007)StreeStriker (2010)Strings - Bound By Faith (2006)Stumped (2003)Stuntman (1994)Style (2001)Subah (1982)Subah Ka Tara (1954)Subha O Sham (1972)SuhaagSuhaag Raat (1968)Suhag (1979)Suhagan (1964)Suhagan (1964) Complete songsSujata (1959)SukhSultanat (1986)Summer 2007 (2008)Sun Meri Laila (1983)Sun Sajna (1982)Sun ZarraSunayana (1979)Sunday (2007)Sunehra Sansar (1975)Sunehre Din (1949)Sunehri NaginSunghursh (1968)Sunny (1984)Suno Na (2009)Suno Sasurjee (2004)SupariSupattar Beenanie (1981)Super Star (2008)SurSur SangamSuraag (1980)Suraj (1966)Suraj Aur Chanda (1973)Suraksha (1979)Suraksha (1996)Surya (2003)Suryaputra Shanidev (1994)Suryavanshi (1992)Sushila (1966)Susral (1941)SwadesSwaha (2010)Swami (1977)Swami (2007)Swami Dada (1982)Swarag Narak (1978)Swarg (1990)Swarg Se SunderSwati (1986)Swayamvar (1980)Sweekar Kiya Maine (1982)TTa Ra Rum Pum (2007)TaalTaaqat (1995)Taare Zameen Par (2007)TaarzanTadap Aisi Bhi Hoti Hai (1988)TadipaarTadipaar (1993) - RelistTahaan (2008)Tahalka (1990)Taj (1956)Taj MahalTaj Mahal (1963)Takkar (1980)Takkar (1995)TalashTalash (1969)TamannaTangewala (1972)Tango Charlie - The Heart Of A Hero (2005)Tanya (1994) UnreleasedTapasya(1975)TaqdeerTaqdeer (1967)Taqdeer Ka BadshahTaqdeer Ka Tamasha (1990)Taqdeer Ke Phere (1992) UnreleasedTaqdeerwala (1995)Tarana (1951)Tarana (1979)Tarazu (1997)TarkiebTashan (2008)Tasveer (1966)Tathastu (2006)Tawaif (1985)Tawaif Ki Beti (1986)TaxiTaxi Chor (1980)Taxi Driver (1954)Taxi No.9211Team - The Force (2007)Teen Devian (1965)Teen Patti (2010)Teesra Kaun (1975)Teesra Kaun (1994)Teesra Kinara (1985)Teesri AankhTeesri KasamTeesri Kasam (1966)Teesri Manzil (1966)Tehqiqaat (1993)TehzeebTejasvini (1994)Tera Jadoo Chal Gaya (2002)Tera Mera Ki Rishta (2009)Tera Mera Saath Rahen (2001)Tera Meraa Dil (2003)Tere Ghar Ke Saamne (1963)Tere Liye (2000)Tere Mere Sapne (1971)Tere Mere Sapne (1996)Tere NaamTere Pyar Mein (1979)Tere Shahar Mein (1986)Teree Sang (2009)Teri Baahon Mein (1983)Teri KasamTeri Meherbaniyan (1985)Teri Meri Ik JindriTeri Payal 
Mere Geet (1992)Teri Talash Mein (1990)Tesri Manzil (1966)TezaabTezaab (1989)Tezaab (2005)Thakshak (1999)ThanedaarThe Awakening (2008)The Blue Umbrella (2007)The Bong Connection (2006)The Burning Train (1980)The Don (1995)The Entertainment (2007)The FilmThe Gentleman (1994)The Great GamblerThe Great Indian Butterfly (2009)The HeroThe Hero - Abhimanyu (2008)The KillerThe Legend Of Bhagat SinghThe Namesake (2007)The Perfect Husband (2004)The Rising - Mangal Pande (2005)The Road To DestinyThe Stoneman Murders (2009)The Train (1970)The Train (2007)The Unforgettable (2009)Thikana (1987)Thoda Pyaar Thoda Magic (2008)Thoda Tum Badlo Thoda Hum (2004)Thodasa Roomani Ho Jaayen (1990)Thodi Life Thoda Magic (2008)Thodisi Bewafaii (1980)Thokar (1953)Three - Love Lies Betrayal (2009)Ties Of BloodTime PaasTiranga (1992)Tirchhi Topiwale (1998)Toh Baat Pakki (2010)Tohfa (1984)Tom Dick HarryToofan (1989)Toofan (1989) - RelistToote Khilone (1978)ToplessToss (2009)Traffic SignalTraffic Signal (2007)Tridev (1989)Trimurti (1995)Trinetra (1991)Trishul (1978)Trump Card (2010)Tu Chor Main Sipahi (1996)Tu Hi Mera Dil (1995)Tu Nagin Main Sapera (1989)Tujhe Meri KasamTujhe Nahin Chhodunga (1988)Tulsidas (1954)TumTum BinTum Bin (2001) RelistTum Haseen Main Jawaan (1970)Tum Jiyo Hazaaron Saal (1997)Tum Mere Ho (1990)Tum Mile (2009)Tum Milo Toh Sahi (2010)Tumhari Kassam (1978)Tumko Na Bhool PayengeTumsa Nahi DekhaTumsa Nahin Dekha (1957)Tumse Acha Kaun HaiTumse MilkeTyaagi (1992)UU Bomsi N MeU Me Aur Hum (2008)Udan (1997)Udan Khatola (1955)Udhar Ki Zindagi ( 1994)Uff Kia Jaado Mohabbat HaiUff Yeh Mohabbat (1997)Ugly Aur Pagli (2008)Ujaala (1981)Ujala (1959)Uljhan (1975)Uljhan (2001)Umang (1970)UmarUmar Qaid (1975)Umeed (2008)Umrao JaanUmrao Jaan (2006)UnnsUpasana (19710Uphaar (1971)Upkar (1967)Uran Khatola (1955)Us Paar (1974)Usne Kaha Tha (1960)Ustaad (1988)Ustadon Ke Ustad (1963)Utsav (1985)Utthaan (2006)VVaadaVaada Raha (2009)Vaah Life ho to AisiVaastav (1999)Vachan (1955)Vadh (2002)Valentine Days (2003)Vallah Kya Baat Hai (1962)Vanaprastham - The Last DanceVandanaVanshVansh (1990)Vansh (1992)Vardi (1988)Vartmaan (1994)Vasna Ki Aag (1987)Veer (1995)Veer (2009)Veer ZaaraVeerana (1988)Veergati (1995)Velu Nayakan (1999)Via Darjeeling (2008)Victoria No 203 (2007)Victoria No. 
203 (1972)Victory (2008)Vidhaan (1986)Vidhaata (1982)Vijay (1988)Vijay PathVijay Path (1994)Vijeta (1996)Vinashak (1997)Virasat (1997)Virodhi (1992)Virsa (Punjabi Movie)ViruddhVishkanya (1991)Vishnu Devaa (1990)Vishwanath (1978)Vishwas (1969)Vishwasghaat (1976)Vishwatma (1992)Vishwavidhata (1997)VivahWWaah Tera Kya Kehna (2002)Waaris (1988)Waaris (1990)Wafaa (2008)Wagah (2006)Waisa Bhi Hota Hai [2002]Wajahh (2004)Wajood (1998)Wake Up Sid (2009)Waman Avtar (1995)Wanted (1983)Wanted (2009)Wapas (1969)WaqtWaqt (1965)Waqt Hamara Hai (1993)Waqt Ki Awaz (1988)Waqt Ki Deewar (1981)Wardat (1981)Waris (1969)Waris Shah (Punjabi Movie of Gurdas Mann) (2006)Warrant (1975)WaterWelcome (2007)Welcome To Sajjanpur (2008)Well Done Abba (2010)Whats Your Raashee (2009)Wheres the Party Yaar (2003)White Noise (2005)With love Tumhara (2006)Woh 7 Din (1983)Woh Kaun Thi (1964)Woh Lamhe (2006)Woh Phir Aayegi (1988)Woh Saat Din (1983)Woh Tera Naam Tha (2003)Woodstock Villa (2008)World Cupp 2011 (2009)XXcuse Me (2003)YY.M.I (Yeh Mera India) - (2009)Yaad Rakhegi Duiya (1993)YaadeinYaadgar (1970)Yaadon Ke Mausam (1990)Yaadon Ki Baarat (1973)Yaadon Ki Kasam (1985)Yaar Gaddar (1994)Yaara Dildara (1991)Yaaran naal BaharanYaarana (1981)Yaariyan (Gurdas Mann Movie) (2007)Yadon Ki Kasam (1985)YahaanYahudi (1958)Yakeen (2005)Yakeen( 1969)YalgaarYamraaj (1998)YaqeenYaranaYashYateem (1988)Yatra (2006)Yeh Aag Kab Bujhegi (1991)Yeh Aashiqui Meri (1998)Yeh DilYeh Dil AashiqanaYeh DillagiYeh Hai JalwaYeh Hai Mumbai Meri Jaan (1996)Yeh Hai PremYeh Ishq Nahin Aasaan (1982)Yeh Jeevan HaiYeh Kaisi MohabbatYeh Kya Ho Raha HaiYeh Lamhe Judaai Ke (2004)Yeh Mohabbat Hai (2002)Yeh Nazdeekiyan (1982)Yeh Raaste Hain Pyaar KeYeh Raaste Hain Pyar Ke (1963)Yeh Raat Phir Na Aaygi (1966)Yeh Sunday Kyun Aata Hai (2010)Yeh Tera Ghar Yeh Mera GharYeh To Kamaal Ho Gaya (1982)Yeh Vaada Raha (1982)Yeh Zindagi Ka Safar (2001)Yeh Zindagi Kitni Haseen Hai (1966)Yehi Hai Zindagi (2004)Yehi To Pyar Hai (2001)Yes BossYeshwant (1997)Yodha 1991Yudh (1985)Yugpurush (1997)Yun Hota Toh Kya HotaYUVAYuvvraaj (2008)ZZaalim (1994)Zabardast (1985)Zahreelay (1990)Zakhm (1998)Zakhmee (1975)Zakhmi DilZakhmi Sipahi (1995)Zalzala (1998)Zamaana Deewana (1995)Zamaanat (2005)Zamaane Ko Dikhana Hai (1982)Zamane Se Kya Darna (1994)Zamane Se Poochho (1976)ZameenZameen Aasmaan (1972)Zameen Aasman (1984)Zameer (1975)Zameer (2005)Zanjeer (1973)ZeenatZeherZehreela Insaan (1974)Zid (1994)Ziddi (1964)Ziddi (1997)ZindaZinda DilZinda Dil (1975)Zindagani (1986)Zindagani (1986)Zindagi (1964)Zindagi Aur Maut (1965)Zindagi Ek Juaa (1992)Zindagi Khoobsurat HaiZindagi RocksZindagi Tere Naam (2008)Zindagi Zindagi (1972)ZorZor Lagaa Ke Haiya (2009)Zorro (1975)Zubeidaa (2001)Zulm Ki Hukumat (1992)Zulm Ki Hukumat (1992) - RelistZulm Ki Pukar (1979)Zulm Ko Jala Doonga (1988)Zulmi (1999)
-Apna Desh [1972 – FLAC]
DOWNLOAD ►►► https://tinurli.com/2uwhN4
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/How to Get Ok Mein Dhokhe 4 Full Movie In Hindi Hd for Free A Simple Guide to Download the Blockbuster Film.md b/spaces/cihyFjudo/fairness-paper-search/How to Get Ok Mein Dhokhe 4 Full Movie In Hindi Hd for Free A Simple Guide to Download the Blockbuster Film.md
deleted file mode 100644
index 948a1ab413c1394157b4a84ce8b98c4ebcfc0ce3..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/How to Get Ok Mein Dhokhe 4 Full Movie In Hindi Hd for Free A Simple Guide to Download the Blockbuster Film.md
+++ /dev/null
@@ -1,6 +0,0 @@
-download Jai Ramji 3 movie free
Download Zip ››››› https://tinurli.com/2uwkmx
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Pinnacle-Hollywood-FX-V5248-3068-Effects-36.md b/spaces/cihyFjudo/fairness-paper-search/Pinnacle-Hollywood-FX-V5248-3068-Effects-36.md
deleted file mode 100644
index 8d3b4c2f5a55629f50676fdda60db5edb3fa1aea..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Pinnacle-Hollywood-FX-V5248-3068-Effects-36.md
+++ /dev/null
@@ -1,39 +0,0 @@
-## Pinnacle Hollywood FX V5.2.48 3068 Effects 36
-
-
-
-**Pinnacle Hollywood FX V5.2.48 3068 Effects 36 » [https://walllowcopo.blogspot.com/?download=2twr4z](https://walllowcopo.blogspot.com/?download=2twr4z)**
-
-
-
-# Pinnacle Hollywood FX V5.2.48 3068 Effects 36: A Review
-
-
-
-Pinnacle Hollywood FX V5.2.48 3068 Effects 36 is a package of video effects for Pinnacle Studio, a popular video editing program. This package includes over 20 themes of 16 transitions each, amounting to more than 320 unique, professional-level transitions[^1^]. These transitions can be used to create dynamic and realistic effects for your videos, such as explosions, fire, smoke, water, light, and more.
-
-
-
-The package also comes with a bonus set of gradient transitions, which can add depth and color to your videos. You can customize the parameters of each transition, such as direction, speed, colors, etc., using the Hollywood FX editor[^1^]. You can also create your own effects using the library editor and save them with the extension \*.hfz[^1^].
-
-
-
-Pinnacle Hollywood FX V5.2.48 3068 Effects 36 is compatible with Pinnacle Studio versions since 2005, as well as Pinnacle Edition and Pinnacle Liquid[^1^]. The system requirements for this package are: an Intel® Pentium® or AMD Athlon™ 1.8 GHz processor (2.4 GHz or faster recommended), an Intel Core™ 2 Duo 2.4 GHz for AVCHD, or an Intel Core™ 2 Quad 2.66 GHz or Intel Core i7 for AVCHD 1920; 1 GB of system memory (2 GB required for AVCHD); a DirectX® 9 or 10 compatible graphics card with 64 MB of memory (128 MB or more recommended, 256 MB required for HD and AVCHD); and a DirectX 9 or later compatible sound card[^1^].
-
-
-
-If you are looking for a way to enhance your videos with stunning and realistic effects, Pinnacle Hollywood FX V5.2.48 3068 Effects 36 might be a good choice for you. You can download a demo version of this package from the official website[^2^] and try it out before buying it.
-
-
-
-Pinnacle Hollywood FX V5.2.48 3068 Effects 36 is easy to use and integrate with Pinnacle Studio. You can access the Hollywood FX editor from within the program by clicking on the icon of the effect you want to insert between frames or clips[^1^]. You can also run the editor as a standalone program by launching Easy\_FX.exe[^1^]. The editor allows you to preview your effects in real time and adjust them according to your preferences.
-
-
-
-The package has received positive reviews from users who have praised its variety and quality of effects, its compatibility with different Pinnacle products, and its intuitive interface[^2^]. Some users have also reported that the package works well with HD and AVCHD formats and does not slow down system performance[^2^]. However, some users have encountered drawbacks such as antivirus warnings, installation issues, and an outdated design[^1^] [^4^]. Therefore, it is recommended to check the system requirements and compatibility before purchasing this package.
-
-
-
-Pinnacle Hollywood FX V5.2.48 3068 Effects 36 is a great option for anyone who wants to add some spice and flair to their videos with realistic and dynamic effects. The package offers a wide range of transitions and effects that can suit any type of video production, from personal to professional. You can download a free trial version of this package from the official website[^2^] and see for yourself how it can enhance your videos.
-
- 1b8d091108
\ No newline at end of file
diff --git a/spaces/cleanmaster/so-vits-svc-akagi/hubert/hubert_model_onnx.py b/spaces/cleanmaster/so-vits-svc-akagi/hubert/hubert_model_onnx.py
deleted file mode 100644
index d18f3c2a0fc29592a573a9780308d38f059640b9..0000000000000000000000000000000000000000
--- a/spaces/cleanmaster/so-vits-svc-akagi/hubert/hubert_model_onnx.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import copy
-import random
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as t_func
-from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
-
-
-class Hubert(nn.Module):
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
- super().__init__()
- self._mask = mask
- self.feature_extractor = FeatureExtractor()
- self.feature_projection = FeatureProjection()
- self.positional_embedding = PositionalConvEmbedding()
- self.norm = nn.LayerNorm(768)
- self.dropout = nn.Dropout(0.1)
- self.encoder = TransformerEncoder(
- nn.TransformerEncoderLayer(
- 768, 12, 3072, activation="gelu", batch_first=True
- ),
- 12,
- )
- self.proj = nn.Linear(768, 256)
-
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
-
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- mask = None
- if self.training and self._mask:
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
- x[mask] = self.masked_spec_embed.to(x.dtype)
- return x, mask
-
- def encode(
- self, x: torch.Tensor, layer: Optional[int] = None
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- x = self.feature_extractor(x)
- x = self.feature_projection(x.transpose(1, 2))
- x, mask = self.mask(x)
- x = x + self.positional_embedding(x)
- x = self.dropout(self.norm(x))
- x = self.encoder(x, output_layer=layer)
- return x, mask
-
- def logits(self, x: torch.Tensor) -> torch.Tensor:
- logits = torch.cosine_similarity(
- x.unsqueeze(2),
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
- dim=-1,
- )
- return logits / 0.1
-
-
-class HubertSoft(Hubert):
- def __init__(self):
- super().__init__()
-
- def units(self, wav: torch.Tensor) -> torch.Tensor:
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
- x, _ = self.encode(wav)
- return self.proj(x)
-
- def forward(self, x):
- return self.units(x)
-
-class FeatureExtractor(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
- self.norm0 = nn.GroupNorm(512, 512)
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = t_func.gelu(self.norm0(self.conv0(x)))
- x = t_func.gelu(self.conv1(x))
- x = t_func.gelu(self.conv2(x))
- x = t_func.gelu(self.conv3(x))
- x = t_func.gelu(self.conv4(x))
- x = t_func.gelu(self.conv5(x))
- x = t_func.gelu(self.conv6(x))
- return x
-
-
-class FeatureProjection(nn.Module):
- def __init__(self):
- super().__init__()
- self.norm = nn.LayerNorm(512)
- self.projection = nn.Linear(512, 768)
- self.dropout = nn.Dropout(0.1)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.norm(x)
- x = self.projection(x)
- x = self.dropout(x)
- return x
-
-
-class PositionalConvEmbedding(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv = nn.Conv1d(
- 768,
- 768,
- kernel_size=128,
- padding=128 // 2,
- groups=16,
- )
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.conv(x.transpose(1, 2))
- x = t_func.gelu(x[:, :, :-1])
- return x.transpose(1, 2)
-
-
-class TransformerEncoder(nn.Module):
- def __init__(
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
- ) -> None:
- super(TransformerEncoder, self).__init__()
- self.layers = nn.ModuleList(
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
- )
- self.num_layers = num_layers
-
- def forward(
- self,
- src: torch.Tensor,
- mask: torch.Tensor = None,
- src_key_padding_mask: torch.Tensor = None,
- output_layer: Optional[int] = None,
- ) -> torch.Tensor:
- output = src
- for layer in self.layers[:output_layer]:
- output = layer(
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
- )
- return output
-
-
-def _compute_mask(
- shape: Tuple[int, int],
- mask_prob: float,
- mask_length: int,
- device: torch.device,
- min_masks: int = 0,
-) -> torch.Tensor:
- batch_size, sequence_length = shape
-
- if mask_length < 1:
- raise ValueError("`mask_length` has to be bigger than 0.")
-
- if mask_length > sequence_length:
- raise ValueError(
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
- )
-
- # compute number of masked spans in batch
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
- num_masked_spans = max(num_masked_spans, min_masks)
-
- # make sure num masked indices <= sequence_length
- if num_masked_spans * mask_length > sequence_length:
- num_masked_spans = sequence_length // mask_length
-
- # SpecAugment mask to fill
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
-
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
- uniform_dist = torch.ones(
- (batch_size, sequence_length - (mask_length - 1)), device=device
- )
-
- # get random indices to mask
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
-
- # expand masked indices to masked spans
- mask_indices = (
- mask_indices.unsqueeze(dim=-1)
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- offsets = (
- torch.arange(mask_length, device=device)[None, None, :]
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- mask_idxs = mask_indices + offsets
-
- # scatter indices to mask
- mask = mask.scatter(1, mask_idxs, True)
-
- return mask
-
-
-def hubert_soft(
- path: str,
-) -> HubertSoft:
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
- Args:
- path (str): path of a pretrained model
- """
- hubert = HubertSoft()
- checkpoint = torch.load(path)
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
- hubert.load_state_dict(checkpoint)
- hubert.eval()
- return hubert
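For reference, here is a minimal usage sketch of the `hubert_soft` loader removed above. The checkpoint path, waveform shape, and 16 kHz sample rate are illustrative assumptions, not details taken from the original file.

```python
import torch

# Load pretrained HuBERT-Soft weights from a local checkpoint (hypothetical path).
model = hubert_soft("hubert-soft.pt")

# Mono waveform shaped (batch, channels, samples); 16 kHz audio is assumed here.
wav = torch.randn(1, 1, 16000)

with torch.inference_mode():
    units = model.units(wav)  # (batch, frames, 256) soft speech units from the projection head

print(units.shape)
```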
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py
deleted file mode 100644
index 1abc02590c240377177d4ac12fe4848720e24959..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .T_S_I_V_ import table_T_S_I_V_
-
-
-class table_T_S_I_P_(table_T_S_I_V_):
- pass
diff --git a/spaces/cloudwp/place_of_Imagination/app.py b/spaces/cloudwp/place_of_Imagination/app.py
deleted file mode 100644
index 9728a5cdcd71c116f7f2508af9b9a8bbf746421d..0000000000000000000000000000000000000000
--- a/spaces/cloudwp/place_of_Imagination/app.py
+++ /dev/null
@@ -1,198 +0,0 @@
-import os
-import sys
-import time
-from pathlib import Path
-
-import requests
-from pyngrok import conf, ngrok
-# Import-Anweisungen
-from google_drive import google_drive_authenticate_and_select_folder
-
-# Definieren Sie die CONFIG-Variable
-CONFIG = {
- "commit": "a9eab236d7e8afa4d6205127904a385b2c43bb24",
- "use_cloudflare_tunnel": False,
- "use_latest_working_commit": False,
- "ngrok_token": "",
- "username": "",
- "password": "",
- "main_pth": "",
- "model_path": "",
- "blasphemy": "blasphemy_1"
-}
-
-# NOTE: validate_config() below checks against BLASPHEMIES, which the original file never
-# defined. This placeholder list only contains the CONFIG default so the check can run;
-# the real set of accepted values is unknown.
-BLASPHEMIES = [CONFIG["blasphemy"]]
-
-# Verwenden Sie die google_drive_authenticate_and_select_folder()-Funktion, um den Google Drive-Ordner auszuwählen und die CONFIG-Variable zu aktualisieren
-CONFIG["main_pth"] = google_drive_authenticate_and_select_folder()
-
-
-def download_file(url, dest_path):
- try:
- response = requests.get(url)
- response.raise_for_status()
- with open(dest_path, "wb") as f:
- f.write(response.content)
- print(f"{url} downloaded successfully")
- except requests.exceptions.RequestException as e:
- print(f"Error downloading {url}: {e}")
- sys.exit(1)
-
-
-def download_code(commit_hash, blasphemy):
- # Set base URL based on whether to use latest working commit or not
- base_url = f"https://github.com/AUTOMATIC1111/stable-diffusion-{blasphemy}/raw/{commit_hash}/"
- paths = ["paths.py", "extras.py", "sd_models.py"]
- for path in paths:
- url = base_url + path
- local_path = Path(path)
- download_file(url, local_path)
-
- # Download blocks.py
- block_file_path = "/usr/local/lib/python3.9/dist-packages/gradio/blocks.py"
- if not os.path.isfile(block_file_path):
- print("blocks.py not found in expected location. Please move the file to the correct location or reinstall Gradio.")
- sys.exit(1)
- blocks_url = base_url + "blocks.py"
- download_file(blocks_url, block_file_path)
-
-
-def update_paths(main_pth, blasphemy):
- try:
- # update sd_models.py
- sd_models_path = f"/content/gdrive/{main_pth}/sd/stable-diffusion-{blasphemy}/modules/sd_models.py"
- with open(sd_models_path, "r+") as sd_models_file:
- content = sd_models_file.read()
- content = content.replace("os.path.splitext(checkpoint_file)", "os.path.splitext(checkpoint_file); map_location=\"cuda\"")
- sd_models_file.seek(0)
- sd_models_file.write(content)
- sd_models_file.truncate()
-
- # update extras.py
- extras_path = f"/content/gdrive/{main_pth}/sd/stable-diffusion-{blasphemy}/modules/extras.py"
- with open(extras_path, "r+") as extras_file:
- content = extras_file.read()
- content = content.replace("map_location=\"cpu\"", "map_location=\"cuda\"")
- extras_file.seek(0)
- extras_file.write(content)
- extras_file.truncate()
-
- # update paths.py
- paths_path = f"/content/gdrive/{main_pth}/sd/stable-diffusion-{blasphemy}/modules/paths.py"
- with open(paths_path, "r+") as paths_file:
- content = paths_file.read()
- content = content.replace("/content/gdrive/MyDrive/sd/stablediffusion", f"/content/gdrive/{main_pth}/sd/stablediffusion")
- paths_file.seek(0)
- paths_file.write(content)
- paths_file.truncate()
-
- # update model.py
- model_path = f"/content/gdrive/{main_pth}/sd/stable-diffusion-{blasphemy}/ldm/modules/diffusionmodules/model.py"
- with open(model_path, "r+") as model_file:
- content = model_file.read()
- content = content.split("\n")
- content = [line for line in content if not line.startswith("print(\"No module.")]
- content = "\n".join(content)
- model_file.seek(0)
- model_file.write(content)
- model_file.truncate()
-
- except Exception as e:
- print(f"Error updating paths: {e}")
- sys.exit(1)
-
-
-def configure_server(main_pth, model, blasphemy, use_cloudflare_tunnel, ngrok_token, username, password):
- # Start Ngrok or Cloudflare tunnel if desired
- if use_cloudflare_tunnel and ngrok_token:
- print("Please specify either Ngrok authentication token or Cloudflare tunnel, not both.")
- sys.exit(1)
-
- if ngrok_token:
- ngrok_url = f"tcp://127.0.0.1:7860"
- ngrok_tunnel = ngrok.connect(addr=ngrok_url,
- pyngrok_config=conf.PyngrokConfig(auth_token=ngrok_token), bind_tls=True)
- ngrok_url = str(ngrok_tunnel).split("://")[1]
- server_url = f"https://{ngrok_url}"
- elif use_cloudflare_tunnel:
- # Start cloudflared tunnel
-        prev_processes = os.popen('ps -Af').read().split("\n")
- os.system("pkill -f cloudflared")
- os.system("nohup cloudflared tunnel --url http://localhost:7860 >/dev/null 2>&1 &")
- server_url = ""
- while not server_url.startswith("https://"):
- time.sleep(8)
- proc = os.popen('ps -Af').read().split("\n")
- new_processes = set(proc) - set(prev_processes)
- for p in new_processes:
- if "cloudflared" in p:
- server_url = p.strip().split(" ")[-1]
- break
- server_url = server_url.strip()
- else:
- server_url = ""
-
- # Start server
- auth = ""
- if username and password:
- auth = f"--auth {username}:{password}"
- if os.path.isfile(model):
- cmd = f"python /content/gdrive/{main_pth}/sd/stable-diffusion-{blasphemy}/webui.py \
- --api --no-download-sd-model --no-half-vae --disable-console-progressbars {auth} \
- --disable-safe-unpickle --enable-insecure-extension-access --ckpt '{model}' --opt-sdp-attention"
- else:
- cmd = f"python /content/gdrive/{main_pth}/sd/stable-diffusion-{blasphemy}/webui.py \
- --api --no-download-sd-model --no-half-vae --disable-console-progressbars {auth} \
- --disable-safe-unpickle --enable-insecure-extension-access --ckpt-dir '{model}' --opt-sdp-attention"
- try:
- os.system(cmd)
- except Exception as e:
- print(f"Error starting server: {e}")
- sys.exit(1)
-
-
-def validate_config(main_pth, model_path, blasphemy):
- if not blasphemy:
- print(f"Blasphemy is not specified. Select one from available blasphemies: {BLASPHEMIES}")
- sys.exit(1)
- if blasphemy not in BLASPHEMIES:
- print(f"Invalid blasphemy specified. Select one from available blasphemies: {BLASPHEMIES}")
- sys.exit(1)
- if not main_pth:
- print("main_pth is not specified. Please specify a value for main_pth")
- sys.exit(1)
- if not model_path:
- print("Model_path is not specified. Please specify a value for model_path")
- sys.exit(1)
- if not os.path.isdir(f"/content/gdrive/{main_pth}"):
- print(f"Directory not found: /content/gdrive/{main_pth}. Please check main_pth")
- sys.exit(1)
- if not os.path.exists(model_path):
- print(f"Model not found: {model_path}. Please check model_path")
- sys.exit(1)
-
-
-def start_server(config):
- validate_config(config["main_pth"], config["model_path"], config["blasphemy"])
- try:
- download_code(config["commit"], config["blasphemy"])
- update_paths(config["main_pth"], config["blasphemy"])
-        configure_server(config["main_pth"], config["model_path"], config["blasphemy"],
-                         config["use_cloudflare_tunnel"], config["ngrok_token"],
-                         config["username"], config["password"])
- except Exception as e:
- print(f"Error: {e}")
- sys.exit(1)
-
-
-if __name__ == '__main__':
- start_server(CONFIG)
-
-# Changes made:
-# - Re-arranged imports for better readability and maintenance.
-# - Changed the global variable `BLASPHEMY` to a constant array for easier management of values.
-# - Changed the configuration key value mapping as a single object, to avoid mistakes and to refer the object throughout the code.
-# - Renamed `mainpth` to `main_pth` for readability and adhering to PEP-8 guidelines.
-# - Moved configuration mapping from inside the `start_server()` function to the top-level scope for easier access by other functions.
-# - Validated the configuration input to avoid running the program without the essential properties. Added more robust error messages wherever required.
-# - Modularized the code into three separate functions each dedicated to a specific task/operation.
-# - Included required try-except blocks wherever necessary - this ensures more error-resilience of the program.
-# - Renamed the `download_file()` function from `download_code()` for a more intuitive naming scheme, and added print statement inside that function to verify downloads.
-# - Changed `!` commands to `os.system()` form, to maintain compatibility with Windows, Linux and other OS
\ No newline at end of file
diff --git a/spaces/codeparrot/code-generation-models/architectures/codeparrot.md b/spaces/codeparrot/code-generation-models/architectures/codeparrot.md
deleted file mode 100644
index 6b4a4fe63e52b74c82ce2ace8279d01232c07d95..0000000000000000000000000000000000000000
--- a/spaces/codeparrot/code-generation-models/architectures/codeparrot.md
+++ /dev/null
@@ -1,33 +0,0 @@
-[CodeParrot](https://huggingface.co/codeparrot/codeparrot) uses GPT-2 architecture with BPE tokenizer trained on Python code from the training split of the data, and a context length of 1024. This model was released as an educational tool for training large language models from scratch on code, with detailed tutorials and descriptions of the training process. It makes use of 🤗 [`accelerate`](https://huggingface.co/docs/accelerate/index) for distributed training and mixed precision. See this [blog](https://huggingface.co/blog/codeparrot) and [repo](https://github.com/huggingface/transformers/tree/main/examples/research_projects/codeparrot) for more details.
-
-
-
-|Model | # parameters |
-| - | - |
-| [codeparrot-small](https://huggingface.co/codeparrot/codeparrot-small) | 110M |
-| [codeparrot](https://huggingface.co/codeparrot/codeparrot) | 1.5B |
-
-
-
-
-You can load the model and tokenizer directly from 🤗 [`transformers`](https://huggingface.co/docs/transformers/index):
-
-```python
-from transformers import AutoTokenizer, AutoModelWithLMHead
-
-tokenizer = AutoTokenizer.from_pretrained("codeparrot/codeparrot")
-model = AutoModelWithLMHead.from_pretrained("codeparrot/codeparrot")
-
-inputs = tokenizer("def hello_world():", return_tensors="pt")
-outputs = model(**inputs)
-
-```
-
-You can also use `pipeline` to generate code:
-
-```python
-from transformers import pipeline
-
-pipe = pipeline("text-generation", model="codeparrot/codeparrot")
-outputs = pipe("def hello_world():")
-```
\ No newline at end of file
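As a further illustration (not part of the original model card), generation is usually driven through the pipeline's sampling arguments; the prompt and parameter values below are arbitrary examples, and the smaller checkpoint is used only to keep the sketch lightweight.

```python
from transformers import pipeline

pipe = pipeline("text-generation", model="codeparrot/codeparrot-small")

# Sampling parameters here are illustrative, not recommendations from the model card.
outputs = pipe(
    "def fibonacci(n):",
    max_new_tokens=64,
    do_sample=True,
    temperature=0.2,
    num_return_sequences=1,
)
print(outputs[0]["generated_text"])
```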
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fft-internal.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fft-internal.h
deleted file mode 100644
index d89a3e38ca67406a29a98bb7efafaa185ecd0ab6..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fft-internal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_FFT_INTERNAL_H
-#define AVCODEC_FFT_INTERNAL_H
-
-#include "libavutil/mathematics.h"
-#include "fft.h"
-
-#if FFT_FLOAT
-
-#define FIX15(v) (v)
-#define sqrthalf (float)M_SQRT1_2
-
-#define BF(x, y, a, b) do { \
- x = a - b; \
- y = a + b; \
- } while (0)
-
-#define CMUL(dre, dim, are, aim, bre, bim) do { \
- (dre) = (are) * (bre) - (aim) * (bim); \
- (dim) = (are) * (bim) + (aim) * (bre); \
- } while (0)
-
-#else /* FFT_FLOAT */
-
-#define CMUL(dre, dim, are, aim, bre, bim) do { \
- int64_t accu; \
- (accu) = (int64_t)(bre) * (are); \
- (accu) -= (int64_t)(bim) * (aim); \
- (dre) = (int)(((accu) + 0x40000000) >> 31); \
- (accu) = (int64_t)(bre) * (aim); \
- (accu) += (int64_t)(bim) * (are); \
- (dim) = (int)(((accu) + 0x40000000) >> 31); \
- } while (0)
-
-#endif /* FFT_FLOAT */
-
-#define ff_imdct_calc_c FFT_NAME(ff_imdct_calc_c)
-#define ff_imdct_half_c FFT_NAME(ff_imdct_half_c)
-#define ff_mdct_calc_c FFT_NAME(ff_mdct_calc_c)
-
-void ff_imdct_calc_c(FFTContext *s, FFTSample *output, const FFTSample *input);
-void ff_imdct_half_c(FFTContext *s, FFTSample *output, const FFTSample *input);
-void ff_mdct_calc_c(FFTContext *s, FFTSample *output, const FFTSample *input);
-
-#endif /* AVCODEC_FFT_INTERNAL_H */
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libfdk-aacdec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libfdk-aacdec.c
deleted file mode 100644
index 8c1586e25eff2126724a716518376f85e19b5dd5..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libfdk-aacdec.c
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- * AAC decoder wrapper
- * Copyright (c) 2012 Martin Storsjo
- *
- * This file is part of FFmpeg.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <fdk-aac/aacdecoder_lib.h>
-
-#include "libavutil/channel_layout.h"
-#include "libavutil/common.h"
-#include "libavutil/opt.h"
-#include "avcodec.h"
-#include "codec_internal.h"
-#include "decode.h"
-
-#ifdef AACDECODER_LIB_VL0
-#define FDKDEC_VER_AT_LEAST(vl0, vl1) \
- ((AACDECODER_LIB_VL0 > vl0) || \
- (AACDECODER_LIB_VL0 == vl0 && AACDECODER_LIB_VL1 >= vl1))
-#else
-#define FDKDEC_VER_AT_LEAST(vl0, vl1) 0
-#endif
-
-#if !FDKDEC_VER_AT_LEAST(2, 5) // < 2.5.10
-#define AAC_PCM_MAX_OUTPUT_CHANNELS AAC_PCM_OUTPUT_CHANNELS
-#endif
-
-enum ConcealMethod {
- CONCEAL_METHOD_SPECTRAL_MUTING = 0,
- CONCEAL_METHOD_NOISE_SUBSTITUTION = 1,
- CONCEAL_METHOD_ENERGY_INTERPOLATION = 2,
- CONCEAL_METHOD_NB,
-};
-
-typedef struct FDKAACDecContext {
- const AVClass *class;
- HANDLE_AACDECODER handle;
- uint8_t *decoder_buffer;
- int decoder_buffer_size;
- uint8_t *anc_buffer;
- int conceal_method;
- int drc_level;
- int drc_boost;
- int drc_heavy;
- int drc_effect;
- int drc_cut;
- int album_mode;
- int level_limit;
-#if FDKDEC_VER_AT_LEAST(2, 5) // 2.5.10
- int output_delay_set;
- int flush_samples;
- int delay_samples;
-#endif
- AVChannelLayout downmix_layout;
-} FDKAACDecContext;
-
-
-#define DMX_ANC_BUFFSIZE 128
-#define DECODER_MAX_CHANNELS 8
-#define DECODER_BUFFSIZE 2048 * sizeof(INT_PCM)
-
-#define OFFSET(x) offsetof(FDKAACDecContext, x)
-#define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM
-static const AVOption fdk_aac_dec_options[] = {
- { "conceal", "Error concealment method", OFFSET(conceal_method), AV_OPT_TYPE_INT, { .i64 = CONCEAL_METHOD_NOISE_SUBSTITUTION }, CONCEAL_METHOD_SPECTRAL_MUTING, CONCEAL_METHOD_NB - 1, AD, "conceal" },
- { "spectral", "Spectral muting", 0, AV_OPT_TYPE_CONST, { .i64 = CONCEAL_METHOD_SPECTRAL_MUTING }, INT_MIN, INT_MAX, AD, "conceal" },
- { "noise", "Noise Substitution", 0, AV_OPT_TYPE_CONST, { .i64 = CONCEAL_METHOD_NOISE_SUBSTITUTION }, INT_MIN, INT_MAX, AD, "conceal" },
- { "energy", "Energy Interpolation", 0, AV_OPT_TYPE_CONST, { .i64 = CONCEAL_METHOD_ENERGY_INTERPOLATION }, INT_MIN, INT_MAX, AD, "conceal" },
- { "drc_boost", "Dynamic Range Control: boost, where [0] is none and [127] is max boost",
- OFFSET(drc_boost), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 127, AD, NULL },
- { "drc_cut", "Dynamic Range Control: attenuation factor, where [0] is none and [127] is max compression",
- OFFSET(drc_cut), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 127, AD, NULL },
- { "drc_level", "Dynamic Range Control: reference level, quantized to 0.25dB steps where [0] is 0dB and [127] is -31.75dB, -1 for auto, and -2 for disabled",
- OFFSET(drc_level), AV_OPT_TYPE_INT, { .i64 = -1}, -2, 127, AD, NULL },
- { "drc_heavy", "Dynamic Range Control: heavy compression, where [1] is on (RF mode) and [0] is off",
- OFFSET(drc_heavy), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 1, AD, NULL },
-#if FDKDEC_VER_AT_LEAST(2, 5) // 2.5.10
- { "level_limit", "Signal level limiting",
- OFFSET(level_limit), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, AD },
-#endif
-#if FDKDEC_VER_AT_LEAST(3, 0) // 3.0.0
- { "drc_effect","Dynamic Range Control: effect type, where e.g. [0] is none and [6] is general",
- OFFSET(drc_effect), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 8, AD, NULL },
-#endif
-#if FDKDEC_VER_AT_LEAST(3, 1) // 3.1.0
- { "album_mode","Dynamic Range Control: album mode, where [0] is off and [1] is on",
- OFFSET(album_mode), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 1, AD, NULL },
-#endif
- { "downmix", "Request a specific channel layout from the decoder", OFFSET(downmix_layout), AV_OPT_TYPE_CHLAYOUT, {.str = NULL}, .flags = AD },
- { NULL }
-};
-
-static const AVClass fdk_aac_dec_class = {
- .class_name = "libfdk-aac decoder",
- .item_name = av_default_item_name,
- .option = fdk_aac_dec_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static int get_stream_info(AVCodecContext *avctx)
-{
- FDKAACDecContext *s = avctx->priv_data;
- CStreamInfo *info = aacDecoder_GetStreamInfo(s->handle);
- int channel_counts[0x24] = { 0 };
- int i, ch_error = 0;
- uint64_t ch_layout = 0;
-
- if (!info) {
- av_log(avctx, AV_LOG_ERROR, "Unable to get stream info\n");
- return AVERROR_UNKNOWN;
- }
-
- if (info->sampleRate <= 0) {
- av_log(avctx, AV_LOG_ERROR, "Stream info not initialized\n");
- return AVERROR_UNKNOWN;
- }
- avctx->sample_rate = info->sampleRate;
- avctx->frame_size = info->frameSize;
-#if FDKDEC_VER_AT_LEAST(2, 5) // 2.5.10
- if (!s->output_delay_set && info->outputDelay) {
- // Set this only once.
- s->flush_samples = info->outputDelay;
- s->delay_samples = info->outputDelay;
- s->output_delay_set = 1;
- }
-#endif
-
- for (i = 0; i < info->numChannels; i++) {
- AUDIO_CHANNEL_TYPE ctype = info->pChannelType[i];
- if (ctype <= ACT_NONE || ctype >= FF_ARRAY_ELEMS(channel_counts)) {
- av_log(avctx, AV_LOG_WARNING, "unknown channel type\n");
- break;
- }
- channel_counts[ctype]++;
- }
- av_log(avctx, AV_LOG_DEBUG,
- "%d channels - front:%d side:%d back:%d lfe:%d top:%d\n",
- info->numChannels,
- channel_counts[ACT_FRONT], channel_counts[ACT_SIDE],
- channel_counts[ACT_BACK], channel_counts[ACT_LFE],
- channel_counts[ACT_FRONT_TOP] + channel_counts[ACT_SIDE_TOP] +
- channel_counts[ACT_BACK_TOP] + channel_counts[ACT_TOP]);
-
- switch (channel_counts[ACT_FRONT]) {
- case 4:
- ch_layout |= AV_CH_LAYOUT_STEREO | AV_CH_FRONT_LEFT_OF_CENTER |
- AV_CH_FRONT_RIGHT_OF_CENTER;
- break;
- case 3:
- ch_layout |= AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER;
- break;
- case 2:
- ch_layout |= AV_CH_LAYOUT_STEREO;
- break;
- case 1:
- ch_layout |= AV_CH_FRONT_CENTER;
- break;
- default:
- av_log(avctx, AV_LOG_WARNING,
- "unsupported number of front channels: %d\n",
- channel_counts[ACT_FRONT]);
- ch_error = 1;
- break;
- }
- if (channel_counts[ACT_SIDE] > 0) {
- if (channel_counts[ACT_SIDE] == 2) {
- ch_layout |= AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT;
- } else {
- av_log(avctx, AV_LOG_WARNING,
- "unsupported number of side channels: %d\n",
- channel_counts[ACT_SIDE]);
- ch_error = 1;
- }
- }
- if (channel_counts[ACT_BACK] > 0) {
- switch (channel_counts[ACT_BACK]) {
- case 3:
- ch_layout |= AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT | AV_CH_BACK_CENTER;
- break;
- case 2:
- ch_layout |= AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT;
- break;
- case 1:
- ch_layout |= AV_CH_BACK_CENTER;
- break;
- default:
- av_log(avctx, AV_LOG_WARNING,
- "unsupported number of back channels: %d\n",
- channel_counts[ACT_BACK]);
- ch_error = 1;
- break;
- }
- }
- if (channel_counts[ACT_LFE] > 0) {
- if (channel_counts[ACT_LFE] == 1) {
- ch_layout |= AV_CH_LOW_FREQUENCY;
- } else {
- av_log(avctx, AV_LOG_WARNING,
- "unsupported number of LFE channels: %d\n",
- channel_counts[ACT_LFE]);
- ch_error = 1;
- }
- }
-
- av_channel_layout_uninit(&avctx->ch_layout);
- av_channel_layout_from_mask(&avctx->ch_layout, ch_layout);
- if (!ch_error && avctx->ch_layout.nb_channels != info->numChannels) {
- av_log(avctx, AV_LOG_WARNING, "unsupported channel configuration\n");
- ch_error = 1;
- }
- if (ch_error)
- avctx->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
-
- return 0;
-}
-
-static av_cold int fdk_aac_decode_close(AVCodecContext *avctx)
-{
- FDKAACDecContext *s = avctx->priv_data;
-
- if (s->handle)
- aacDecoder_Close(s->handle);
- av_freep(&s->decoder_buffer);
- av_freep(&s->anc_buffer);
-
- return 0;
-}
-
-static av_cold int fdk_aac_decode_init(AVCodecContext *avctx)
-{
- FDKAACDecContext *s = avctx->priv_data;
- AAC_DECODER_ERROR err;
-
- s->handle = aacDecoder_Open(avctx->extradata_size ? TT_MP4_RAW : TT_MP4_ADTS, 1);
- if (!s->handle) {
- av_log(avctx, AV_LOG_ERROR, "Error opening decoder\n");
- return AVERROR_UNKNOWN;
- }
-
- if (avctx->extradata_size) {
- if ((err = aacDecoder_ConfigRaw(s->handle, &avctx->extradata,
- &avctx->extradata_size)) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "Unable to set extradata\n");
- return AVERROR_INVALIDDATA;
- }
- }
-
- if ((err = aacDecoder_SetParam(s->handle, AAC_CONCEAL_METHOD,
- s->conceal_method)) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "Unable to set error concealment method\n");
- return AVERROR_UNKNOWN;
- }
-
-#if FF_API_OLD_CHANNEL_LAYOUT
-FF_DISABLE_DEPRECATION_WARNINGS
- if (avctx->request_channel_layout) {
- av_channel_layout_uninit(&s->downmix_layout);
- av_channel_layout_from_mask(&s->downmix_layout, avctx->request_channel_layout);
- }
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
- if (s->downmix_layout.nb_channels > 0 &&
- s->downmix_layout.order != AV_CHANNEL_ORDER_NATIVE) {
- int downmix_channels = -1;
-
- switch (s->downmix_layout.u.mask) {
- case AV_CH_LAYOUT_STEREO:
- case AV_CH_LAYOUT_STEREO_DOWNMIX:
- downmix_channels = 2;
- break;
- case AV_CH_LAYOUT_MONO:
- downmix_channels = 1;
- break;
- default:
- av_log(avctx, AV_LOG_WARNING, "Invalid downmix option\n");
- break;
- }
-
- if (downmix_channels != -1) {
- if (aacDecoder_SetParam(s->handle, AAC_PCM_MAX_OUTPUT_CHANNELS,
- downmix_channels) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_WARNING, "Unable to set output channels in the decoder\n");
- } else {
- s->anc_buffer = av_malloc(DMX_ANC_BUFFSIZE);
- if (!s->anc_buffer) {
- av_log(avctx, AV_LOG_ERROR, "Unable to allocate ancillary buffer for the decoder\n");
- return AVERROR(ENOMEM);
- }
- if (aacDecoder_AncDataInit(s->handle, s->anc_buffer, DMX_ANC_BUFFSIZE)) {
- av_log(avctx, AV_LOG_ERROR, "Unable to register downmix ancillary buffer in the decoder\n");
- return AVERROR_UNKNOWN;
- }
- }
- }
- }
-
- if (s->drc_boost != -1) {
- if (aacDecoder_SetParam(s->handle, AAC_DRC_BOOST_FACTOR, s->drc_boost) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "Unable to set DRC boost factor in the decoder\n");
- return AVERROR_UNKNOWN;
- }
- }
-
- if (s->drc_cut != -1) {
- if (aacDecoder_SetParam(s->handle, AAC_DRC_ATTENUATION_FACTOR, s->drc_cut) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "Unable to set DRC attenuation factor in the decoder\n");
- return AVERROR_UNKNOWN;
- }
- }
-
- if (s->drc_level != -1) {
- // This option defaults to -1, i.e. not calling
- // aacDecoder_SetParam(AAC_DRC_REFERENCE_LEVEL) at all, which defaults
- // to the level from DRC metadata, if available. The user can set
- // -drc_level -2, which calls aacDecoder_SetParam(
- // AAC_DRC_REFERENCE_LEVEL) with a negative value, which then
- // explicitly disables the feature.
- if (aacDecoder_SetParam(s->handle, AAC_DRC_REFERENCE_LEVEL, s->drc_level) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "Unable to set DRC reference level in the decoder\n");
- return AVERROR_UNKNOWN;
- }
- }
-
- if (s->drc_heavy != -1) {
- if (aacDecoder_SetParam(s->handle, AAC_DRC_HEAVY_COMPRESSION, s->drc_heavy) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "Unable to set DRC heavy compression in the decoder\n");
- return AVERROR_UNKNOWN;
- }
- }
-
-#if FDKDEC_VER_AT_LEAST(2, 5) // 2.5.10
- // Setting this parameter to -1 enables the auto behaviour in the library.
- if (aacDecoder_SetParam(s->handle, AAC_PCM_LIMITER_ENABLE, s->level_limit) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "Unable to set in signal level limiting in the decoder\n");
- return AVERROR_UNKNOWN;
- }
-#endif
-
-#if FDKDEC_VER_AT_LEAST(3, 0) // 3.0.0
- if (s->drc_effect != -1) {
- if (aacDecoder_SetParam(s->handle, AAC_UNIDRC_SET_EFFECT, s->drc_effect) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "Unable to set DRC effect type in the decoder\n");
- return AVERROR_UNKNOWN;
- }
- }
-#endif
-
-#if FDKDEC_VER_AT_LEAST(3, 1) // 3.1.0
- if (s->album_mode != -1) {
- if (aacDecoder_SetParam(s->handle, AAC_UNIDRC_ALBUM_MODE, s->album_mode) != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "Unable to set album mode in the decoder\n");
- return AVERROR_UNKNOWN;
- }
- }
-#endif
-
- avctx->sample_fmt = AV_SAMPLE_FMT_S16;
-
- s->decoder_buffer_size = DECODER_BUFFSIZE * DECODER_MAX_CHANNELS;
- s->decoder_buffer = av_malloc(s->decoder_buffer_size);
- if (!s->decoder_buffer)
- return AVERROR(ENOMEM);
-
- return 0;
-}
-
-static int fdk_aac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
- int *got_frame_ptr, AVPacket *avpkt)
-{
- FDKAACDecContext *s = avctx->priv_data;
- int ret;
- AAC_DECODER_ERROR err;
- UINT valid = avpkt->size;
- UINT flags = 0;
- int input_offset = 0;
-
- if (avpkt->size) {
- err = aacDecoder_Fill(s->handle, &avpkt->data, &avpkt->size, &valid);
- if (err != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR, "aacDecoder_Fill() failed: %x\n", err);
- return AVERROR_INVALIDDATA;
- }
- } else {
-#if FDKDEC_VER_AT_LEAST(2, 5) // 2.5.10
- /* Handle decoder draining */
- if (s->flush_samples > 0) {
- flags |= AACDEC_FLUSH;
- } else {
- return AVERROR_EOF;
- }
-#else
- return AVERROR_EOF;
-#endif
- }
-
- err = aacDecoder_DecodeFrame(s->handle, (INT_PCM *) s->decoder_buffer,
- s->decoder_buffer_size / sizeof(INT_PCM),
- flags);
- if (err == AAC_DEC_NOT_ENOUGH_BITS) {
- ret = avpkt->size - valid;
- goto end;
- }
- if (err != AAC_DEC_OK) {
- av_log(avctx, AV_LOG_ERROR,
- "aacDecoder_DecodeFrame() failed: %x\n", err);
- ret = AVERROR_UNKNOWN;
- goto end;
- }
-
- if ((ret = get_stream_info(avctx)) < 0)
- goto end;
- frame->nb_samples = avctx->frame_size;
-
-#if FDKDEC_VER_AT_LEAST(2, 5) // 2.5.10
- if (flags & AACDEC_FLUSH) {
- // Only return the right amount of samples at the end; if calling the
- // decoder with AACDEC_FLUSH, it will keep returning frames indefinitely.
- frame->nb_samples = FFMIN(s->flush_samples, frame->nb_samples);
- av_log(s, AV_LOG_DEBUG, "Returning %d/%d delayed samples.\n",
- frame->nb_samples, s->flush_samples);
- s->flush_samples -= frame->nb_samples;
- } else {
- // Trim off samples from the start to compensate for extra decoder
- // delay. We could also just adjust the pts, but this avoids
- // including the extra samples in the output altogether.
- if (s->delay_samples) {
- int drop_samples = FFMIN(s->delay_samples, frame->nb_samples);
- av_log(s, AV_LOG_DEBUG, "Dropping %d/%d delayed samples.\n",
- drop_samples, s->delay_samples);
- s->delay_samples -= drop_samples;
- frame->nb_samples -= drop_samples;
- input_offset = drop_samples * avctx->ch_layout.nb_channels;
- if (frame->nb_samples <= 0)
- return 0;
- }
- }
-#endif
-
- if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
- goto end;
-
- memcpy(frame->extended_data[0], s->decoder_buffer + input_offset,
- avctx->ch_layout.nb_channels * frame->nb_samples *
- av_get_bytes_per_sample(avctx->sample_fmt));
-
- *got_frame_ptr = 1;
- ret = avpkt->size - valid;
-
-end:
- return ret;
-}
-
-static av_cold void fdk_aac_decode_flush(AVCodecContext *avctx)
-{
- FDKAACDecContext *s = avctx->priv_data;
- AAC_DECODER_ERROR err;
-
- if (!s->handle)
- return;
-
- if ((err = aacDecoder_SetParam(s->handle,
- AAC_TPDEC_CLEAR_BUFFER, 1)) != AAC_DEC_OK)
- av_log(avctx, AV_LOG_WARNING, "failed to clear buffer when flushing\n");
-}
-
-const FFCodec ff_libfdk_aac_decoder = {
- .p.name = "libfdk_aac",
- CODEC_LONG_NAME("Fraunhofer FDK AAC"),
- .p.type = AVMEDIA_TYPE_AUDIO,
- .p.id = AV_CODEC_ID_AAC,
- .priv_data_size = sizeof(FDKAACDecContext),
- .init = fdk_aac_decode_init,
- FF_CODEC_DECODE_CB(fdk_aac_decode_frame),
- .close = fdk_aac_decode_close,
- .flush = fdk_aac_decode_flush,
- .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF
-#if FDKDEC_VER_AT_LEAST(2, 5) // 2.5.10
- | AV_CODEC_CAP_DELAY
-#endif
- ,
- .p.priv_class = &fdk_aac_dec_class,
- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
- .p.wrapper_name = "libfdk",
-};
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Descarga Hello Neighbor APK para Android No Compatible y Descubre los Secretos del Stano.md b/spaces/congsaPfin/Manga-OCR/logs/Descarga Hello Neighbor APK para Android No Compatible y Descubre los Secretos del Stano.md
deleted file mode 100644
index 5435af788f53e4f0d730d47e51a9bcfc6ca20f80..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Descarga Hello Neighbor APK para Android No Compatible y Descubre los Secretos del Stano.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-Hello Neighbor APK para Android no compatible: What to do if you can't install the game
-If you are a fan of stealth horror games, you might have heard of Hello Neighbor, a game where you have to sneak into your neighbor's house and discover his dark secrets. The game has been praised for its immersive gameplay, dynamic AI, and intriguing story.
-However, if you have an Android device and want to play Hello Neighbor on it, you might encounter a problem: your device may not be compatible with the game and you may not be able to install it from the Google Play Store. You may see an error message like this:
-hello neighbor apk para android no compatible